hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f724861c26fdb7becc18c6a8a70a39ab6cf71c08 | 6,782 | py | Python | sdk/lusid/models/resource_list_of_get_counterparty_response.py | mneedham/lusid-sdk-python-preview | f4494009d1a2f3431d931c813cab679bdbd92c84 | [
"MIT"
] | null | null | null | sdk/lusid/models/resource_list_of_get_counterparty_response.py | mneedham/lusid-sdk-python-preview | f4494009d1a2f3431d931c813cab679bdbd92c84 | [
"MIT"
] | null | null | null | sdk/lusid/models/resource_list_of_get_counterparty_response.py | mneedham/lusid-sdk-python-preview | f4494009d1a2f3431d931c813cab679bdbd92c84 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
LUSID API
FINBOURNE Technology # noqa: E501
The version of the OpenAPI document: 0.11.3192
Contact: info@finbourne.com
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class ResourceListOfGetCounterpartyResponse(object):
    """Auto-generated OpenAPI model: a paginated list of GetCounterpartyResponse.

    NOTE: generated by OpenAPI Generator (https://openapi-generator.tech);
    regenerate rather than editing by hand.

    Class attributes:
        openapi_types: attribute name -> attribute type.
        attribute_map: attribute name -> JSON key in the API definition.
        required_map: attribute name -> 'required' or 'optional'.
    """

    openapi_types = {
        'values': 'list[GetCounterpartyResponse]',
        'href': 'str',
        'links': 'list[Link]',
        'next_page': 'str',
        'previous_page': 'str'
    }

    attribute_map = {
        'values': 'values',
        'href': 'href',
        'links': 'links',
        'next_page': 'nextPage',
        'previous_page': 'previousPage'
    }

    required_map = {
        'values': 'required',
        'href': 'optional',
        'links': 'optional',
        'next_page': 'optional',
        'previous_page': 'optional'
    }

    def __init__(self, values=None, href=None, links=None, next_page=None, previous_page=None):  # noqa: E501
        """Build a ResourceListOfGetCounterpartyResponse.

        :param values: (required)
        :type values: list[lusid.GetCounterpartyResponse]
        :param href:
        :type href: str
        :param links:
        :type links: list[lusid.Link]
        :param next_page:
        :type next_page: str
        :param previous_page:
        :type previous_page: str
        """  # noqa: E501
        self._values = None
        self._href = None
        self._links = None
        self._next_page = None
        self._previous_page = None
        self.discriminator = None

        # Assign through the property setters so validation runs.
        self.values = values
        self.href = href
        self.links = links
        self.next_page = next_page
        self.previous_page = previous_page

    @property
    def values(self):
        """The page of results (required).

        :rtype: list[GetCounterpartyResponse]
        """
        return self._values

    @values.setter
    def values(self, values):
        """Set the results page; ``None`` is rejected because it is required."""
        if values is None:
            raise ValueError("Invalid value for `values`, must not be `None`")  # noqa: E501
        self._values = values

    @property
    def href(self):
        """URI of this resource list.

        :rtype: str
        """
        return self._href

    @href.setter
    def href(self, href):
        """Set the href of this resource list."""
        self._href = href

    @property
    def links(self):
        """Related API links.

        :rtype: list[Link]
        """
        return self._links

    @links.setter
    def links(self, links):
        """Set the related API links."""
        self._links = links

    @property
    def next_page(self):
        """Pagination token for the next page, if any.

        :rtype: str
        """
        return self._next_page

    @next_page.setter
    def next_page(self, next_page):
        """Set the next-page pagination token."""
        self._next_page = next_page

    @property
    def previous_page(self):
        """Pagination token for the previous page, if any.

        :rtype: str
        """
        return self._previous_page

    @previous_page.setter
    def previous_page(self, previous_page):
        """Set the previous-page pagination token."""
        self._previous_page = previous_page

    def to_dict(self):
        """Return the model's properties as a plain dict, recursively converting
        any nested model (anything exposing ``to_dict``)."""
        def convert(item):
            return item.to_dict() if hasattr(item, "to_dict") else item

        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [convert(element) for element in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {key: convert(val) for key, val in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """True when *other* is the same model type with equal state."""
        if not isinstance(other, ResourceListOfGetCounterpartyResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| 28.495798 | 109 | 0.599823 |
import pprint
import re
import six
class ResourceListOfGetCounterpartyResponse(object):
openapi_types = {
'values': 'list[GetCounterpartyResponse]',
'href': 'str',
'links': 'list[Link]',
'next_page': 'str',
'previous_page': 'str'
}
attribute_map = {
'values': 'values',
'href': 'href',
'links': 'links',
'next_page': 'nextPage',
'previous_page': 'previousPage'
}
required_map = {
'values': 'required',
'href': 'optional',
'links': 'optional',
'next_page': 'optional',
'previous_page': 'optional'
}
def __init__(self, values=None, href=None, links=None, next_page=None, previous_page=None):
self._values = None
self._href = None
self._links = None
self._next_page = None
self._previous_page = None
self.discriminator = None
self.values = values
self.href = href
self.links = links
self.next_page = next_page
self.previous_page = previous_page
@property
def values(self):
return self._values
@values.setter
def values(self, values):
if values is None:
raise ValueError("Invalid value for `values`, must not be `None`")
self._values = values
@property
def href(self):
return self._href
@href.setter
def href(self, href):
self._href = href
@property
def links(self):
return self._links
@links.setter
def links(self, links):
self._links = links
@property
def next_page(self):
return self._next_page
@next_page.setter
def next_page(self, next_page):
self._next_page = next_page
@property
def previous_page(self):
return self._previous_page
@previous_page.setter
def previous_page(self, previous_page):
self._previous_page = previous_page
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, ResourceListOfGetCounterpartyResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true | true |
f72487670d1fd26ffb29aca415344110891be0c5 | 4,468 | py | Python | solidata_api/_models/models_dataset_output.py | entrepreneur-interet-general/solidata_backend | 08ba9151069f2f633461f5166b1954fdeac7854a | [
"MIT"
] | 7 | 2018-12-13T09:50:26.000Z | 2022-03-25T23:59:26.000Z | solidata_api/_models/models_dataset_output.py | entrepreneur-interet-general/solidata_backend | 08ba9151069f2f633461f5166b1954fdeac7854a | [
"MIT"
] | 35 | 2018-10-16T09:06:40.000Z | 2019-03-27T11:12:11.000Z | solidata_api/_models/models_dataset_output.py | entrepreneur-interet-general/solidata_backend | 08ba9151069f2f633461f5166b1954fdeac7854a | [
"MIT"
] | 2 | 2019-02-16T15:19:05.000Z | 2019-02-19T19:27:44.000Z | # -*- encoding: utf-8 -*-
"""
_models/models_dataset_outputs.py
"""
from log_config import log, pformat
log.debug("... loading models_dataset_outputs.py ...")
from flask_restplus import fields
### import data serializers
from solidata_api._serializers.schema_logs import *
from solidata_api._serializers.schema_generic import *
# from solidata_api._serializers.schema_projects import *
### import generic models functions
from solidata_api._models.models_generic import *
### create models from serializers
# nested models : https://github.com/noirbizarre/flask-restplus/issues/8
# model_user_infos = ns.model( "User model", user_infos) #, mask="{name,surname,email}" )
class NewDso:
    """Marshalling model for the basic dataset-output (dso) creation form."""

    def __init__(self, ns_):
        # Combine the generic licence fields with the open-level flags into
        # one flask-restplus model registered on the namespace.
        form_fields = {**doc_basics_licence, **open_level_edit_show}
        self.mod = ns_.model("Dso_basics", form_fields)

    @property
    def model(self):
        """The registered 'Dso_basics' model."""
        return self.mod
class Dso_infos:
    """Marshalling models for a dataset output ("dso") document.

    Builds the flask-restplus models used to validate / serialize a dso at
    its different exposure levels: complete (in/out), guest and minimum.
    """

    def __init__(self, ns_):
        prefix = "Dso"

        ### sub-models reused below
        ### NOTE: each create_model_* call registers a model on the
        ### namespace, so the call order is kept as-is.
        self._id = oid_field
        self.basic_infos = create_model_basic_infos(ns_, model_name=prefix + "_infos", need_licence=True)
        self.public_auth = create_model_public_auth(ns_, model_name=prefix + "_public_auth")
        self.specs = create_model_specs(ns_, model_name=prefix + "_specs")
        self.log = create_model_log(ns_, model_name=prefix + "_log", include_is_running=True, include_is_loaded=True)
        self.modif_log = create_model_modif_log(ns_, model_name=prefix + "_modif_log")
        self.uses = create_model_uses(ns_, model_name=prefix + "_uses", schema_list=["usr", "prj"])
        self.uses_light = create_model_uses(ns_, model_name=prefix + "_uses", schema_list=["prj"])
        self.datasets = create_model_datasets(ns_, model_name=prefix + "_datasets", schema_list=["dsi", "tag"])
        self.datasets_light = create_model_datasets(ns_, model_name=prefix + "_datasets", schema_list=["dsi", "tag"], is_light=True)
        self.translations = create_model_translations(ns_, model_name=prefix + "_translations")
        self.team = create_model_team(ns_, model_name=prefix + "_team")
        self.team_light = create_model_team(ns_, model_name=prefix + "_team", is_light=True)
        self.data_raw = create_model_data_raw(ns_, model_name=prefix + "_data_raw", schema="dso")

        ### building bricks composed into the aggregate models below
        self.model_id = {'_id': self._id}
        self.model_in = {'modif_log': self.modif_log, 'datasets': self.datasets}
        self.model_min = {
            'infos': self.basic_infos,
            'public_auth': self.public_auth,
            'specs': self.specs,
            'log': self.log,
            'translations': self.translations,
        }
        self.mod_data_raw = {'data_raw': self.data_raw}
        self.model_team_full = {'team': self.team}
        self.model_team_light = {'team': self.team_light}
        self.model_uses = {'uses': self.uses}
        self.model_uses_light = {'uses': self.uses_light}
        self.model_datasets_light = {'datasets': self.datasets_light}

        ### IN / complete data entering the DB
        self.mod_complete_in = ns_.model(
            prefix + "_in",
            {
                **self.model_min,
                **self.model_in,
                **self.model_team_full,
                **self.model_uses,
                **self.mod_data_raw,
            },
        )
        ### OUT COMPLETE / complete data coming out of the DB
        self.mod_complete_out = ns_.model(
            prefix + "_out",
            {
                **self.model_min,
                **self.model_in,
                **self.model_id,
                **self.model_team_full,
                **self.model_uses,
                **self.mod_data_raw,
            },
        )
        ### OUT GUEST / data exposed to guests (light team / uses)
        self.mod_guest_out = ns_.model(
            prefix + "_guest_out",
            {
                **self.model_min,
                **self.model_in,
                **self.model_id,
                **self.model_team_light,
                **self.model_uses_light,
                **self.mod_data_raw,
            },
        )
        ### MIN / minimum data to marshall out
        self.mod_minimum = ns_.model(
            prefix + "_minimum",
            {
                **self.model_min,
                **self.model_id,
                **self.model_uses_light,
                **self.model_datasets_light,
                **self.mod_data_raw,
            },
        )

    @property
    def model_complete_in(self):
        """Complete model for data entering the DB."""
        return self.mod_complete_in

    @property
    def model_complete_out(self):
        """Complete model for data coming out of the DB."""
        return self.mod_complete_out

    @property
    def model_guest_out(self):
        """Guest-level model for data coming out of the DB."""
        return self.mod_guest_out

    @property
    def model_minimum(self):
        """Minimum model to marshall out."""
        return self.mod_minimum
| 26.282353 | 133 | 0.673679 |
from log_config import log, pformat
log.debug("... loading models_dataset_outputs.py ...")
from flask_restplus import fields
t *
from solidata_api._serializers.schema_generic import *
open_level_edit_show} )
@property
def model(self):
return self.mod
class Dso_infos :
def __init__(self, ns_) :
model_type = "Dso"
self.basic_infos = create_model_basic_infos( ns_, model_name=model_type+"_infos", need_licence=True)
self.public_auth = create_model_public_auth( ns_, model_name=model_type+"_public_auth")
self.specs = create_model_specs( ns_, model_name=model_type+"_specs", )
self.log = create_model_log( ns_, model_name=model_type+"_log", include_is_running=True, include_is_loaded=True )
self.modif_log = create_model_modif_log( ns_, model_name=model_type+"_modif_log")
self.uses = create_model_uses( ns_, model_name=model_type+"_uses", schema_list=[ "usr", "prj" ])
self.uses_light = create_model_uses( ns_, model_name=model_type+"_uses", schema_list=[ "prj" ])
self.datasets = create_model_datasets( ns_, model_name=model_type+"_datasets", schema_list=[ "dsi","tag" ])
self.datasets_light = create_model_datasets( ns_, model_name=model_type+"_datasets", schema_list=[ "dsi","tag" ], is_light=True )
self.translations = create_model_translations(ns_, model_name=model_type+"_translations")
self.team = create_model_team( ns_, model_name=model_type+"_team")
self.team_light = create_model_team( ns_, model_name=model_type+"_team", is_light=True)
self.data_raw = create_model_data_raw( ns_, model_name=model_type+"_data_raw", schema="dso" )
self.model_id = {
'_id' : self._id,
}
self.model_in = {
'modif_log' : self.modif_log ,
"datasets" : self.datasets ,
}
self.model_min = {
'infos' : self.basic_infos,
'public_auth' : self.public_auth,
'specs' : self.specs ,
'log' : self.log ,
'translations' : self.translations,
}
self.mod_data_raw ={
'data_raw' : self.data_raw,
}
self.model_team_full = {
'team' : self.team ,
}
self.model_team_light = {
'team' : self.team_light,
}
self.model_uses = {
'uses' : self.uses,
}
self.model_uses_light = {
'uses' : self.uses_light,
}
self.model_datasets_light = {
'datasets' : self.datasets_light,
}
f.model_min,
**self.model_in,
**self.model_team_full,
**self.model_uses,
**self.mod_data_raw,
}
)
lf.model_in,
**self.model_id,
**self.model_team_full,
**self.model_uses,
**self.mod_data_raw,
}
)
**self.model_in,
**self.model_id,
**self.model_team_light,
**self.model_uses_light,
**self.mod_data_raw,
}
)
odel_min,
**self.model_id,
**self.model_uses_light,
**self.model_datasets_light,
**self.mod_data_raw,
}
)
@property
def model_complete_in(self):
return self.mod_complete_in
@property
def model_complete_out(self):
return self.mod_complete_out
@property
def model_guest_out(self):
return self.mod_guest_out
@property
def model_minimum(self):
return self.mod_minimum
| true | true |
f72487c1401c258eeaef80d0bad2132c073531cf | 8,204 | py | Python | openapi_client/api/dc_graph_get_report_depot_utilization_v1_api.py | vertica/vertica-accelerator-cli | 706925f58a4bfc2876903396db72363f673be76a | [
"Apache-2.0"
] | null | null | null | openapi_client/api/dc_graph_get_report_depot_utilization_v1_api.py | vertica/vertica-accelerator-cli | 706925f58a4bfc2876903396db72363f673be76a | [
"Apache-2.0"
] | null | null | null | openapi_client/api/dc_graph_get_report_depot_utilization_v1_api.py | vertica/vertica-accelerator-cli | 706925f58a4bfc2876903396db72363f673be76a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
VAAS API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 0.0.1
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from openapi_client.api_client import ApiClient
from openapi_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class DcGraphGetReportDepotUtilizationV1Api(object):
    """API client for the depot-utilization DC report endpoint.

    NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        # Fall back to a default ApiClient when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def v1_vaas_reports_dbname_depot_utilization_get(self, dbname, module, **kwargs):  # noqa: E501
        """Get a dc report from a certain database.  # noqa: E501

        Synchronous by default; pass ``async_req=True`` to make the request
        asynchronously:

        >>> thread = api.v1_vaas_reports_dbname_depot_utilization_get(dbname, module, async_req=True)
        >>> result = thread.get()

        :param dbname: (required)
        :type dbname: str
        :param module: Name of the module. (required)
        :type module: str
        :param time_range: Time range for the report.
        :type time_range: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _preload_content: if False, return the raw urllib3.HTTPResponse
                                 without reading/decoding. Default True.
        :type _preload_content: bool, optional
        :param _request_timeout: total timeout, or a (connect, read) tuple.
        :return: InlineResponse200, or the request thread when async.
        """
        # Delegate to the *_with_http_info variant, keeping only the body.
        kwargs['_return_http_data_only'] = True
        return self.v1_vaas_reports_dbname_depot_utilization_get_with_http_info(dbname, module, **kwargs)  # noqa: E501

    def v1_vaas_reports_dbname_depot_utilization_get_with_http_info(self, dbname, module, **kwargs):  # noqa: E501
        """Get a dc report from a certain database, with HTTP metadata.  # noqa: E501

        Same parameters as :meth:`v1_vaas_reports_dbname_depot_utilization_get`,
        plus:

        :param _return_http_data_only: response data without status code and
                                       headers.
        :type _return_http_data_only: bool, optional
        :param _request_auth: override the spec's auth settings for this
                              single request.
        :type _request_auth: dict, optional
        :return: tuple(InlineResponse200, status_code(int), headers(HTTPHeaderDict)),
                 or the request thread when async.
        """
        # NOTE: locals() must be captured before any other local is defined;
        # it snapshots self/dbname/module/kwargs for the generic plumbing below.
        local_var_params = locals()
        all_params = [
            'dbname',
            'module',
            'time_range'
        ]
        all_params.extend(
            [
                'async_req',
                '_return_http_data_only',
                '_preload_content',
                '_request_timeout',
                '_request_auth'
            ]
        )
        # Reject unknown keyword arguments, then flatten kwargs into the
        # parameter dict.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method v1_vaas_reports_dbname_depot_utilization_get" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'dbname' is set
        if self.api_client.client_side_validation and ('dbname' not in local_var_params or  # noqa: E501
                                                       local_var_params['dbname'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `dbname` when calling `v1_vaas_reports_dbname_depot_utilization_get`")  # noqa: E501
        # verify the required parameter 'module' is set
        if self.api_client.client_side_validation and ('module' not in local_var_params or  # noqa: E501
                                                       local_var_params['module'] is None):  # noqa: E501
            raise ApiValueError("Missing the required parameter `module` when calling `v1_vaas_reports_dbname_depot_utilization_get`")  # noqa: E501
        collection_formats = {}
        # 'dbname' is interpolated into the URL path; the rest go in the
        # query string.
        path_params = {}
        if 'dbname' in local_var_params:
            path_params['dbname'] = local_var_params['dbname']  # noqa: E501
        query_params = []
        if 'module' in local_var_params and local_var_params['module'] is not None:  # noqa: E501
            query_params.append(('module', local_var_params['module']))  # noqa: E501
        if 'time_range' in local_var_params and local_var_params['time_range'] is not None:  # noqa: E501
            query_params.append(('time_range', local_var_params['time_range']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['*/*', 'application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        # Response body deserialization per HTTP status code.
        response_types_map = {
            200: "InlineResponse200",
            408: None,
            500: None,
        }
        return self.api_client.call_api(
            '/v1/vaas/reports/{dbname}/depot-utilization', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_types_map=response_types_map,
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            _request_auth=local_var_params.get('_request_auth'))
| 42.507772 | 148 | 0.615553 |
from __future__ import absolute_import
import re
import six
from openapi_client.api_client import ApiClient
from openapi_client.exceptions import (
ApiTypeError,
ApiValueError
)
class DcGraphGetReportDepotUtilizationV1Api(object):
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def v1_vaas_reports_dbname_depot_utilization_get(self, dbname, module, **kwargs):
kwargs['_return_http_data_only'] = True
return self.v1_vaas_reports_dbname_depot_utilization_get_with_http_info(dbname, module, **kwargs)
def v1_vaas_reports_dbname_depot_utilization_get_with_http_info(self, dbname, module, **kwargs):
local_var_params = locals()
all_params = [
'dbname',
'module',
'time_range'
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout',
'_request_auth'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method v1_vaas_reports_dbname_depot_utilization_get" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
if self.api_client.client_side_validation and ('dbname' not in local_var_params or
local_var_params['dbname'] is None):
raise ApiValueError("Missing the required parameter `dbname` when calling `v1_vaas_reports_dbname_depot_utilization_get`")
if self.api_client.client_side_validation and ('module' not in local_var_params or
local_var_params['module'] is None):
raise ApiValueError("Missing the required parameter `module` when calling `v1_vaas_reports_dbname_depot_utilization_get`")
collection_formats = {}
path_params = {}
if 'dbname' in local_var_params:
path_params['dbname'] = local_var_params['dbname']
query_params = []
if 'module' in local_var_params and local_var_params['module'] is not None:
query_params.append(('module', local_var_params['module']))
if 'time_range' in local_var_params and local_var_params['time_range'] is not None:
query_params.append(('time_range', local_var_params['time_range']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
header_params['Accept'] = self.api_client.select_header_accept(
['*/*', 'application/json'])
auth_settings = []
response_types_map = {
200: "InlineResponse200",
408: None,
500: None,
}
return self.api_client.call_api(
'/v1/vaas/reports/{dbname}/depot-utilization', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_types_map=response_types_map,
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'),
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats,
_request_auth=local_var_params.get('_request_auth'))
| true | true |
f724888e65513eebbb1135160df884f5d67661ab | 22,396 | py | Python | pytest_django/plugin.py | oboynitro/pytest-django | e4ebc59b0037e5623706c738ef8cbf09ecd2425d | [
"BSD-3-Clause"
] | 1 | 2020-10-23T02:46:08.000Z | 2020-10-23T02:46:08.000Z | pytest_django/plugin.py | oboynitro/pytest-django | e4ebc59b0037e5623706c738ef8cbf09ecd2425d | [
"BSD-3-Clause"
] | null | null | null | pytest_django/plugin.py | oboynitro/pytest-django | e4ebc59b0037e5623706c738ef8cbf09ecd2425d | [
"BSD-3-Clause"
] | null | null | null | """A pytest plugin which helps testing Django applications
This plugin handles creating and destroying the test environment and
test database and provides some useful text fixtures.
"""
import contextlib
import inspect
from functools import reduce
import os
import pathlib
import sys
import pytest
from .django_compat import is_django_unittest # noqa
from .fixtures import django_assert_num_queries # noqa
from .fixtures import django_assert_max_num_queries # noqa
from .fixtures import django_db_setup # noqa
from .fixtures import django_db_use_migrations # noqa
from .fixtures import django_db_keepdb # noqa
from .fixtures import django_db_createdb # noqa
from .fixtures import django_db_modify_db_settings # noqa
from .fixtures import django_db_modify_db_settings_parallel_suffix # noqa
from .fixtures import django_db_modify_db_settings_tox_suffix # noqa
from .fixtures import django_db_modify_db_settings_xdist_suffix # noqa
from .fixtures import _live_server_helper # noqa
from .fixtures import admin_client # noqa
from .fixtures import admin_user # noqa
from .fixtures import async_client # noqa
from .fixtures import client # noqa
from .fixtures import db # noqa
from .fixtures import django_user_model # noqa
from .fixtures import django_username_field # noqa
from .fixtures import live_server # noqa
from .fixtures import django_db_reset_sequences # noqa
from .fixtures import async_rf # noqa
from .fixtures import rf # noqa
from .fixtures import settings # noqa
from .fixtures import transactional_db # noqa
from .lazy_django import django_settings_is_configured, skip_if_no_django
SETTINGS_MODULE_ENV = "DJANGO_SETTINGS_MODULE"
CONFIGURATION_ENV = "DJANGO_CONFIGURATION"
INVALID_TEMPLATE_VARS_ENV = "FAIL_INVALID_TEMPLATE_VARS"
_report_header = []
# ############### pytest hooks ################
def pytest_addoption(parser):
    """Register pytest-django's command-line options and ini settings."""
    group = parser.getgroup("django")
    # -- test-database handling -------------------------------------------
    group.addoption(
        "--reuse-db",
        action="store_true",
        dest="reuse_db",
        default=False,
        help="Re-use the testing database if it already exists, "
        "and do not remove it when the test finishes.",
    )
    group.addoption(
        "--create-db",
        action="store_true",
        dest="create_db",
        default=False,
        help="Re-create the database, even if it exists. This "
        "option can be used to override --reuse-db.",
    )
    # -- settings / configuration discovery -------------------------------
    group.addoption(
        "--ds",
        action="store",
        type=str,
        dest="ds",
        default=None,
        help="Set DJANGO_SETTINGS_MODULE.",
    )
    group.addoption(
        "--dc",
        action="store",
        type=str,
        dest="dc",
        default=None,
        help="Set DJANGO_CONFIGURATION.",
    )
    # -- migrations (the two options share the 'nomigrations' dest) -------
    group.addoption(
        "--nomigrations",
        "--no-migrations",
        action="store_true",
        dest="nomigrations",
        default=False,
        help="Disable Django migrations on test setup",
    )
    group.addoption(
        "--migrations",
        action="store_false",
        dest="nomigrations",
        default=False,
        help="Enable Django migrations on test setup",
    )
    parser.addini(
        CONFIGURATION_ENV, "django-configurations class to use by pytest-django."
    )
    group.addoption(
        "--liveserver",
        default=None,
        help="Address and port for the live_server fixture.",
    )
    parser.addini(
        SETTINGS_MODULE_ENV, "Django settings module to use by pytest-django."
    )
    parser.addini(
        "django_find_project",
        "Automatically find and add a Django project to the " "Python path.",
        type="bool",
        default=True,
    )
    parser.addini(
        "django_debug_mode",
        "How to set the Django DEBUG setting (default `False`). "
        "Use `keep` to not override.",
        default="False",
    )
    # -- template strictness ----------------------------------------------
    group.addoption(
        "--fail-on-template-vars",
        action="store_true",
        dest="itv",
        default=False,
        help="Fail for invalid variables in templates.",
    )
    parser.addini(
        INVALID_TEMPLATE_VARS_ENV,
        "Fail for invalid variables in templates.",
        type="bool",
        default=False,
    )
PROJECT_FOUND = (
"pytest-django found a Django project in %s "
"(it contains manage.py) and added it to the Python path.\n"
'If this is wrong, add "django_find_project = false" to '
"pytest.ini and explicitly manage your Python path."
)
PROJECT_NOT_FOUND = (
"pytest-django could not find a Django project "
"(no manage.py file could be found). You must "
"explicitly add your Django project to the Python path "
"to have it picked up."
)
PROJECT_SCAN_DISABLED = (
"pytest-django did not search for Django "
"projects since it is disabled in the configuration "
'("django_find_project = false")'
)
@contextlib.contextmanager
def _handle_import_error(extra_message):
try:
yield
except ImportError as e:
django_msg = (e.args[0] + "\n\n") if e.args else ""
msg = django_msg + extra_message
raise ImportError(msg)
def _add_django_project_to_path(args):
    """Locate a Django project (a directory containing ``manage.py``) among
    the given command line arguments and prepend it to ``sys.path``.

    The current working directory and the parents of each argument are also
    considered.  Returns a human readable message describing the outcome.
    """

    def _is_project(candidate):
        try:
            return candidate.is_dir() and (candidate / "manage.py").exists()
        except OSError:
            return False

    def _as_path(raw):
        # Node ids may carry a ``::TestClass::test_method`` suffix; strip it.
        return pathlib.Path(str(raw).split("::", 1)[0])

    def _locate(raw_args):
        candidates = [
            _as_path(raw) for raw in raw_args if not str(raw).startswith("-")
        ]
        cwd = pathlib.Path.cwd()
        if cwd not in candidates:
            candidates.append(cwd)
        for candidate in candidates:
            # Check the candidate itself first, then walk up its parents.
            for path in (candidate, *candidate.parents):
                if _is_project(path):
                    return path
        return None

    project_dir = _locate(args)
    if project_dir is None:
        return PROJECT_NOT_FOUND
    sys.path.insert(0, str(project_dir.absolute()))
    return PROJECT_FOUND % project_dir
def _setup_django():
    """Initialize Django (when importable and configured) and install the
    database access blocker.

    Every step is guarded, so calling this repeatedly is safe and it is a
    no-op when Django is absent, unconfigured, or already set up.
    """
    # Never force-import Django for runs that have not touched it.
    if "django" not in sys.modules:
        return
    import django.conf
    # Avoid force-loading Django when settings are not properly configured.
    if not django.conf.settings.configured:
        return
    import django.apps
    if not django.apps.apps.ready:
        django.setup()
    # Block database access by default; the db/transactional_db fixtures and
    # the django_db marker unblock it per test.
    _blocking_manager.block()
def _get_boolean_value(x, name, default=None):
if x is None:
return default
if x in (True, False):
return x
possible_values = {"true": True, "false": False, "1": True, "0": False}
try:
return possible_values[x.lower()]
except KeyError:
raise ValueError(
"{} is not a valid value for {}. "
"It must be one of {}.".format(x, name, ", ".join(possible_values.keys()))
)
def pytest_load_initial_conftests(early_config, parser, args):
    """Early pytest hook: register markers, discover the Django project, and
    export/verify the Django settings before conftest files are imported.

    Runs at the initial-conftest stage so that DJANGO_SETTINGS_MODULE /
    DJANGO_CONFIGURATION are in the environment and the project directory is
    on sys.path before any user conftest imports Django.
    """
    # Register the marks
    early_config.addinivalue_line(
        "markers",
        "django_db(transaction=False): Mark the test as using "
        "the Django test database. The *transaction* argument marks will "
        "allow you to use real transactions in the test like Django's "
        "TransactionTestCase.",
    )
    early_config.addinivalue_line(
        "markers",
        "urls(modstr): Use a different URLconf for this test, similar to "
        "the `urls` attribute of Django's `TestCase` objects. *modstr* is "
        "a string specifying the module of a URL config, e.g. "
        '"my_app.test_urls".',
    )
    early_config.addinivalue_line(
        "markers",
        "ignore_template_errors(): ignore errors from invalid template "
        "variables (if --fail-on-template-vars is used).",
    )
    options = parser.parse_known_args(args)
    # Skip all setup for ``pytest --version`` / ``pytest --help``.
    if options.version or options.help:
        return
    django_find_project = _get_boolean_value(
        early_config.getini("django_find_project"), "django_find_project"
    )
    # Either scan for a project (adding it to sys.path) or record that
    # scanning was disabled; the outcome message is attached to any settings
    # ImportError below.
    if django_find_project:
        _django_project_scan_outcome = _add_django_project_to_path(args)
    else:
        _django_project_scan_outcome = PROJECT_SCAN_DISABLED
    # The option, the environment variable, or the ini setting can each turn
    # on fail-on-invalid-template-vars; normalize the result into the env var
    # so fixtures can check a single source of truth.
    if (
        options.itv
        or _get_boolean_value(
            os.environ.get(INVALID_TEMPLATE_VARS_ENV), INVALID_TEMPLATE_VARS_ENV
        )
        or early_config.getini(INVALID_TEMPLATE_VARS_ENV)
    ):
        os.environ[INVALID_TEMPLATE_VARS_ENV] = "true"

    def _get_option_with_source(option, envname):
        # Resolution order: command line option, then environment, then ini.
        if option:
            return option, "option"
        if envname in os.environ:
            return os.environ[envname], "env"
        cfgval = early_config.getini(envname)
        if cfgval:
            return cfgval, "ini"
        return None, None

    ds, ds_source = _get_option_with_source(options.ds, SETTINGS_MODULE_ENV)
    dc, dc_source = _get_option_with_source(options.dc, CONFIGURATION_ENV)
    if ds:
        _report_header.append("settings: {} (from {})".format(ds, ds_source))
        os.environ[SETTINGS_MODULE_ENV] = ds
    if dc:
        _report_header.append("configuration: {} (from {})".format(dc, dc_source))
        os.environ[CONFIGURATION_ENV] = dc
        # Install the django-configurations importer
        import configurations.importer

        configurations.importer.install()
    # Forcefully load Django settings, throws ImportError or
    # ImproperlyConfigured if settings cannot be loaded.
    from django.conf import settings as dj_settings

    with _handle_import_error(_django_project_scan_outcome):
        dj_settings.DATABASES
    _setup_django()
def pytest_report_header():
    """Add a ``django:`` line (settings/configuration sources) to the
    terminal report header, when anything was recorded."""
    if not _report_header:
        return None
    return ["django: " + ", ".join(_report_header)]
@pytest.hookimpl(trylast=True)
def pytest_configure():
    """Late (trylast) pytest_configure hook for Django initialization."""
    # Allow Django settings to be configured in a user pytest_configure call,
    # but make sure we call django.setup()
    _setup_django()
@pytest.hookimpl(tryfirst=True)
def pytest_collection_modifyitems(items):
    """Reorder collected tests: plain-db tests (0) run before transactional
    ones (1), and tests without database access run last (2)."""
    # If Django is not configured there is nothing to reorder.
    if not django_settings_is_configured():
        return

    from django.test import TestCase, TransactionTestCase

    def _sort_key(item):
        cls = getattr(item, "cls", None)
        if cls:
            # Beware: TestCase is a subclass of TransactionTestCase, so the
            # more specific check must come first.
            if issubclass(cls, TestCase):
                return 0
            if issubclass(cls, TransactionTestCase):
                return 1

        transaction = None
        marker = item.get_closest_marker("django_db")
        if marker:
            transaction = validate_django_db(marker)[0]
            if transaction is True:
                return 1

        fixtures = getattr(item, "fixturenames", [])
        if "transactional_db" in fixtures:
            return 1
        if transaction is False or "db" in fixtures:
            return 0
        return 2

    # Stable in-place sort keeps the relative order inside each group.
    items.sort(key=_sort_key)
@pytest.fixture(autouse=True, scope="session")
def django_test_environment(request):
    """
    Ensure that Django is loaded and has its testing environment setup.

    XXX It is a little dodgy that this is an autouse fixture. Perhaps
    an email fixture should be requested in order to be able to
    use the Django email machinery just like you need to request a
    db fixture for access to the Django database, etc. But
    without duplicating a lot more of Django's test support code
    we need to follow this model.
    """
    if django_settings_is_configured():
        _setup_django()
        from django.test.utils import setup_test_environment, teardown_test_environment

        debug_ini = request.config.getini("django_debug_mode")
        if debug_ini == "keep":
            # "keep" means: leave settings.DEBUG untouched.
            debug = None
        else:
            # Bug fix: the second positional argument of _get_boolean_value()
            # is the *name* used in error messages; ``False`` was previously
            # passed there (producing "... not a valid value for False" and
            # leaving default=None). Pass the ini option name, and fall back
            # to False for an unset value.
            debug = _get_boolean_value(debug_ini, "django_debug_mode", False)

        setup_test_environment(debug=debug)
        request.addfinalizer(teardown_test_environment)
@pytest.fixture(scope="session")
def django_db_blocker():
    """Wrapper around Django's database access.

    The returned object re-enables (or re-disables) database access: it is a
    context manager factory offering .unblock()/.block()/.restore(), used
    internally by pytest-django to build the database fixtures and available
    to users implementing advanced database fixtures of their own.

    Returns None when Django settings are not configured.
    """
    if django_settings_is_configured():
        return _blocking_manager
    return None
@pytest.fixture(autouse=True)
def _django_db_marker(request):
    """Implement the django_db marker, internal to pytest-django.

    Translates the marker's (transaction, reset_sequences) options into a
    dynamic request for the matching database fixture.
    """
    marker = request.node.get_closest_marker("django_db")
    if marker is None:
        return
    transaction, reset_sequences = validate_django_db(marker)
    if reset_sequences:
        fixture_name = "django_db_reset_sequences"
    elif transaction:
        fixture_name = "transactional_db"
    else:
        fixture_name = "db"
    request.getfixturevalue(fixture_name)
@pytest.fixture(autouse=True, scope="class")
def _django_setup_unittest(request, django_db_blocker):
    """Setup a django unittest, internal to pytest-django.

    For Django TestCase classes: sets up the test database once per class and
    keeps database access unblocked for the duration of the class.
    """
    if not django_settings_is_configured() or not is_django_unittest(request):
        yield
        return
    # Fix/patch pytest.
    # Before pytest 5.4: https://github.com/pytest-dev/pytest/issues/5991
    # After pytest 5.4: https://github.com/pytest-dev/pytest-django/issues/824
    from _pytest.unittest import TestCaseFunction

    original_runtest = TestCaseFunction.runtest

    def non_debugging_runtest(self):
        # Run the TestCase directly, bypassing pytest's debugging support.
        self._testcase(result=self)

    try:
        TestCaseFunction.runtest = non_debugging_runtest
        request.getfixturevalue("django_db_setup")
        with django_db_blocker.unblock():
            yield
    finally:
        # Always restore the original runtest, even when the tests errored.
        TestCaseFunction.runtest = original_runtest
@pytest.fixture(scope="function", autouse=True)
def _dj_autoclear_mailbox():
    """Empty Django's test email outbox before every test."""
    if django_settings_is_configured():
        from django.core import mail

        mail.outbox.clear()
@pytest.fixture(scope="function")
def mailoutbox(django_mail_patch_dns, _dj_autoclear_mailbox):
    """Give direct access to Django's test email outbox (a list of sent
    messages), freshly cleared for each test."""
    if not django_settings_is_configured():
        return None
    from django.core import mail

    return mail.outbox
@pytest.fixture(scope="function")
def django_mail_patch_dns(monkeypatch, django_mail_dnsname):
    """Patch the DNS name used in outgoing mail Message-IDs with the value
    from the ``django_mail_dnsname`` fixture, for the duration of a test."""
    from django.core import mail

    monkeypatch.setattr(mail.message, "DNS_NAME", django_mail_dnsname)
@pytest.fixture(scope="function")
def django_mail_dnsname():
    """Hostname patched into ``django.core.mail`` by ``django_mail_patch_dns``.

    Override this fixture to use a different DNS name in tests.
    """
    return "fake-tests.example.com"
@pytest.fixture(autouse=True, scope="function")
def _django_set_urlconf(request):
    """Apply the @pytest.mark.urls marker, internal to pytest-django.

    Temporarily swaps settings.ROOT_URLCONF for the module named by the
    marker and restores the original at test teardown.
    """
    marker = request.node.get_closest_marker("urls")
    if marker:
        skip_if_no_django()
        import django.conf
        from django.urls import clear_url_caches, set_urlconf

        urls = validate_urls(marker)
        original_urlconf = django.conf.settings.ROOT_URLCONF
        django.conf.settings.ROOT_URLCONF = urls
        # Drop cached resolvers so the new ROOT_URLCONF takes effect.
        clear_url_caches()
        set_urlconf(None)

        def restore():
            django.conf.settings.ROOT_URLCONF = original_urlconf
            # Copy the pattern from
            # https://github.com/django/django/blob/master/django/test/signals.py#L152
            clear_url_caches()
            set_urlconf(None)

        request.addfinalizer(restore)
@pytest.fixture(autouse=True, scope="session")
def _fail_for_invalid_template_variable():
    """Fixture that fails for invalid variables in templates.

    This fixture will fail each test that uses django template rendering
    should a template contain an invalid template variable.
    The fail message will include the name of the invalid variable and
    in most cases the template name.

    It does not raise an exception, but fails, as the stack trace doesn't
    offer any helpful information to debug.
    This behavior can be switched off using the marker:
    ``pytest.mark.ignore_template_errors``
    """

    class InvalidVarException:
        """Custom handler for invalid strings in templates.

        Installed as TEMPLATES[0]["OPTIONS"]["string_if_invalid"]; Django
        probes ``"%s" in string_if_invalid`` and then interpolates the
        variable name with ``%`` (see Django's string_if_invalid docs), which
        is why __contains__ and __mod__ are the hooks implemented here.
        """

        def __init__(self):
            # Toggled to False by the ignore_template_errors marker fixture.
            self.fail = True

        def __contains__(self, key):
            # Tell Django this "string" contains a %s placeholder so it
            # calls __mod__ with the offending variable name.
            return key == "%s"

        @staticmethod
        def _get_origin():
            # Best-effort discovery of the template being rendered, by
            # inspecting the interpreter stack for Django's render frames.
            stack = inspect.stack()
            # Try to use topmost `self.origin` first (Django 1.9+, and with
            # TEMPLATE_DEBUG)..
            for f in stack[2:]:
                func = f[3]
                if func == "render":
                    frame = f[0]
                    try:
                        origin = frame.f_locals["self"].origin
                    except (AttributeError, KeyError):
                        continue
                    if origin is not None:
                        return origin

            from django.template import Template

            # finding the ``render`` needle in the stack
            frame = reduce(
                lambda x, y: y[3] == "render" and "base.py" in y[1] and y or x, stack
            )
            # assert 0, stack
            frame = frame[0]
            # finding only the frame locals in all frame members
            f_locals = reduce(
                lambda x, y: y[0] == "f_locals" and y or x, inspect.getmembers(frame)
            )[1]
            # ``django.template.base.Template``
            template = f_locals["self"]
            if isinstance(template, Template):
                return template.name

        def __mod__(self, var):
            # Invoked by Django as ``string_if_invalid % variable_name``.
            origin = self._get_origin()
            if origin:
                msg = "Undefined template variable '{}' in '{}'".format(var, origin)
            else:
                msg = "Undefined template variable '%s'" % var
            if self.fail:
                pytest.fail(msg)
            else:
                return msg

    if (
        os.environ.get(INVALID_TEMPLATE_VARS_ENV, "false") == "true"
        and django_settings_is_configured()
    ):
        from django.conf import settings as dj_settings

        if dj_settings.TEMPLATES:
            dj_settings.TEMPLATES[0]["OPTIONS"]["string_if_invalid"] = InvalidVarException()
@pytest.fixture(autouse=True)
def _template_string_if_invalid_marker(request):
    """Apply the @pytest.mark.ignore_template_errors marker,
    internal to pytest-django."""
    if os.environ.get(INVALID_TEMPLATE_VARS_ENV, "false") != "true":
        return
    marker = request.keywords.get("ignore_template_errors", None)
    if not marker or not django_settings_is_configured():
        return
    from django.conf import settings as dj_settings

    # Switch the InvalidVarException handler to non-failing mode.
    if dj_settings.TEMPLATES:
        dj_settings.TEMPLATES[0]["OPTIONS"]["string_if_invalid"].fail = False
@pytest.fixture(autouse=True, scope="function")
def _django_clear_site_cache():
    """Reset ``django.contrib.sites``' SITE_CACHE between tests so cached
    Site objects cannot leak from one test into the next."""
    if not django_settings_is_configured():
        return
    from django.conf import settings as dj_settings

    if "django.contrib.sites" not in dj_settings.INSTALLED_APPS:
        return
    from django.contrib.sites.models import Site

    Site.objects.clear_cache()
# ############### Helper Functions ################
class _DatabaseBlockerContextManager:
def __init__(self, db_blocker):
self._db_blocker = db_blocker
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
self._db_blocker.restore()
class _DatabaseBlocker:
    """Manager for django.db.backends.base.base.BaseDatabaseWrapper.

    This is the object returned by django_db_blocker.

    Blocking works by monkeypatching ``BaseDatabaseWrapper.ensure_connection``
    at the class level with a wrapper that raises; previous wrappers are kept
    on a stack (``_history``) so block()/unblock() calls nest correctly.
    """

    def __init__(self):
        # Stack of previously-installed ensure_connection callables.
        self._history = []
        # Lazily-captured reference to the real ensure_connection.
        self._real_ensure_connection = None

    @property
    def _dj_db_wrapper(self):
        from django.db.backends.base.base import BaseDatabaseWrapper

        # The first time the _dj_db_wrapper is accessed, we will save a
        # reference to the real implementation.
        if self._real_ensure_connection is None:
            self._real_ensure_connection = BaseDatabaseWrapper.ensure_connection
        return BaseDatabaseWrapper

    def _save_active_wrapper(self):
        # Push the currently-installed ensure_connection so restore() can
        # pop it back into place.
        return self._history.append(self._dj_db_wrapper.ensure_connection)

    def _blocking_wrapper(*args, **kwargs):
        # NOTE: deliberately no explicit ``self`` parameter -- block()
        # assigns ``self._blocking_wrapper`` (a bound method) to the class
        # attribute, so all call-time arguments are absorbed by *args.
        __tracebackhide__ = True
        __tracebackhide__  # Silence pyflakes
        raise RuntimeError(
            "Database access not allowed, "
            'use the "django_db" mark, or the '
            '"db" or "transactional_db" fixtures to enable it.'
        )

    def unblock(self):
        """Enable access to the Django database."""
        self._save_active_wrapper()
        self._dj_db_wrapper.ensure_connection = self._real_ensure_connection
        return _DatabaseBlockerContextManager(self)

    def block(self):
        """Disable access to the Django database."""
        self._save_active_wrapper()
        self._dj_db_wrapper.ensure_connection = self._blocking_wrapper
        return _DatabaseBlockerContextManager(self)

    def restore(self):
        """Reinstate the ensure_connection that was active before the most
        recent block()/unblock() call."""
        self._dj_db_wrapper.ensure_connection = self._history.pop()
# Module-wide singleton blocker; installed by _setup_django() and exposed to
# users via the django_db_blocker fixture.
_blocking_manager = _DatabaseBlocker()
def validate_django_db(marker):
    """Validate the django_db marker.

    Returns the ``(transaction, reset_sequences)`` pair encoded by the
    marker's arguments.  An invalid marker signature raises TypeError via
    the signature-checking helper below.
    """

    def _signature(transaction=False, reset_sequences=False):
        # Mirrors the documented marker signature so Python itself validates
        # the marker's args/kwargs.
        return transaction, reset_sequences

    return _signature(*marker.args, **marker.kwargs)
def validate_urls(marker):
    """Validate the urls marker.

    Returns the URLconf module string the marker carries; a marker with a
    wrong signature raises TypeError via the helper below.
    """

    def _signature(urls):
        # Mirrors the documented marker signature so Python itself validates
        # the marker's args/kwargs.
        return urls

    return _signature(*marker.args, **marker.kwargs)
| 30.976487 | 92 | 0.655162 |
import contextlib
import inspect
from functools import reduce
import os
import pathlib
import sys
import pytest
from .django_compat import is_django_unittest
from .fixtures import django_assert_num_queries
from .fixtures import django_assert_max_num_queries
from .fixtures import django_db_setup
from .fixtures import django_db_use_migrations
from .fixtures import django_db_keepdb
from .fixtures import django_db_createdb
from .fixtures import django_db_modify_db_settings
from .fixtures import django_db_modify_db_settings_parallel_suffix
from .fixtures import django_db_modify_db_settings_tox_suffix
from .fixtures import django_db_modify_db_settings_xdist_suffix
from .fixtures import _live_server_helper
from .fixtures import admin_client
from .fixtures import admin_user
from .fixtures import async_client
from .fixtures import client
from .fixtures import db
from .fixtures import django_user_model
from .fixtures import django_username_field
from .fixtures import live_server
from .fixtures import django_db_reset_sequences
from .fixtures import async_rf
from .fixtures import rf
from .fixtures import settings
from .fixtures import transactional_db
from .lazy_django import django_settings_is_configured, skip_if_no_django
SETTINGS_MODULE_ENV = "DJANGO_SETTINGS_MODULE"
CONFIGURATION_ENV = "DJANGO_CONFIGURATION"
INVALID_TEMPLATE_VARS_ENV = "FAIL_INVALID_TEMPLATE_VARS"
_report_header = []
NGO_SETTINGS_MODULE.",
)
group.addoption(
"--dc",
action="store",
type=str,
dest="dc",
default=None,
help="Set DJANGO_CONFIGURATION.",
)
group.addoption(
"--nomigrations",
"--no-migrations",
action="store_true",
dest="nomigrations",
default=False,
help="Disable Django migrations on test setup",
)
group.addoption(
"--migrations",
action="store_false",
dest="nomigrations",
default=False,
help="Enable Django migrations on test setup",
)
parser.addini(
CONFIGURATION_ENV, "django-configurations class to use by pytest-django."
)
group.addoption(
"--liveserver",
default=None,
help="Address and port for the live_server fixture.",
)
parser.addini(
SETTINGS_MODULE_ENV, "Django settings module to use by pytest-django."
)
parser.addini(
"django_find_project",
"Automatically find and add a Django project to the " "Python path.",
type="bool",
default=True,
)
parser.addini(
"django_debug_mode",
"How to set the Django DEBUG setting (default `False`). "
"Use `keep` to not override.",
default="False",
)
group.addoption(
"--fail-on-template-vars",
action="store_true",
dest="itv",
default=False,
help="Fail for invalid variables in templates.",
)
parser.addini(
INVALID_TEMPLATE_VARS_ENV,
"Fail for invalid variables in templates.",
type="bool",
default=False,
)
PROJECT_FOUND = (
"pytest-django found a Django project in %s "
"(it contains manage.py) and added it to the Python path.\n"
'If this is wrong, add "django_find_project = false" to '
"pytest.ini and explicitly manage your Python path."
)
PROJECT_NOT_FOUND = (
"pytest-django could not find a Django project "
"(no manage.py file could be found). You must "
"explicitly add your Django project to the Python path "
"to have it picked up."
)
PROJECT_SCAN_DISABLED = (
"pytest-django did not search for Django "
"projects since it is disabled in the configuration "
'("django_find_project = false")'
)
@contextlib.contextmanager
def _handle_import_error(extra_message):
try:
yield
except ImportError as e:
django_msg = (e.args[0] + "\n\n") if e.args else ""
msg = django_msg + extra_message
raise ImportError(msg)
def _add_django_project_to_path(args):
def is_django_project(path):
try:
return path.is_dir() and (path / "manage.py").exists()
except OSError:
return False
def arg_to_path(arg):
arg = arg.split("::", 1)[0]
return pathlib.Path(arg)
def find_django_path(args):
args = map(str, args)
args = [arg_to_path(x) for x in args if not x.startswith("-")]
cwd = pathlib.Path.cwd()
if not args:
args.append(cwd)
elif cwd not in args:
args.append(cwd)
for arg in args:
if is_django_project(arg):
return arg
for parent in arg.parents:
if is_django_project(parent):
return parent
return None
project_dir = find_django_path(args)
if project_dir:
sys.path.insert(0, str(project_dir.absolute()))
return PROJECT_FOUND % project_dir
return PROJECT_NOT_FOUND
def _setup_django():
if "django" not in sys.modules:
return
import django.conf
if not django.conf.settings.configured:
return
import django.apps
if not django.apps.apps.ready:
django.setup()
_blocking_manager.block()
def _get_boolean_value(x, name, default=None):
if x is None:
return default
if x in (True, False):
return x
possible_values = {"true": True, "false": False, "1": True, "0": False}
try:
return possible_values[x.lower()]
except KeyError:
raise ValueError(
"{} is not a valid value for {}. "
"It must be one of {}.".format(x, name, ", ".join(possible_values.keys()))
)
def pytest_load_initial_conftests(early_config, parser, args):
early_config.addinivalue_line(
"markers",
"django_db(transaction=False): Mark the test as using "
"the Django test database. The *transaction* argument marks will "
"allow you to use real transactions in the test like Django's "
"TransactionTestCase.",
)
early_config.addinivalue_line(
"markers",
"urls(modstr): Use a different URLconf for this test, similar to "
"the `urls` attribute of Django's `TestCase` objects. *modstr* is "
"a string specifying the module of a URL config, e.g. "
'"my_app.test_urls".',
)
early_config.addinivalue_line(
"markers",
"ignore_template_errors(): ignore errors from invalid template "
"variables (if --fail-on-template-vars is used).",
)
options = parser.parse_known_args(args)
if options.version or options.help:
return
django_find_project = _get_boolean_value(
early_config.getini("django_find_project"), "django_find_project"
)
if django_find_project:
_django_project_scan_outcome = _add_django_project_to_path(args)
else:
_django_project_scan_outcome = PROJECT_SCAN_DISABLED
if (
options.itv
or _get_boolean_value(
os.environ.get(INVALID_TEMPLATE_VARS_ENV), INVALID_TEMPLATE_VARS_ENV
)
or early_config.getini(INVALID_TEMPLATE_VARS_ENV)
):
os.environ[INVALID_TEMPLATE_VARS_ENV] = "true"
def _get_option_with_source(option, envname):
if option:
return option, "option"
if envname in os.environ:
return os.environ[envname], "env"
cfgval = early_config.getini(envname)
if cfgval:
return cfgval, "ini"
return None, None
ds, ds_source = _get_option_with_source(options.ds, SETTINGS_MODULE_ENV)
dc, dc_source = _get_option_with_source(options.dc, CONFIGURATION_ENV)
if ds:
_report_header.append("settings: {} (from {})".format(ds, ds_source))
os.environ[SETTINGS_MODULE_ENV] = ds
if dc:
_report_header.append("configuration: {} (from {})".format(dc, dc_source))
os.environ[CONFIGURATION_ENV] = dc
import configurations.importer
configurations.importer.install()
from django.conf import settings as dj_settings
with _handle_import_error(_django_project_scan_outcome):
dj_settings.DATABASES
_setup_django()
def pytest_report_header():
if _report_header:
return ["django: " + ", ".join(_report_header)]
@pytest.hookimpl(trylast=True)
def pytest_configure():
_setup_django()
@pytest.hookimpl(tryfirst=True)
def pytest_collection_modifyitems(items):
if not django_settings_is_configured():
return
from django.test import TestCase, TransactionTestCase
def get_order_number(test):
if hasattr(test, "cls") and test.cls:
# Beware, TestCase is a subclass of TransactionTestCase
if issubclass(test.cls, TestCase):
return 0
if issubclass(test.cls, TransactionTestCase):
return 1
marker_db = test.get_closest_marker('django_db')
if marker_db:
transaction = validate_django_db(marker_db)[0]
if transaction is True:
return 1
else:
transaction = None
fixtures = getattr(test, 'fixturenames', [])
if "transactional_db" in fixtures:
return 1
if transaction is False:
return 0
if "db" in fixtures:
return 0
return 2
items[:] = sorted(items, key=get_order_number)
@pytest.fixture(autouse=True, scope="session")
def django_test_environment(request):
if django_settings_is_configured():
_setup_django()
from django.test.utils import setup_test_environment, teardown_test_environment
debug_ini = request.config.getini("django_debug_mode")
if debug_ini == "keep":
debug = None
else:
debug = _get_boolean_value(debug_ini, False)
setup_test_environment(debug=debug)
request.addfinalizer(teardown_test_environment)
@pytest.fixture(scope="session")
def django_db_blocker():
if not django_settings_is_configured():
return None
return _blocking_manager
@pytest.fixture(autouse=True)
def _django_db_marker(request):
marker = request.node.get_closest_marker("django_db")
if marker:
transaction, reset_sequences = validate_django_db(marker)
if reset_sequences:
request.getfixturevalue("django_db_reset_sequences")
elif transaction:
request.getfixturevalue("transactional_db")
else:
request.getfixturevalue("db")
@pytest.fixture(autouse=True, scope="class")
def _django_setup_unittest(request, django_db_blocker):
if not django_settings_is_configured() or not is_django_unittest(request):
yield
return
# Fix/patch pytest.
# Before pytest 5.4: https://github.com/pytest-dev/pytest/issues/5991
# After pytest 5.4: https://github.com/pytest-dev/pytest-django/issues/824
from _pytest.unittest import TestCaseFunction
original_runtest = TestCaseFunction.runtest
def non_debugging_runtest(self):
self._testcase(result=self)
try:
TestCaseFunction.runtest = non_debugging_runtest
request.getfixturevalue("django_db_setup")
with django_db_blocker.unblock():
yield
finally:
TestCaseFunction.runtest = original_runtest
@pytest.fixture(scope="function", autouse=True)
def _dj_autoclear_mailbox():
if not django_settings_is_configured():
return
from django.core import mail
del mail.outbox[:]
@pytest.fixture(scope="function")
def mailoutbox(django_mail_patch_dns, _dj_autoclear_mailbox):
if not django_settings_is_configured():
return
from django.core import mail
return mail.outbox
@pytest.fixture(scope="function")
def django_mail_patch_dns(monkeypatch, django_mail_dnsname):
from django.core import mail
monkeypatch.setattr(mail.message, "DNS_NAME", django_mail_dnsname)
@pytest.fixture(scope="function")
def django_mail_dnsname():
return "fake-tests.example.com"
@pytest.fixture(autouse=True, scope="function")
def _django_set_urlconf(request):
marker = request.node.get_closest_marker("urls")
if marker:
skip_if_no_django()
import django.conf
from django.urls import clear_url_caches, set_urlconf
urls = validate_urls(marker)
original_urlconf = django.conf.settings.ROOT_URLCONF
django.conf.settings.ROOT_URLCONF = urls
clear_url_caches()
set_urlconf(None)
def restore():
django.conf.settings.ROOT_URLCONF = original_urlconf
# Copy the pattern from
# https://github.com/django/django/blob/master/django/test/signals.py#L152
clear_url_caches()
set_urlconf(None)
request.addfinalizer(restore)
@pytest.fixture(autouse=True, scope="session")
def _fail_for_invalid_template_variable():
class InvalidVarException:
def __init__(self):
self.fail = True
def __contains__(self, key):
return key == "%s"
@staticmethod
def _get_origin():
stack = inspect.stack()
# Try to use topmost `self.origin` first (Django 1.9+, and with
# TEMPLATE_DEBUG)..
for f in stack[2:]:
func = f[3]
if func == "render":
frame = f[0]
try:
origin = frame.f_locals["self"].origin
except (AttributeError, KeyError):
continue
if origin is not None:
return origin
from django.template import Template
# finding the ``render`` needle in the stack
frame = reduce(
lambda x, y: y[3] == "render" and "base.py" in y[1] and y or x, stack
)
# assert 0, stack
frame = frame[0]
# finding only the frame locals in all frame members
f_locals = reduce(
lambda x, y: y[0] == "f_locals" and y or x, inspect.getmembers(frame)
)[1]
# ``django.template.base.Template``
template = f_locals["self"]
if isinstance(template, Template):
return template.name
def __mod__(self, var):
origin = self._get_origin()
if origin:
msg = "Undefined template variable '{}' in '{}'".format(var, origin)
else:
msg = "Undefined template variable '%s'" % var
if self.fail:
pytest.fail(msg)
else:
return msg
if (
os.environ.get(INVALID_TEMPLATE_VARS_ENV, "false") == "true"
and django_settings_is_configured()
):
from django.conf import settings as dj_settings
if dj_settings.TEMPLATES:
dj_settings.TEMPLATES[0]["OPTIONS"]["string_if_invalid"] = InvalidVarException()
@pytest.fixture(autouse=True)
def _template_string_if_invalid_marker(request):
marker = request.keywords.get("ignore_template_errors", None)
if os.environ.get(INVALID_TEMPLATE_VARS_ENV, "false") == "true":
if marker and django_settings_is_configured():
from django.conf import settings as dj_settings
if dj_settings.TEMPLATES:
dj_settings.TEMPLATES[0]["OPTIONS"]["string_if_invalid"].fail = False
@pytest.fixture(autouse=True, scope="function")
def _django_clear_site_cache():
if django_settings_is_configured():
from django.conf import settings as dj_settings
if "django.contrib.sites" in dj_settings.INSTALLED_APPS:
from django.contrib.sites.models import Site
Site.objects.clear_cache()
# ############### Helper Functions ################
class _DatabaseBlockerContextManager:
def __init__(self, db_blocker):
self._db_blocker = db_blocker
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
self._db_blocker.restore()
class _DatabaseBlocker:
def __init__(self):
self._history = []
self._real_ensure_connection = None
@property
def _dj_db_wrapper(self):
from django.db.backends.base.base import BaseDatabaseWrapper
# The first time the _dj_db_wrapper is accessed, we will save a
# reference to the real implementation.
if self._real_ensure_connection is None:
self._real_ensure_connection = BaseDatabaseWrapper.ensure_connection
return BaseDatabaseWrapper
def _save_active_wrapper(self):
return self._history.append(self._dj_db_wrapper.ensure_connection)
def _blocking_wrapper(*args, **kwargs):
__tracebackhide__ = True
__tracebackhide__ # Silence pyflakes
raise RuntimeError(
"Database access not allowed, "
'use the "django_db" mark, or the '
'"db" or "transactional_db" fixtures to enable it.'
)
def unblock(self):
self._save_active_wrapper()
self._dj_db_wrapper.ensure_connection = self._real_ensure_connection
return _DatabaseBlockerContextManager(self)
def block(self):
self._save_active_wrapper()
self._dj_db_wrapper.ensure_connection = self._blocking_wrapper
return _DatabaseBlockerContextManager(self)
def restore(self):
self._dj_db_wrapper.ensure_connection = self._history.pop()
_blocking_manager = _DatabaseBlocker()
def validate_django_db(marker):
def apifun(transaction=False, reset_sequences=False):
return transaction, reset_sequences
return apifun(*marker.args, **marker.kwargs)
def validate_urls(marker):
def apifun(urls):
return urls
return apifun(*marker.args, **marker.kwargs)
| true | true |
f72489a44f3b9b2634bf77eab598bc59f36daa24 | 26,662 | py | Python | src/squad/graphs.py | douglasdaly/spot-robot | 7a4fdd7eb5fe5fc2d31180ed6b9f7ea21647bea2 | [
"MIT"
] | null | null | null | src/squad/graphs.py | douglasdaly/spot-robot | 7a4fdd7eb5fe5fc2d31180ed6b9f7ea21647bea2 | [
"MIT"
] | null | null | null | src/squad/graphs.py | douglasdaly/spot-robot | 7a4fdd7eb5fe5fc2d31180ed6b9f7ea21647bea2 | [
"MIT"
] | null | null | null | from typing import Any, Dict, List, Optional, Tuple, Type, Union, overload
import numpy as np
from squad.exceptions import (
EdgeAlreadyExists,
EdgeNotFound,
NodeAlreadyExists,
NodeNotFound,
)
class Node:
"""
Single node in a graph.
"""
def __init__(self, name: str, **data: Any) -> None:
self._name = name
self._data = data
@property
def name(self) -> str:
"""str: The name of this node."""
return self._name
@property
def data(self) -> Dict[str, Any]:
"""Dict[str, Any]: The data stored in this node (if any)."""
return self._data.copy()
def __str__(self) -> str:
return f"{self.__class__.__name__}({self._name})"
def __hash__(self) -> int:
return hash((self.__class__.__name__, self._name))
def __eq__(self, __o: object) -> bool:
if isinstance(__o, Node):
return self._name == __o._name
elif isinstance(__o, str):
return self._name == __o
raise ValueError(
f"Cannot compare {self.__class__.__name__} with"
f" {type(__o).__name__}"
)
def __getitem__(self, key: str) -> Any:
return self._data[key]
def update(self, **data: Any) -> None:
"""Updates the data stored on this node.
Parameters
----------
**data : Any, optional
The data parameters to update on this node.
"""
self._data.update(data)
class Edge:
    """
    Single edge in a graph.

    Parameters
    ----------
    u : Node
        The node this edge starts from.
    v : Node
        The node this edge points to.
    weight : float, default=1.0
        The multiplier applied to this edge's value.
    **data : Any, optional
        Arbitrary data to associate with this edge.
    """

    def __init__(
        self,
        u: Node,
        v: Node,
        weight: float = 1.0,
        **data: Any,
    ) -> None:
        self._u = u
        self._v = v
        self._weight = weight
        self._val: Optional[float] = None
        self._data = data

    @property
    def u(self) -> Node:
        """Node: The node this edge starts from."""
        return self._u

    @property
    def v(self) -> Node:
        """Node: The node this edge points to."""
        return self._v

    @property
    def weight(self) -> float:
        """float: The weight applied to this edge's value."""
        return self._weight

    @weight.setter
    def weight(self, value: float) -> None:
        self._weight = value

    @property
    def value(self) -> float:
        """float: The (lazily computed, cached) value of this edge."""
        cached = self._val
        if cached is None:
            cached = self.get_value()
            self._val = cached
        return cached

    @value.setter
    def value(self, value: float) -> None:
        self._val = value

    @property
    def weighted_value(self) -> float:
        """float: This edge's value scaled by its weight."""
        return self.value * self._weight

    @property
    def data(self) -> Dict[str, Any]:
        """Dict[str, Any]: A shallow copy of this edge's stored data."""
        return dict(self._data)

    def __str__(self) -> str:
        return "{}({}, {})".format(
            type(self).__name__,
            self._u.name,
            self._v.name,
        )

    def __hash__(self) -> int:
        return hash((type(self).__name__, self._u, self._v))

    def __eq__(self, __o: object) -> bool:
        # A tuple compares by (u-name, v-name); another Edge compares
        # by node equality. Anything else is rejected loudly.
        if isinstance(__o, tuple):
            return (self._u.name, self._v.name) == (__o[0], __o[1])
        if isinstance(__o, Edge):
            return (self._u, self._v) == (__o._u, __o._v)
        raise ValueError(
            f"Cannot compare {self.__class__.__name__} with"
            f" {type(__o).__name__}"
        )

    def __getitem__(self, key: str) -> Any:
        return self._data[key]

    def __call__(self, **kwargs: Any) -> float:
        self.update(**kwargs)
        return self.weighted_value

    def update(self, **data: Any) -> None:
        """Refreshes this edge's value, merging in any new data first.

        Parameters
        ----------
        **data : Any, optional
            Data entries to store/overwrite on this edge before the
            value is recomputed.
        """
        if data:
            self._data.update(data)
        # The value is re-derived even when no new data was supplied so
        # that callers can force a refresh.
        self._val = self.get_value()

    def get_value(self) -> float:
        """Computes this edge's (unweighted) value.

        Returns
        -------
        float
            The value of this edge (always ``1.0`` for this class).
        """
        return 1.0
def remove_square_matrix_index(matrix: np.ndarray, index: int) -> np.ndarray:
    """Removes the row & column of the specified index from the given
    square matrix.

    Parameters
    ----------
    matrix : np.ndarray
        The square (2D, n-by-n) matrix to remove the specified `index`
        row and column from.
    index : int
        The index of the row & column to remove from the given
        `matrix`. Negative indices are supported with the usual
        Python/numpy semantics (``-1`` is the last row/column).

    Returns
    -------
    np.ndarray
        The new matrix, from the original `matrix` given, with the
        desired row & column `index` removed.

    Raises
    ------
    ValueError
        If the given `matrix` is not a 2D square matrix.
    IndexError
        If the given `index` is out of bounds for the given `matrix`.
    """
    # Require exactly 2 dimensions: the previous `ndim < 2` check let
    # higher-rank arrays through as long as their first two dims matched.
    if matrix.ndim != 2 or matrix.shape[0] != matrix.shape[1]:
        raise ValueError(f"Invalid matrix given, shape: {matrix.shape}")
    # Valid indices for size n are -n..n-1; the old `abs(index) > n-1`
    # test wrongly rejected the valid negative index -n.
    n = matrix.shape[0]
    if not -n <= index < n:
        raise IndexError(index)
    return np.delete(np.delete(matrix, index, axis=0), index, axis=1)
class Graph:
    """
    Directed graph backed by adjacency/connectivity matrices.
    """

    def __init__(
        self,
        node_cls: Optional[Type[Node]] = None,
        edge_cls: Optional[Type[Edge]] = None,
    ) -> None:
        self._node_cls = node_cls or Node
        self._nodes: List[Node] = []
        self._node_lookup: Dict[str, int] = {}
        self._edge_cls = edge_cls or Edge
        self._edges: List[Edge] = []
        self._edge_lookup: Dict[Tuple[str, str], int] = {}
        self._adj_mat = np.array([], dtype=float)
        self._con_mat = self._adj_mat.copy()

    @property
    def nodes(self) -> Dict[str, Node]:
        """Dict[str, Node]: The nodes contained in this graph."""
        return {x.name: x for x in self._nodes}

    @property
    def edges(self) -> Dict[str, Dict[str, Edge]]:
        """Dict[str, Dict[str, Edge]]: The edges in this graph."""
        ret = {x.name: {} for x in self._nodes}
        for x in self._edges:
            ret[x.u.name][x.v.name] = x
        return ret

    def __getitem__(
        self,
        key: Union[str, Tuple[str, str]],
    ) -> Union[Edge, Node]:
        if isinstance(key, str):
            if key not in self._node_lookup:
                raise NodeNotFound(key)
            return self._nodes[self._node_lookup[key]]
        else:
            if key not in self._edge_lookup:
                raise EdgeNotFound(*key)
            return self._edges[self._edge_lookup[key]]

    def add(self, obj: Union[Edge, Node]) -> None:
        """Adds an edge or node to this graph.

        Parameters
        ----------
        obj : Union[Edge, Node]
            The node or edge object to add to this graph.

        Raises
        ------
        EdgeAlreadyExists
            If the given edge `obj` is already in this graph.
        NodeAlreadyExists
            If the given node `obj` is already in this graph.
        NodeNotFound
            If one or both of the nodes in the given edge `obj` is not
            in this graph.
        """
        if isinstance(obj, Edge):
            if obj in self._edges:
                raise EdgeAlreadyExists(obj.u.name, obj.v.name)
            elif obj.u.name not in self._node_lookup:
                raise NodeNotFound(obj.u.name)
            elif obj.v.name not in self._node_lookup:
                raise NodeNotFound(obj.v.name)
            self._add_edge_obj(obj)
        else:
            if obj in self._nodes:
                raise NodeAlreadyExists(obj.name)
            self._add_node_obj(obj)
        return

    def remove(self, obj: Union[Edge, Node]) -> None:
        """Removes the given edge or node from this graph.

        Parameters
        ----------
        obj : Union[Edge, Node]
            The edge or node object to remove from this graph.

        Raises
        ------
        EdgeNotFound
            If the given edge `obj` could not be found.
        NodeNotFound
            If the given node `obj` could not be found.
        """
        if isinstance(obj, Edge):
            if obj not in self._edges:
                raise EdgeNotFound(obj.u.name, obj.v.name)
            self._remove_edge_obj(obj.u.name, obj.v.name)
        else:
            if obj not in self._nodes:
                raise NodeNotFound(obj.name)
            self._remove_node_obj(obj.name)
        return

    def clear(self) -> None:
        """Clears all nodes and edges from this graph."""
        self._node_lookup.clear()
        self._nodes.clear()
        self._edge_lookup.clear()
        self._edges.clear()
        self._adj_mat = np.array([], dtype=self._adj_mat.dtype)
        self._con_mat = self._adj_mat.copy()

    def _add_edge_obj(self, edge: Edge) -> None:
        """Adds a new edge object to this graph."""
        self._edges.append(edge)
        new_n_edges = len(self._edges)
        self._edge_lookup[(edge.u.name, edge.v.name)] = new_n_edges - 1
        idx_u = self._nodes.index(edge.u)
        idx_v = self._nodes.index(edge.v)
        self._adj_mat[idx_u, idx_v] = 1.0
        self._con_mat[idx_u, idx_v] = 1.0
        if idx_u != idx_v:
            self._con_mat[idx_v, idx_u] = 1.0
        return

    def _remove_edge_obj(self, u_name: str, v_name: str) -> None:
        """Removes the specified edge from this graph."""
        # - Update adjacency/connection matrices
        u_idx = self._node_lookup[u_name]
        v_idx = self._node_lookup[v_name]
        self._adj_mat[u_idx, v_idx] = 0.0
        if u_idx == v_idx:
            self._con_mat[u_idx, v_idx] = 0.0
        elif (v_name, u_name) not in self._edge_lookup:
            # Only clear connectivity when no reverse edge remains.
            self._con_mat[u_idx, v_idx] = 0.0
            self._con_mat[v_idx, u_idx] = 0.0
        # - Remove edge
        edge_idx = self._edge_lookup.pop((u_name, v_name))
        self._edges.pop(edge_idx)
        # - Update lookup table for the edges that shifted down one slot
        edge_names_to_update = [
            (x.u.name, x.v.name) for x in self._edges[edge_idx:]
        ]
        for edge_name in edge_names_to_update:
            self._edge_lookup[edge_name] -= 1
        return

    def _add_node_obj(self, node: Node) -> None:
        """Adds a new node object to this graph."""
        orig_n_nodes = len(self._nodes)
        self._nodes.append(node)
        self._node_lookup[node.name] = orig_n_nodes
        new_n_nodes = orig_n_nodes + 1
        # Grow both matrices by one row and column, preserving contents.
        upd_adj_mat = np.zeros(
            (new_n_nodes, new_n_nodes),
            dtype=self._adj_mat.dtype,
        )
        upd_con_mat = upd_adj_mat.copy()
        if orig_n_nodes:
            upd_adj_mat[:orig_n_nodes, :orig_n_nodes] = self._adj_mat
            upd_con_mat[:orig_n_nodes, :orig_n_nodes] = self._con_mat
        self._adj_mat = upd_adj_mat
        self._con_mat = upd_con_mat

    def _remove_node_obj(self, node_name: str) -> None:
        """Removes an existing node object from this graph."""
        # Pop (not just read) the lookup entry: previously the removed
        # node's name stayed in the table pointing at a stale index, so
        # `graph[name]` returned the wrong node and re-adding the name
        # via `add_nodes` wrongly raised NodeAlreadyExists.
        node_idx = self._node_lookup.pop(node_name)
        # Update the adjacency/connection matrices
        self._adj_mat = remove_square_matrix_index(self._adj_mat, node_idx)
        self._con_mat = remove_square_matrix_index(self._con_mat, node_idx)

        # - Remove any edge objects connected to the node
        def _edge_filter(x: Tuple[str, str]) -> bool:
            return node_name in x

        # Pop from the highest index down so earlier pops don't shift
        # the positions of later ones.
        edge_idxs_to_remove = sorted(
            (
                self._edge_lookup[k]
                for k in filter(_edge_filter, self._edge_lookup.keys())
            ),
            reverse=True,
        )
        edge_names_to_remove = [
            (x.u.name, x.v.name)
            for x in (self._edges[i] for i in edge_idxs_to_remove)
        ]
        for i, n in zip(edge_idxs_to_remove, edge_names_to_remove):
            del self._edge_lookup[n]
            self._edges.pop(i)
        # - Remove the node object
        self._nodes.pop(node_idx)
        # - Update the lookup tables
        for node in self._nodes[node_idx:]:
            self._node_lookup[node.name] -= 1
        for i, edge in enumerate(self._edges):
            self._edge_lookup[(edge.u.name, edge.v.name)] = i
        return

    def add_node(self, name: str, **data: Any) -> None:
        """Creates and adds a new node to this graph.

        Parameters
        ----------
        name : str
            The name of the node to add to this graph.
        **data : Any
            The data of the node to add to this graph (if any).

        Raises
        ------
        NodeAlreadyExists
            If a node with the same `name` given already exists in this
            graph.
        """
        # O(1) membership via the lookup table (consistent with
        # `add_nodes`) instead of scanning all node objects.
        if name in self._node_lookup:
            raise NodeAlreadyExists(name)
        new_node = self._node_cls(name, **data)
        self._add_node_obj(new_node)

    def add_nodes(self, *names: str, **data: Any) -> None:
        """Creates and adds new node(s) to this graph.

        Parameters
        ----------
        *names : str
            The name(s) of the new nodes to create and add.
        **data : Any, optional
            The data (if any) to associate with each of the new nodes.

        Raises
        ------
        NodeAlreadyExists
            If any of the nodes from the given `names` already exist in
            this graph.
        ValueError
            If no `names` are provided.
        """
        # The docstring always promised this ValueError but the previous
        # implementation silently did nothing for an empty call.
        if not names:
            raise ValueError("You must provide at least one node name")
        for name in names:
            if name in self._node_lookup:
                raise NodeAlreadyExists(name)
        for name in names:
            new_node = self._node_cls(name, **data)
            self._add_node_obj(new_node)
        return

    def remove_node(self, name: str) -> None:
        """Removes the specified node from this graph.

        Parameters
        ----------
        name : str
            The name of the node to remove.

        Raises
        ------
        NodeNotFound
            If the node with the given `name` could not be found.
        """
        if name not in self._node_lookup:
            raise NodeNotFound(name)
        self._remove_node_obj(name)

    def add_edge(
        self,
        u_name: str,
        v_name: str,
        weight: float = 1.0,
        **data: Any,
    ) -> None:
        """Creates and adds a new edge to this graph.

        Parameters
        ----------
        u_name : str
            The name of the (existing) node to set as the first node for
            the new edge to add.
        v_name : str
            The name of the (existing) node to set as the second node
            for the new edge to add.
        weight : float, default=1.0
            The weight to use for the new edge to add.
        **data : Any, optional
            The data (if any) to store on the new edge.

        Raises
        ------
        EdgeAlreadyExists
            If an edge for the given nodes specified already exists in
            this graph.
        NodeNotFound
            If either of the given nodes specified could not be found.
        """
        # Use the lookup tables (O(1), consistent with `add_edges`)
        # instead of scanning the edge and node lists. The error order
        # (edge-exists first, then missing nodes) is preserved.
        if (u_name, v_name) in self._edge_lookup:
            raise EdgeAlreadyExists(u_name, v_name)
        if u_name not in self._node_lookup:
            raise NodeNotFound(u_name)
        if v_name not in self._node_lookup:
            raise NodeNotFound(v_name)
        u = self._nodes[self._node_lookup[u_name]]
        v = self._nodes[self._node_lookup[v_name]]
        new_edge = self._edge_cls(u, v, weight=weight, **data)
        self._add_edge_obj(new_edge)

    def add_edges(
        self,
        u_name: str,
        *v_names: str,
        weight: float = 1.0,
        **data: Any,
    ) -> None:
        """Adds multiple edges from `u_name` to this graph.

        Parameters
        ----------
        u_name : str
            The name of the (existing) node to set as the first node for
            the new edges to add.
        *v_names : str
            The names of the (existing) nodes to set as the second node
            for the new edge to add.
        weight : float, default=1.0
            The weight to use for each new edge to add.
        **data : Any, optional
            The data (if any) to store on each new edge.

        Raises
        ------
        EdgeAlreadyExists
            If any edge for the given nodes specified already exists in
            this graph.
        NodeNotFound
            If any of the given nodes specified could not be found.
        ValueError
            If no `v_names` are provided.
        """
        if not v_names:
            raise ValueError("You must provide at least one v node name")
        if u_name not in self._node_lookup:
            raise NodeNotFound(u_name)
        else:
            for v in v_names:
                if v not in self._node_lookup:
                    raise NodeNotFound(v)
        for e in ((u_name, v) for v in v_names):
            if e in self._edge_lookup:
                raise EdgeAlreadyExists(e[0], e[1])
        u_node = self._nodes[self._node_lookup[u_name]]
        for v_name in v_names:
            v_node = self._nodes[self._node_lookup[v_name]]
            new_edge = self._edge_cls(u_node, v_node, weight=weight, **data)
            self._add_edge_obj(new_edge)
        return

    def remove_edge(self, u_name: str, v_name: str) -> None:
        """Removes the edge specified from this graph.

        Parameters
        ----------
        u_name : str
            The name of the first node in the edge to remove.
        v_name : str
            The name of the second node in the edge to remove.

        Raises
        ------
        EdgeNotFound
            If the specified edge could not be found.
        NodeNotFound
            If either node specified by the given `u_name` and `v_name`
            could not be found.
        """
        if u_name not in self._node_lookup:
            raise NodeNotFound(u_name)
        elif v_name not in self._node_lookup:
            raise NodeNotFound(v_name)
        elif (u_name, v_name) not in self._edge_lookup:
            raise EdgeNotFound(u_name, v_name)
        self._remove_edge_obj(u_name, v_name)

    def update_nodes(self, *names: str, **data: Any) -> None:
        """Updates the node(s) in this graph.

        Parameters
        ----------
        *names : str, optional
            The specific node(s) to update (if not given then all nodes
            will be updated).
        **data : Any, optional
            The data updates to push to all nodes in the graph for the
            update calls.
        """
        if names:
            nodes = (x for x in self._nodes if x.name in names)
        else:
            nodes = self._nodes
        for node in nodes:
            node.update(**data)
        return

    def update_edges(self, *names: str, **data: Any) -> None:
        """Updates all the edges in this graph.

        Parameters
        ----------
        *names : str, optional
            The u-node (first node) names of the relevant edges to
            update (if not provided then all edges are updated).
        **data : Any, optional
            Any data updates to push to all edges in the graph for the
            update calls.
        """
        if names:
            edges = (x for x in self._edges if x.u.name in names)
        else:
            edges = self._edges
        for edge in edges:
            edge.update(**data)
        return

    @overload
    def adj_edges(self, u_name: str) -> List[Edge]:
        ...

    @overload
    def adj_edges(self, u_name: str, v_name: str) -> Edge:
        ...

    def adj_edges(
        self,
        u_name: str,
        v_name: Optional[str] = None,
    ) -> Union[Edge, List[Edge]]:
        """Gets the adjacenct edge(s) specified.

        Parameters
        ----------
        u_name : str
            The name of the node to get the adjacent edge(s) *from*.
        v_name : str, optional
            The name of the node to get the adjacent edge(s) *to* (if
            any).  If not specified (default) it will return all
            possible adjacent edges.

        Returns
        -------
        Edge or List[Edge]
            The adjacent edge(s) from the specified `u_name` (if
            `v_name` was not specified).  If `v_name` was given then
            it just returns the adjacent edge from the specified
            `u_name` node to the specified `v_name` node.

        Raises
        ------
        NodeNotFound
            If the specified `u_name` node (or `v_name` node, if given)
            could not be found.
        EdgeNotFound
            If the specified `u_name` to `v_name` (if given) edge could
            not be found.

        See Also
        --------
        adj, adj_values, adj_weights
        """
        # Resolve nodes via the lookup table instead of a linear scan;
        # the error order (u first, then v) is preserved.
        if u_name not in self._node_lookup:
            raise NodeNotFound(u_name)
        if v_name is not None and v_name not in self._node_lookup:
            raise NodeNotFound(v_name)
        u_idx = self._node_lookup[u_name]
        if v_name is None:
            # - All adjacent edges
            adj_edges: List[Edge] = []
            for i, v in enumerate(self._adj_mat[u_idx]):
                if v == 0.0:
                    continue
                v_node = self._nodes[i]
                t_edge = self._edges[self._edge_lookup[(u_name, v_node.name)]]
                adj_edges.append(t_edge)
            return adj_edges
        else:
            # - Single edge
            try:
                adj_edge = self._edges[self._edge_lookup[(u_name, v_name)]]
            except KeyError:
                raise EdgeNotFound(u_name, v_name)
            return adj_edge

    @overload
    def adj_values(
        self,
        u_name: str,
    ) -> Dict[str, float]:
        ...

    @overload
    def adj_values(
        self,
        u_name: str,
        v_name: str,
    ) -> float:
        ...

    def adj_values(
        self,
        u_name: str,
        v_name: Optional[str] = None,
    ) -> Union[float, Dict[str, float]]:
        """Gets the adjacency edge value(s) for the specified node/edge.

        Parameters
        ----------
        u_name : str
            The name of the node to get the adjacency data *from*.
        v_name : str, optional
            The name of the node to get the adjacency data *to* (if
            any).  If not specified (default) it will return all
            possible adjacent nodes and values.

        Returns
        -------
        float or Dict[str, float]
            The adjacent edges and values from the specified `u_name`
            (if `v_name` was not specified).  If `v_name` was given then
            it just returns the value of the adjacency edge from the
            specified `u_name` node to the specified `v_name` node.

        See Also
        --------
        adj, adj_edges, adj_weights
        """
        # - Single edge value
        if v_name is not None:
            edge = self.adj_edges(u_name, v_name)
            return edge.value
        # - All adjacent edge values
        edges = self.adj_edges(u_name)
        ret = {x.v.name: x.value for x in edges}
        return ret

    @overload
    def adj(
        self,
        u_name: str,
    ) -> Dict[str, float]:
        ...

    @overload
    def adj(
        self,
        u_name: str,
        v_name: str,
    ) -> float:
        ...

    def adj(
        self,
        u_name: str,
        v_name: Optional[str] = None,
    ) -> Union[float, Dict[str, float]]:
        """Gets the adjacency edge weighted-value(s) for the specified
        node/edge.

        Parameters
        ----------
        u_name : str
            The name of the node to get the adjacency data *from*.
        v_name : str, optional
            The name of the node to get the adjacency data *to* (if
            any).  If not specified (default) it will return all
            possible adjacent nodes and values.

        Returns
        -------
        float or Dict[str, float]
            The adjacent edges and weighted-values from the specified
            `u_name` (if `v_name` was not specified).  If `v_name` was
            given then it just returns the weighted-value of the
            adjacent edge from the specified `u_name` node to the
            specified `v_name` node.

        See Also
        --------
        adj_edges, adj_values, adj_weights
        """
        # - Single edge value
        if v_name is not None:
            edge = self.adj_edges(u_name, v_name)
            return edge.weighted_value
        # - All adjacent edge values
        edges = self.adj_edges(u_name)
        ret = {x.v.name: x.weighted_value for x in edges}
        return ret

    @overload
    def adj_weights(
        self,
        u_name: str,
    ) -> Dict[str, float]:
        ...

    @overload
    def adj_weights(
        self,
        u_name: str,
        v_name: str,
    ) -> float:
        ...

    def adj_weights(
        self,
        u_name: str,
        v_name: Optional[str] = None,
    ) -> Union[float, Dict[str, float]]:
        """Gets the adjacency edge weight(s) of the specified node/edge.

        Parameters
        ----------
        u_name : str
            The name of the node to get the adjacency data *from*.
        v_name : str, optional
            The name of the node to get the adjacency data *to* (if
            any).  If not specified (default) it will return all
            possible adjacent nodes and values.

        Returns
        -------
        float or Dict[str, float]
            The adjacent edges and weight(s) from the specified `u_name`
            node (if `v_name` was not specified).  If `v_name` was given
            then it just returns the raw value of the adjacent edge from
            the specified `u_name` node to the specified `v_name` node.

        See Also
        --------
        adj, adj_edges, adj_values
        """
        # - Single edge value
        if v_name is not None:
            edge = self.adj_edges(u_name, v_name)
            return edge.weight
        # - All adjacent edge values
        edges = self.adj_edges(u_name)
        ret = {x.v.name: x.weight for x in edges}
        return ret
| 29.39581 | 78 | 0.543808 | from typing import Any, Dict, List, Optional, Tuple, Type, Union, overload
import numpy as np
from squad.exceptions import (
EdgeAlreadyExists,
EdgeNotFound,
NodeAlreadyExists,
NodeNotFound,
)
class Node:
def __init__(self, name: str, **data: Any) -> None:
self._name = name
self._data = data
@property
def name(self) -> str:
return self._name
@property
def data(self) -> Dict[str, Any]:
return self._data.copy()
def __str__(self) -> str:
return f"{self.__class__.__name__}({self._name})"
def __hash__(self) -> int:
return hash((self.__class__.__name__, self._name))
def __eq__(self, __o: object) -> bool:
if isinstance(__o, Node):
return self._name == __o._name
elif isinstance(__o, str):
return self._name == __o
raise ValueError(
f"Cannot compare {self.__class__.__name__} with"
f" {type(__o).__name__}"
)
def __getitem__(self, key: str) -> Any:
return self._data[key]
def update(self, **data: Any) -> None:
self._data.update(data)
class Edge:
def __init__(
self,
u: Node,
v: Node,
weight: float = 1.0,
**data: Any,
) -> None:
self._u = u
self._v = v
self._wgt = weight
self._value: Optional[float] = None
self._data = data
@property
def u(self) -> Node:
return self._u
@property
def v(self) -> Node:
return self._v
@property
def weight(self) -> float:
return self._wgt
@weight.setter
def weight(self, value: float) -> None:
self._wgt = value
@property
def value(self) -> float:
if self._value is None:
self._value = self.get_value()
return self._value
@value.setter
def value(self, value: float) -> None:
self._value = value
@property
def weighted_value(self) -> float:
return self._wgt * self.value
@property
def data(self) -> Dict[str, Any]:
return self._data.copy()
def __str__(self) -> str:
return f"{self.__class__.__name__}({self._u.name}, {self._v.name})"
def __hash__(self) -> int:
return hash((self.__class__.__name__, self._u, self._v))
def __eq__(self, __o: object) -> bool:
if isinstance(__o, Edge):
return self._u == __o._u and self._v == __o._v
elif isinstance(__o, tuple):
return self._u._name == __o[0] and self._v._name == __o[1]
raise ValueError(
f"Cannot compare {self.__class__.__name__} with"
f" {type(__o).__name__}"
)
def __getitem__(self, key: str) -> Any:
return self._data[key]
def __call__(self, **kwargs: Any) -> float:
self.update(**kwargs)
return self._wgt * self.value
def update(self, **data: Any) -> None:
if data:
self._data.update(data)
self._value = self.get_value()
def get_value(self) -> float:
return 1.0
def remove_square_matrix_index(matrix: np.ndarray, index: int) -> np.ndarray:
if matrix.ndim < 2 or matrix.shape[0] != matrix.shape[1]:
raise ValueError(f"Invalid matrix given, shape: {matrix.shape}")
elif abs(index) > (matrix.shape[0] - 1):
raise IndexError(index)
return np.delete(np.delete(matrix, index, axis=0), index, axis=1)
class Graph:
def __init__(
self,
node_cls: Optional[Type[Node]] = None,
edge_cls: Optional[Type[Edge]] = None,
) -> None:
self._node_cls = node_cls or Node
self._nodes: List[Node] = []
self._node_lookup: Dict[str, int] = {}
self._edge_cls = edge_cls or Edge
self._edges: List[Edge] = []
self._edge_lookup: Dict[Tuple[str, str], int] = {}
self._adj_mat = np.array([], dtype=float)
self._con_mat = self._adj_mat.copy()
@property
def nodes(self) -> Dict[str, Node]:
return {x.name: x for x in self._nodes}
@property
def edges(self) -> Dict[str, Dict[str, Edge]]:
ret = {x.name: {} for x in self._nodes}
for x in self._edges:
ret[x.u.name][x.v.name] = x
return ret
def __getitem__(
self,
key: Union[str, Tuple[str, str]],
) -> Union[Edge, Node]:
if isinstance(key, str):
if key not in self._node_lookup:
raise NodeNotFound(key)
return self._nodes[self._node_lookup[key]]
else:
if key not in self._edge_lookup:
raise EdgeNotFound(*key)
return self._edges[self._edge_lookup[key]]
def add(self, obj: Union[Edge, Node]) -> None:
if isinstance(obj, Edge):
if obj in self._edges:
raise EdgeAlreadyExists(obj.u.name, obj.v.name)
elif obj.u.name not in self._node_lookup:
raise NodeNotFound(obj.u.name)
elif obj.v.name not in self._node_lookup:
raise NodeNotFound(obj.v.name)
self._add_edge_obj(obj)
else:
if obj in self._nodes:
raise NodeAlreadyExists(obj.name)
self._add_node_obj(obj)
return
def remove(self, obj: Union[Edge, Node]) -> None:
if isinstance(obj, Edge):
if obj not in self._edges:
raise EdgeNotFound(obj.u.name, obj.v.name)
self._remove_edge_obj(obj.u.name, obj.v.name)
else:
if obj not in self._nodes:
raise NodeNotFound(obj.name)
self._remove_node_obj(obj.name)
return
def clear(self) -> None:
self._node_lookup.clear()
self._nodes.clear()
self._edge_lookup.clear()
self._edges.clear()
self._adj_mat = np.array([], dtype=self._adj_mat.dtype)
self._con_mat = self._adj_mat.copy()
def _add_edge_obj(self, edge: Edge) -> None:
self._edges.append(edge)
new_n_edges = len(self._edges)
self._edge_lookup[(edge.u.name, edge.v.name)] = new_n_edges - 1
idx_u = self._nodes.index(edge.u)
idx_v = self._nodes.index(edge.v)
self._adj_mat[idx_u, idx_v] = 1.0
self._con_mat[idx_u, idx_v] = 1.0
if idx_u != idx_v:
self._con_mat[idx_v, idx_u] = 1.0
return
def _remove_edge_obj(self, u_name: str, v_name: str) -> None:
u_idx = self._node_lookup[u_name]
v_idx = self._node_lookup[v_name]
self._adj_mat[u_idx, v_idx] = 0.0
if u_idx == v_idx:
self._con_mat[u_idx, v_idx] = 0.0
elif (v_name, u_name) not in self._edge_lookup:
self._con_mat[u_idx, v_idx] = 0.0
self._con_mat[v_idx, u_idx] = 0.0
edge_idx = self._edge_lookup.pop((u_name, v_name))
self._edges.pop(edge_idx)
edge_names_to_update = [
(x.u.name, x.v.name) for x in self._edges[edge_idx:]
]
for edge_name in edge_names_to_update:
self._edge_lookup[edge_name] -= 1
return
def _add_node_obj(self, node: Node) -> None:
orig_n_nodes = len(self._nodes)
self._nodes.append(node)
self._node_lookup[node.name] = orig_n_nodes
new_n_nodes = orig_n_nodes + 1
upd_adj_mat = np.zeros(
(new_n_nodes, new_n_nodes),
dtype=self._adj_mat.dtype,
)
upd_con_mat = upd_adj_mat.copy()
if orig_n_nodes:
upd_adj_mat[:orig_n_nodes, :orig_n_nodes] = self._adj_mat
upd_con_mat[:orig_n_nodes, :orig_n_nodes] = self._con_mat
self._adj_mat = upd_adj_mat
self._con_mat = upd_con_mat
def _remove_node_obj(self, node_name: str) -> None:
node_idx = self._node_lookup[node_name]
self._adj_mat = remove_square_matrix_index(self._adj_mat, node_idx)
self._con_mat = remove_square_matrix_index(self._con_mat, node_idx)
def _edge_filter(x: Tuple[str, str]) -> bool:
return node_name in x
edge_idxs_to_remove = sorted(
(
self._edge_lookup[k]
for k in filter(_edge_filter, self._edge_lookup.keys())
),
reverse=True,
)
edge_names_to_remove = [
(x.u.name, x.v.name)
for x in (self._edges[i] for i in edge_idxs_to_remove)
]
for i, n in zip(edge_idxs_to_remove, edge_names_to_remove):
del self._edge_lookup[n]
self._edges.pop(i)
self._nodes.pop(node_idx)
for node in self._nodes[node_idx:]:
self._node_lookup[node.name] -= 1
for i, edge in enumerate(self._edges):
self._edge_lookup[(edge.u.name, edge.v.name)] = i
return
def add_node(self, name: str, **data: Any) -> None:
if name in (x.name for x in self._nodes):
raise NodeAlreadyExists(name)
new_node = self._node_cls(name, **data)
self._add_node_obj(new_node)
def add_nodes(self, *names: str, **data: Any) -> None:
for name in names:
if name in self._node_lookup:
raise NodeAlreadyExists(name)
for name in names:
new_node = self._node_cls(name, **data)
self._add_node_obj(new_node)
return
def remove_node(self, name: str) -> None:
if name not in self._node_lookup:
raise NodeNotFound(name)
self._remove_node_obj(name)
def add_edge(
self,
u_name: str,
v_name: str,
weight: float = 1.0,
**data: Any,
) -> None:
if (u_name, v_name) in ((x.u.name, x.v.name) for x in self._edges):
raise EdgeAlreadyExists(u_name, v_name)
u = None
v = None
for node in self._nodes:
if node.name == u_name:
u = node
if node.name == v_name:
v = node
if u is not None and v is not None:
break
if u is None:
raise NodeNotFound(u_name)
if v is None:
raise NodeNotFound(v_name)
new_edge = self._edge_cls(u, v, weight=weight, **data)
self._add_edge_obj(new_edge)
def add_edges(
self,
u_name: str,
*v_names: str,
weight: float = 1.0,
**data: Any,
) -> None:
if not v_names:
raise ValueError("You must provide at least one v node name")
if u_name not in self._node_lookup:
raise NodeNotFound(u_name)
else:
for v in v_names:
if v not in self._node_lookup:
raise NodeNotFound(v)
for e in ((u_name, v) for v in v_names):
if e in self._edge_lookup:
raise EdgeAlreadyExists(e[0], e[1])
u_node = self._nodes[self._node_lookup[u_name]]
for v_name in v_names:
v_node = self._nodes[self._node_lookup[v_name]]
new_edge = self._edge_cls(u_node, v_node, weight=weight, **data)
self._add_edge_obj(new_edge)
return
def remove_edge(self, u_name: str, v_name: str) -> None:
if u_name not in self._node_lookup:
raise NodeNotFound(u_name)
elif v_name not in self._node_lookup:
raise NodeNotFound(v_name)
elif (u_name, v_name) not in self._edge_lookup:
raise EdgeNotFound(u_name, v_name)
self._remove_edge_obj(u_name, v_name)
def update_nodes(self, *names: str, **data: Any) -> None:
if names:
nodes = (x for x in self._nodes if x.name in names)
else:
nodes = self._nodes
for node in nodes:
node.update(**data)
return
def update_edges(self, *names: str, **data: Any) -> None:
if names:
edges = (x for x in self._edges if x.u.name in names)
else:
edges = self._edges
for edge in edges:
edge.update(**data)
return
@overload
def adj_edges(self, u_name: str) -> List[Edge]:
...
@overload
def adj_edges(self, u_name: str, v_name: str) -> Edge:
...
def adj_edges(
self,
u_name: str,
v_name: Optional[str] = None,
) -> Union[Edge, List[Edge]]:
u_idx = None
v_idx = None
for i, node in enumerate(self._nodes):
if node.name == u_name:
u_idx = i
if v_name is not None:
if node.name == v_name:
v_idx = i
if u_idx is not None and v_idx is not None:
break
elif u_idx is not None:
break
if u_idx is None:
raise NodeNotFound(u_name)
if v_name is not None and v_idx is None:
raise NodeNotFound(v_name)
if v_name is None:
adj_edges: List[Edge] = []
for i, v in enumerate(self._adj_mat[u_idx]):
if v == 0.0:
continue
v_node = self._nodes[i]
t_edge = self._edges[self._edge_lookup[(u_name, v_node.name)]]
adj_edges.append(t_edge)
return adj_edges
else:
try:
adj_edge = self._edges[self._edge_lookup[(u_name, v_name)]]
except KeyError:
raise EdgeNotFound(u_name, v_name)
return adj_edge
@overload
def adj_values(
self,
u_name: str,
) -> Dict[str, float]:
...
@overload
def adj_values(
self,
u_name: str,
v_name: str,
) -> float:
...
def adj_values(
self,
u_name: str,
v_name: Optional[str] = None,
) -> Union[float, Dict[str, float]]:
if v_name is not None:
edge = self.adj_edges(u_name, v_name)
return edge.value
edges = self.adj_edges(u_name)
ret = {x.v.name: x.value for x in edges}
return ret
@overload
def adj(
self,
u_name: str,
) -> Dict[str, float]:
...
@overload
def adj(
self,
u_name: str,
v_name: str,
) -> float:
...
def adj(
self,
u_name: str,
v_name: Optional[str] = None,
) -> Union[float, Dict[str, float]]:
if v_name is not None:
edge = self.adj_edges(u_name, v_name)
return edge.weighted_value
edges = self.adj_edges(u_name)
ret = {x.v.name: x.weighted_value for x in edges}
return ret
@overload
def adj_weights(
self,
u_name: str,
) -> Dict[str, float]:
...
@overload
def adj_weights(
self,
u_name: str,
v_name: str,
) -> float:
...
def adj_weights(
self,
u_name: str,
v_name: Optional[str] = None,
) -> Union[float, Dict[str, float]]:
if v_name is not None:
edge = self.adj_edges(u_name, v_name)
return edge.weight
edges = self.adj_edges(u_name)
ret = {x.v.name: x.weight for x in edges}
return ret
| true | true |
f72489d38d9e27f6b45fff3179aa6e269ca30371 | 1,919 | py | Python | tools/validatefiles.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | null | null | null | tools/validatefiles.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | null | null | null | tools/validatefiles.py | aleasims/Peach | bb56841e943d719d5101fee0a503ed34308eda04 | [
"MIT"
] | 1 | 2020-07-26T03:57:45.000Z | 2020-07-26T03:57:45.000Z | '''
This script will validate a DataModel against an collection of
input files. It will verify they are able to be parsed correctly.
'''
import os, sys, time, glob
sys.path.append("c:/peach")
print """
]] Peach Validate Multiple Files
]] Copyright (c) Michael Eddington
"""
if len(sys.argv) < 3:
print """
This program will crack a series of files against a selected
data model and verify the output matches the input. This allows
for build validation of data models.
Syntax: validatefiles <Peach PIT> <Data Model> <Input Files>
Peach PIT - The Peach XML file containing the data model
Data Model - Name of the data model to crack against
Input Files - The path to a folder or a UNIX style Glob
"""
sys.exit(0)
from Peach.Engine import *
from Peach.Engine.incoming import DataCracker
from Peach.publisher import *
from Peach.analyzer import Analyzer
from Peach.Analyzers import *
inputFiles = []
inputFilesPath = sys.argv[3]
dataModelName = sys.argv[2]
xmlFile = sys.argv[1]
if os.path.isdir(inputFilesPath):
for file in os.listdir(inputFilesPath):
inputFiles.append(os.path.join(inputFilesPath, file))
else:
inputFiles = glob.glob(inputFilesPath)
print " - Found %d files\n" % len(inputFiles)
peach = Analyzer.DefaultParser().asParser("file:"+xmlFile)
dataModel = peach.templates[dataModelName]
for file in inputFiles:
#peach = Analyzer.DefaultParser().asParser("file:"+xmlFile)
dataModel = peach.templates[dataModelName].copy(peach)
fd = open(file, "rb")
data = fd.read()
fd.close()
buff = PublisherBuffer(None, data, True)
cracker = DataCracker(peach)
cracker.optmizeModelForCracking(dataModel, True)
cracker.crackData(dataModel, buff)
if dataModel.getValue() == data:
print "Cracking of file '"+file+"' passed."
else:
print "Cracking of file '"+file+"' failed."
print "Done\n"
# end
| 25.25 | 67 | 0.704534 | '''
This script will validate a DataModel against an collection of
input files. It will verify they are able to be parsed correctly.
'''
import os, sys, time, glob
sys.path.append("c:/peach")
print """
]] Peach Validate Multiple Files
]] Copyright (c) Michael Eddington
"""
if len(sys.argv) < 3:
print """
This program will crack a series of files against a selected
data model and verify the output matches the input. This allows
for build validation of data models.
Syntax: validatefiles <Peach PIT> <Data Model> <Input Files>
Peach PIT - The Peach XML file containing the data model
Data Model - Name of the data model to crack against
Input Files - The path to a folder or a UNIX style Glob
"""
sys.exit(0)
from Peach.Engine import *
from Peach.Engine.incoming import DataCracker
from Peach.publisher import *
from Peach.analyzer import Analyzer
from Peach.Analyzers import *
inputFiles = []
inputFilesPath = sys.argv[3]
dataModelName = sys.argv[2]
xmlFile = sys.argv[1]
if os.path.isdir(inputFilesPath):
for file in os.listdir(inputFilesPath):
inputFiles.append(os.path.join(inputFilesPath, file))
else:
inputFiles = glob.glob(inputFilesPath)
print " - Found %d files\n" % len(inputFiles)
peach = Analyzer.DefaultParser().asParser("file:"+xmlFile)
dataModel = peach.templates[dataModelName]
for file in inputFiles:
dataModel = peach.templates[dataModelName].copy(peach)
fd = open(file, "rb")
data = fd.read()
fd.close()
buff = PublisherBuffer(None, data, True)
cracker = DataCracker(peach)
cracker.optmizeModelForCracking(dataModel, True)
cracker.crackData(dataModel, buff)
if dataModel.getValue() == data:
print "Cracking of file '"+file+"' passed."
else:
print "Cracking of file '"+file+"' failed."
print "Done\n"
| false | true |
f72489d8e00e85d8c00ed35e505fae2c30fe7577 | 1,139 | py | Python | Advanced-Algorithms-and-Complexity/Week3 - P vs NP/03 - Budget Allocation/budget_allocation.py | ChristineHu1207/Coursera-Data-Structures-and-Algorithms-Specialization | 27f543ca0778d00ffd624ffcd18bf555660e0168 | [
"MIT"
] | null | null | null | Advanced-Algorithms-and-Complexity/Week3 - P vs NP/03 - Budget Allocation/budget_allocation.py | ChristineHu1207/Coursera-Data-Structures-and-Algorithms-Specialization | 27f543ca0778d00ffd624ffcd18bf555660e0168 | [
"MIT"
] | null | null | null | Advanced-Algorithms-and-Complexity/Week3 - P vs NP/03 - Budget Allocation/budget_allocation.py | ChristineHu1207/Coursera-Data-Structures-and-Algorithms-Specialization | 27f543ca0778d00ffd624ffcd18bf555660e0168 | [
"MIT"
] | null | null | null | # python3
import itertools
n, m = list(map(int, input().split()))
A = []
for i in range(n):
A += [list(map(int, input().split()))]
b = list(map(int, input().split()))
clauses = []
for i, coefficient in enumerate(A):
non_coefficients = [(j, coefficient[j]) for j in range(m) if 0 != coefficient[j]]
l = len(non_coefficients)
for x in range(2 ** l):
current_set = [non_coefficients[j] for j in range(l) if 1 == ((x / 2 ** j) % 2) // 1]
current_sum = 0
for coeff in current_set:
current_sum += coeff[1]
if current_sum > b[i]:
clauses.append([-(coeff[0]+1) for coeff in current_set] + [(coeff[0]+1) for coeff in non_coefficients if coeff not in current_set])
if len(clauses) == 0:
clauses.append([1, -1])
m = 1
print(len(clauses), m)
for c in clauses:
c.append(0)
print(' '.join(map(str, c)))
# This solution prints a simple satisfiable formula
# and passes about half of the tests.
# Change this function to solve the problem.
# def printEquisatisfiableSatFormula():
# print("3 2")
# print("1 2 0")
# print("-1 -2 0")
# print("1 -2 0")
# printEquisatisfiableSatFormula()
| 24.76087 | 137 | 0.627744 |
import itertools
n, m = list(map(int, input().split()))
A = []
for i in range(n):
A += [list(map(int, input().split()))]
b = list(map(int, input().split()))
clauses = []
for i, coefficient in enumerate(A):
non_coefficients = [(j, coefficient[j]) for j in range(m) if 0 != coefficient[j]]
l = len(non_coefficients)
for x in range(2 ** l):
current_set = [non_coefficients[j] for j in range(l) if 1 == ((x / 2 ** j) % 2) // 1]
current_sum = 0
for coeff in current_set:
current_sum += coeff[1]
if current_sum > b[i]:
clauses.append([-(coeff[0]+1) for coeff in current_set] + [(coeff[0]+1) for coeff in non_coefficients if coeff not in current_set])
if len(clauses) == 0:
clauses.append([1, -1])
m = 1
print(len(clauses), m)
for c in clauses:
c.append(0)
print(' '.join(map(str, c)))
| true | true |
f72489ec9d755295d9c7b8adc3c280594304173c | 7,911 | py | Python | bokeh/_version.py | timelyportfolio/bokeh | a976a85535cf137c6238ce9e90b41ab14ae8ce22 | [
"BSD-3-Clause"
] | 2 | 2015-07-23T21:19:52.000Z | 2016-01-25T17:00:15.000Z | bokeh/_version.py | brian15co/bokeh | 6cecb7211277b9d838039d0eb15e50a10f9ac3d1 | [
"BSD-3-Clause"
] | null | null | null | bokeh/_version.py | brian15co/bokeh | 6cecb7211277b9d838039d0eb15e50a10f9ac3d1 | [
"BSD-3-Clause"
] | 2 | 2015-12-22T04:13:10.000Z | 2021-07-06T21:18:04.000Z |
IN_LONG_VERSION_PY = True
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by github's download-from-tag
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.7+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
GIT = "git"
import subprocess
import sys
def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
# _version.py, so we do it with a regexp instead. This function is not
# used from _version.py.
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {} # unexpanded, so not in an unpacked git-archive tarball
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print("discarding '%s', no digits" % ref)
refs.discard(ref)
# Assume all version tags have a digit. git's %d expansion
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print("remaining refs: %s" % ",".join(sorted(refs)))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
# string, meaning we're inside a checked out source tree.
try:
here = os.path.realpath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {} # not always correct
# versionfile_source is the relative path from the top of the source tree
# (where the .git directory might live) to this file. Invert this to find
# the root from __file__.
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.realpath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {} # without __file__, we have no hope
# versionfile_source is the relative path from the top of the source
# tree to _version.py. Invert this to find the root from __file__.
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
# we're running from versioneer.py, which means we're running from
# the setup.py in a source tree. sys.argv[0] is setup.py in the root.
here = os.path.realpath(sys.argv[0])
root = os.path.dirname(here)
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = ""
parentdir_prefix = "Bokeh-"
versionfile_source = "bokeh/_version.py"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if not ver:
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if not ver:
ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
verbose)
if not ver:
ver = default
return ver
| 40.362245 | 87 | 0.627102 |
IN_LONG_VERSION_PY = True
# feature). Distribution tarballs (build by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.7+ (https://github.com/warner/python-versioneer)
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
GIT = "git"
import subprocess
import sys
def run_command(args, cwd=None, verbose=False):
try:
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen(args, stdout=subprocess.PIPE, cwd=cwd)
except EnvironmentError:
e = sys.exc_info()[1]
if verbose:
print("unable to run %s" % args[0])
print(e)
return None
stdout = p.communicate()[0].strip()
if sys.version >= '3':
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % args[0])
return None
return stdout
import sys
import re
import os.path
def get_expanded_variables(versionfile_source):
# the code embedded in _version.py can just fetch the value of these
# variables. When used from setup.py, we don't want to import
variables = {}
try:
for line in open(versionfile_source,"r").readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
variables["full"] = mo.group(1)
except EnvironmentError:
pass
return variables
def versions_from_expanded_variables(variables, tag_prefix, verbose=False):
refnames = variables["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("variables are unexpanded, not using")
return {}
refs = set([r.strip() for r in refnames.strip("()").split(",")])
for ref in list(refs):
if not re.search(r'\d', ref):
if verbose:
print("discarding '%s', no digits" % ref)
refs.discard(ref)
# behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us
# distinguish between branches and tags. By ignoring refnames
# without digits, we filter out many common branch names like
# "release" and "stabilization", as well as "HEAD" and "master".
if verbose:
print("remaining refs: %s" % ",".join(sorted(refs)))
for ref in sorted(refs):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return { "version": r,
"full": variables["full"].strip() }
# no suitable tags, so we use the full revision id
if verbose:
print("no suitable tags, using full revision id")
return { "version": variables["full"].strip(),
"full": variables["full"].strip() }
def versions_from_vcs(tag_prefix, versionfile_source, verbose=False):
# this runs 'git' from the root of the source tree. That either means
# someone ran a setup.py command (and this code is in versioneer.py, so
# IN_LONG_VERSION_PY=False, thus the containing directory is the root of
# the source tree), or someone ran a project-specific entry point (and
# this code is in _version.py, so IN_LONG_VERSION_PY=True, thus the
# containing directory is somewhere deeper in the source tree). This only
# gets called if the git-archive 'subst' variables were *not* expanded,
# and _version.py hasn't already been rewritten with a short version
try:
here = os.path.realpath(__file__)
except NameError:
# some py2exe/bbfreeze/non-CPython implementations don't do __file__
return {}
root = here
if IN_LONG_VERSION_PY:
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
root = os.path.dirname(here)
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
return {}
stdout = run_command([GIT, "describe", "--tags", "--dirty", "--always"],
cwd=root)
if stdout is None:
return {}
if not stdout.startswith(tag_prefix):
if verbose:
print("tag '%s' doesn't start with prefix '%s'" % (stdout, tag_prefix))
return {}
tag = stdout[len(tag_prefix):]
stdout = run_command([GIT, "rev-parse", "HEAD"], cwd=root)
if stdout is None:
return {}
full = stdout.strip()
if tag.endswith("-dirty"):
full += "-dirty"
return {"version": tag, "full": full}
def versions_from_parentdir(parentdir_prefix, versionfile_source, verbose=False):
if IN_LONG_VERSION_PY:
# We're running from _version.py. If it's from a source tree
# (execute-in-place), we can work upwards to find the root of the
# tree, and then check the parent directory for a version string. If
# it's in an installed application, there's no hope.
try:
here = os.path.realpath(__file__)
except NameError:
# py2exe/bbfreeze/non-CPython don't have __file__
return {}
root = here
for i in range(len(versionfile_source.split("/"))):
root = os.path.dirname(root)
else:
here = os.path.realpath(sys.argv[0])
root = os.path.dirname(here)
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with prefix '%s'" %
(root, dirname, parentdir_prefix))
return None
return {"version": dirname[len(parentdir_prefix):], "full": ""}
tag_prefix = ""
parentdir_prefix = "Bokeh-"
versionfile_source = "bokeh/_version.py"
def get_versions(default={"version": "unknown", "full": ""}, verbose=False):
variables = { "refnames": git_refnames, "full": git_full }
ver = versions_from_expanded_variables(variables, tag_prefix, verbose)
if not ver:
ver = versions_from_vcs(tag_prefix, versionfile_source, verbose)
if not ver:
ver = versions_from_parentdir(parentdir_prefix, versionfile_source,
verbose)
if not ver:
ver = default
return ver
| true | true |
f7248a22b006a256442a62ed9db4b0498100a0da | 867 | py | Python | templates/template.py | zara-ms/python_class | 8a9529e14e84c3c2e3fd0a7c793fcb09471ea55c | [
"MIT"
] | null | null | null | templates/template.py | zara-ms/python_class | 8a9529e14e84c3c2e3fd0a7c793fcb09471ea55c | [
"MIT"
] | null | null | null | templates/template.py | zara-ms/python_class | 8a9529e14e84c3c2e3fd0a7c793fcb09471ea55c | [
"MIT"
] | 3 | 2021-04-09T18:40:26.000Z | 2021-09-07T01:15:03.000Z | ´´´
## NAME
[programName].py
## VERSION
[#.#]
## AUTHOR
Zara Paulina Martinez Sanchez <zaram042001@gmail.com>
[Other authors]: [Modifications]
## DATE
[dd/mm/yyyy]
## DESCRIPTION
[briefly describe what the program does]
## CATEGORY
[category of the program: sequence analysis for example]
## USAGE
[programName][-options/arguments]
## ARGUMENTS
[name] [description]
[name] [description]
parser.add_argument("-i", "--input",
metavar="path/to/file",
help="Input file",
required=True)
parser.add_argument("-o", "--output",
help="Output file",
required=False)
## FUNCTIONS
funtionName(arguments):
[Description]
return()
## EXAMPLES
[Example 1: describe the example, input and outputs]
## GITHUB LINK
[Link]
´´´
| 14.213115 | 57 | 0.585928 | ´´´
gramName].py
na Martinez Sanchez <zaram042001@gmail.com>
[Other authors]: [Modifications]
/mm/yyyy]
describe what the program does]
ry of the program: sequence analysis for example]
gramName][-options/arguments]
[description]
[name] [description]
parser.add_argument("-i", "--input",
metavar="path/to/file",
help="Input file",
required=True)
parser.add_argument("-o", "--output",
help="Output file",
required=False)
ame(arguments):
[Description]
return()
e 1: describe the example, input and outputs]
´´
| false | true |
f7248af32f72c111effbd60171246b9815ed3cb7 | 368 | py | Python | sol_runner.py | Square789/AoC | 041aecb9e1a06b5417bdef0eb0ab70a542be04b5 | [
"MIT"
] | 3 | 2020-12-05T17:43:51.000Z | 2020-12-06T10:37:29.000Z | sol_runner.py | Square789/AoC | 041aecb9e1a06b5417bdef0eb0ab70a542be04b5 | [
"MIT"
] | null | null | null | sol_runner.py | Square789/AoC | 041aecb9e1a06b5417bdef0eb0ab70a542be04b5 | [
"MIT"
] | null | null | null | import importlib
import sys
from aoc_input import get_input
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Specify which file to run! [year, day]")
sys.exit()
try:
year = int(sys.argv[1])
day = int(sys.argv[2])
except ValueError:
print("Integer required!")
sys.exit()
module = importlib.import_module(f"y{year}.d{day:>02}")
module.main()
| 18.4 | 56 | 0.673913 | import importlib
import sys
from aoc_input import get_input
if __name__ == "__main__":
if len(sys.argv) < 3:
print("Specify which file to run! [year, day]")
sys.exit()
try:
year = int(sys.argv[1])
day = int(sys.argv[2])
except ValueError:
print("Integer required!")
sys.exit()
module = importlib.import_module(f"y{year}.d{day:>02}")
module.main()
| true | true |
f7248b04de6e0f200dd961244469207c2c19aa5a | 39,165 | py | Python | validator/sawtooth_validator/gossip/gossip.py | ltavag/sawtooth-core | 50659f23437b27ecd666d4cf129f812e6adaedc4 | [
"Apache-2.0"
] | 1 | 2018-04-24T11:42:36.000Z | 2018-04-24T11:42:36.000Z | validator/sawtooth_validator/gossip/gossip.py | ltavag/sawtooth-core | 50659f23437b27ecd666d4cf129f812e6adaedc4 | [
"Apache-2.0"
] | null | null | null | validator/sawtooth_validator/gossip/gossip.py | ltavag/sawtooth-core | 50659f23437b27ecd666d4cf129f812e6adaedc4 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import logging
import copy
import time
import random
import os
import binascii
from threading import Lock
from functools import partial
from collections import namedtuple
from enum import Enum
from sawtooth_validator.concurrent.thread import InstrumentedThread
from sawtooth_validator.protobuf.network_pb2 import DisconnectMessage
from sawtooth_validator.protobuf.network_pb2 import GossipMessage
from sawtooth_validator.protobuf.network_pb2 import GossipBatchByBatchIdRequest
from sawtooth_validator.protobuf.network_pb2 import \
GossipBatchByTransactionIdRequest
from sawtooth_validator.protobuf.network_pb2 import GossipBlockRequest
from sawtooth_validator.protobuf import validator_pb2
from sawtooth_validator.protobuf.network_pb2 import PeerRegisterRequest
from sawtooth_validator.protobuf.network_pb2 import PeerUnregisterRequest
from sawtooth_validator.protobuf.network_pb2 import GetPeersRequest
from sawtooth_validator.protobuf.network_pb2 import GetPeersResponse
from sawtooth_validator.protobuf.network_pb2 import NetworkAcknowledgement
from sawtooth_validator.exceptions import PeeringException
# Module-level logger; all gossip/topology diagnostics are emitted through it.
LOGGER = logging.getLogger(__name__)
# Connection states tracked by the topology manager. PEER marks a fully
# registered gossip peer; TEMP marks a temporary (unregistered) connection;
# CLOSED marks a connection that has been shut down.
PeerStatus = Enum(
    'PeerStatus',
    [
        ('CLOSED', 1),
        ('TEMP', 2),
        ('PEER', 3),
    ],
)
# How a known endpoint will be used: PEERING endpoints are candidates for a
# peer connection; TOPOLOGY endpoints are only asked for their peer lists.
EndpointStatus = Enum(
    'EndpointStatus',
    [
        ('PEERING', 1),
        ('TOPOLOGY', 2),
    ],
)
# Per-endpoint connection bookkeeping: its EndpointStatus, a timestamp, and
# the current retry threshold.
EndpointInfo = namedtuple(
    'EndpointInfo', 'status time retry_threshold')

# Retry bookkeeping for statically configured peers: a timestamp, the current
# retry threshold, and the number of attempts made so far.
StaticPeerInfo = namedtuple(
    'StaticPeerInfo', 'time retry_threshold count')

# Retry backoff bounds and limits for connection attempts.
INITIAL_RETRY_FREQUENCY = 10
MAXIMUM_RETRY_FREQUENCY = 300
MAXIMUM_STATIC_RETRY_FREQUENCY = 3600
MAXIMUM_STATIC_RETRIES = 24

# Default hop count for gossip messages, used when the on-chain setting
# sawtooth.gossip.time_to_live is not set.
TIME_TO_LIVE = 3

# This is the protocol version number. It should only be incremented when
# there are changes to the network protocols, as well as only once per
# release.
NETWORK_PROTOCOL_VERSION = 1
class Gossip(object):
def __init__(self, network,
settings_cache,
current_chain_head_func,
current_root_func,
endpoint=None,
peering_mode='static',
initial_seed_endpoints=None,
initial_peer_endpoints=None,
minimum_peer_connectivity=3,
maximum_peer_connectivity=10,
topology_check_frequency=1
):
"""Constructor for the Gossip object. Gossip defines the
overlay network above the lower level networking classes.
Args:
network (networking.Interconnect): Provides inbound and
outbound network connections.
settings_cache (state.SettingsCache): A cache for on chain
settings.
current_chain_head_func (function): returns the current chain head.
current_root_func (function): returns the current state root hash
for the current chain root.
endpoint (str): The publically accessible zmq-style uri
endpoint for this validator.
peering_mode (str): The type of peering approach. Either 'static'
or 'dynamic'. In 'static' mode, no attempted topology
buildout occurs -- the validator only attempts to initiate
peering connections with endpoints specified in the
peer_list. In 'dynamic' mode, the validator will first
attempt to initiate peering connections with endpoints
specified in the peer_list and then attempt to do a
topology buildout starting with peer lists obtained from
endpoints in the seeds_list. In either mode, the validator
will accept incoming peer requests up to max_peers.
initial_seed_endpoints ([str]): A list of initial endpoints
to attempt to connect and gather initial topology buildout
information from. These are specified as zmq-compatible
URIs (e.g. tcp://hostname:port).
initial_peer_endpoints ([str]): A list of initial peer endpoints
to attempt to connect and peer with. These are specified
as zmq-compatible URIs (e.g. tcp://hostname:port).
minimum_peer_connectivity (int): If the number of connected
peers is below this threshold, the topology builder will
continue to attempt to identify new candidate peers to
connect with.
maximum_peer_connectivity (int): The validator will reject
new peer requests if the number of connected peers
reaches this threshold.
topology_check_frequency (int): The time in seconds between
topology update checks.
"""
self._peering_mode = peering_mode
self._lock = Lock()
self._network = network
self._endpoint = endpoint
self._initial_seed_endpoints = initial_seed_endpoints \
if initial_seed_endpoints else []
self._initial_peer_endpoints = initial_peer_endpoints \
if initial_peer_endpoints else []
self._minimum_peer_connectivity = minimum_peer_connectivity
self._maximum_peer_connectivity = maximum_peer_connectivity
self._topology_check_frequency = topology_check_frequency
self._settings_cache = settings_cache
self._current_chain_head_func = current_chain_head_func
self._current_root_func = current_root_func
self._topology = None
self._peers = {}
def send_peers(self, connection_id):
"""Sends a message containing our peers to the
connection identified by connection_id.
Args:
connection_id (str): A unique identifier which identifies an
connection on the network server socket.
"""
with self._lock:
# Needs to actually be the list of advertised endpoints of
# our peers
peer_endpoints = list(self._peers.values())
if self._endpoint:
peer_endpoints.append(self._endpoint)
peers_response = GetPeersResponse(peer_endpoints=peer_endpoints)
try:
# Send a one_way message because the connection will be closed
# if this is a temp connection.
self._network.send(
validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,
peers_response.SerializeToString(),
connection_id,
one_way=True)
except ValueError:
LOGGER.debug("Connection disconnected: %s", connection_id)
def add_candidate_peer_endpoints(self, peer_endpoints):
"""Adds candidate endpoints to the list of endpoints to
attempt to peer with.
Args:
peer_endpoints ([str]): A list of public uri's which the
validator can attempt to peer with.
"""
if self._topology:
self._topology.add_candidate_peer_endpoints(peer_endpoints)
else:
LOGGER.debug("Could not add peer endpoints to topology. "
"ConnectionManager does not exist.")
def get_peers(self):
"""Returns a copy of the gossip peers.
"""
with self._lock:
return copy.copy(self._peers)
@property
def endpoint(self):
"""Returns the validator's public endpoint.
"""
return self._endpoint
def register_peer(self, connection_id, endpoint):
"""Registers a connected connection_id.
Args:
connection_id (str): A unique identifier which identifies an
connection on the network server socket.
endpoint (str): The publically reachable endpoint of the new
peer
"""
with self._lock:
if len(self._peers) < self._maximum_peer_connectivity:
self._peers[connection_id] = endpoint
self._topology.set_connection_status(connection_id,
PeerStatus.PEER)
LOGGER.debug("Added connection_id %s with endpoint %s, "
"connected identities are now %s",
connection_id, endpoint, self._peers)
else:
raise PeeringException(
"At maximum configured number of peers: {} "
"Rejecting peering request from {}.".format(
self._maximum_peer_connectivity,
endpoint))
def unregister_peer(self, connection_id):
"""Removes a connection_id from the registry.
Args:
connection_id (str): A unique identifier which identifies an
connection on the network server socket.
"""
with self._lock:
if connection_id in self._peers:
del self._peers[connection_id]
LOGGER.debug("Removed connection_id %s, "
"connected identities are now %s",
connection_id, self._peers)
self._topology.set_connection_status(connection_id,
PeerStatus.TEMP)
else:
LOGGER.warning("Connection unregister failed as connection "
"was not registered: %s",
connection_id)
def get_time_to_live(self):
time_to_live = \
self._settings_cache.get_setting(
"sawtooth.gossip.time_to_live",
self._current_root_func(),
default_value=TIME_TO_LIVE
)
return int(time_to_live)
def broadcast_block(self, block, exclude=None, time_to_live=None):
if time_to_live is None:
time_to_live = self.get_time_to_live()
gossip_message = GossipMessage(
content_type=GossipMessage.BLOCK,
content=block.SerializeToString(),
time_to_live=time_to_live)
self.broadcast(
gossip_message, validator_pb2.Message.GOSSIP_MESSAGE, exclude)
def broadcast_block_request(self, block_id):
time_to_live = self.get_time_to_live()
block_request = GossipBlockRequest(
block_id=block_id,
nonce=binascii.b2a_hex(os.urandom(16)),
time_to_live=time_to_live)
self.broadcast(block_request,
validator_pb2.Message.GOSSIP_BLOCK_REQUEST)
def send_block_request(self, block_id, connection_id):
time_to_live = self.get_time_to_live()
block_request = GossipBlockRequest(
block_id=block_id,
nonce=binascii.b2a_hex(os.urandom(16)),
time_to_live=time_to_live)
self.send(validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
block_request.SerializeToString(),
connection_id,
one_way=True)
def broadcast_batch(self, batch, exclude=None, time_to_live=None):
if time_to_live is None:
time_to_live = self.get_time_to_live()
gossip_message = GossipMessage(
content_type=GossipMessage.BATCH,
content=batch.SerializeToString(),
time_to_live=time_to_live)
self.broadcast(
gossip_message, validator_pb2.Message.GOSSIP_MESSAGE, exclude)
def broadcast_batch_by_transaction_id_request(self, transaction_ids):
time_to_live = self.get_time_to_live()
batch_request = GossipBatchByTransactionIdRequest(
ids=transaction_ids,
nonce=binascii.b2a_hex(os.urandom(16)),
time_to_live=time_to_live)
self.broadcast(
batch_request,
validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST)
def broadcast_batch_by_batch_id_request(self, batch_id):
time_to_live = self.get_time_to_live()
batch_request = GossipBatchByBatchIdRequest(
id=batch_id,
nonce=binascii.b2a_hex(os.urandom(16)),
time_to_live=time_to_live)
self.broadcast(
batch_request,
validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST)
def send(self, message_type, message, connection_id, one_way=False):
"""Sends a message via the network.
Args:
message_type (str): The type of the message.
message (bytes): The message to be sent.
connection_id (str): The connection to send it to.
"""
try:
self._network.send(message_type, message, connection_id,
one_way=one_way)
except ValueError:
LOGGER.debug("Connection %s is no longer valid. "
"Removing from list of peers.",
connection_id)
if connection_id in self._peers:
del self._peers[connection_id]
def broadcast(self, gossip_message, message_type, exclude=None):
"""Broadcast gossip messages.
Broadcast the message to all peers unless they are in the excluded
list.
Args:
gossip_message: The message to be broadcast.
message_type: Type of the message.
exclude: A list of connection_ids that should be excluded from this
broadcast.
"""
with self._lock:
if exclude is None:
exclude = []
for connection_id in self._peers.copy():
if connection_id not in exclude and \
self._network.is_connection_handshake_complete(
connection_id):
self.send(
message_type,
gossip_message.SerializeToString(),
connection_id,
one_way=True)
def connect_success(self, connection_id):
"""
Notify topology that a connection has been properly authorized
Args:
connection_id: The connection id for the authorized connection.
"""
if self._topology:
self._topology.connect_success(connection_id)
def remove_temp_endpoint(self, endpoint):
"""
Remove temporary endpoints that never finished authorization.
Args:
endpoint: The endpoint that is not authorized to connect to the
network.
"""
if self._topology:
self._topology.remove_temp_endpoint(endpoint)
    def start(self):
        """Create the ConnectionManager topology thread and start it."""
        self._topology = ConnectionManager(
            gossip=self,
            network=self._network,
            endpoint=self._endpoint,
            current_chain_head_func=self._current_chain_head_func,
            initial_peer_endpoints=self._initial_peer_endpoints,
            initial_seed_endpoints=self._initial_seed_endpoints,
            peering_mode=self._peering_mode,
            min_peers=self._minimum_peer_connectivity,
            max_peers=self._maximum_peer_connectivity,
            check_frequency=self._topology_check_frequency)
        self._topology.start()
def stop(self):
for peer in self.get_peers():
request = PeerUnregisterRequest()
try:
self._network.send(validator_pb2.Message.GOSSIP_UNREGISTER,
request.SerializeToString(),
peer)
except ValueError:
pass
if self._topology:
self._topology.stop()
class ConnectionManager(InstrumentedThread):
    """Background thread that maintains the validator's peer topology.

    Each ``check_frequency`` seconds the thread either retries the static
    peer list or performs a dynamic topology search, attempting to keep
    the peer count between ``min_peers`` and ``max_peers``.
    """

    def __init__(self, gossip, network, endpoint,
                 current_chain_head_func,
                 initial_peer_endpoints, initial_seed_endpoints,
                 peering_mode, min_peers=3, max_peers=10,
                 check_frequency=1):
        """Constructor for the ConnectionManager class.

        Args:
            gossip (gossip.Gossip): The gossip overlay network.
            network (network.Interconnect): The underlying network.
            endpoint (str): A zmq-style endpoint uri representing
                this validator's publically reachable endpoint.
            current_chain_head_func (function): Returns the current chain head.
            initial_peer_endpoints ([str]): A list of static peers
                to attempt to connect and peer with.
            initial_seed_endpoints ([str]): A list of endpoints to
                connect to and get candidate peer lists to attempt
                to reach min_peers threshold.
            peering_mode (str): Either 'static' or 'dynamic'. 'static'
                only connects to peers in initial_peer_endpoints.
                'dynamic' connects to peers in initial_peer_endpoints
                and gets candidate peer lists from initial_seed_endpoints.
            min_peers (int): The minimum number of peers required to stop
                attempting candidate connections.
            max_peers (int): The maximum number of active peer connections
                to allow.
            check_frequency (int): How often to attempt dynamic connectivity.
        """
        super().__init__(name="ConnectionManager")
        self._lock = Lock()
        self._stopped = False
        self._gossip = gossip
        self._network = network
        self._endpoint = endpoint
        self._current_chain_head_func = current_chain_head_func
        self._initial_peer_endpoints = initial_peer_endpoints
        self._initial_seed_endpoints = initial_seed_endpoints
        self._peering_mode = peering_mode
        self._min_peers = min_peers
        self._max_peers = max_peers
        self._check_frequency = check_frequency
        self._candidate_peer_endpoints = []
        # Seconds to wait for messages to arrive
        self._response_duration = 2
        # connection_id -> PeerStatus for every tracked connection
        self._connection_statuses = {}
        # endpoint -> EndpointInfo for connections still authorizing
        self._temp_endpoints = {}
        # endpoint -> StaticPeerInfo retry bookkeeping (static mode)
        self._static_peer_status = {}

    def start(self):
        # First, attempt to connect to explicit peers
        for endpoint in self._initial_peer_endpoints:
            self._static_peer_status[endpoint] = \
                StaticPeerInfo(
                    time=0,
                    retry_threshold=INITIAL_RETRY_FREQUENCY,
                    count=0)

        super().start()

    def run(self):
        """Topology maintenance loop; runs until stop() is called."""
        has_chain_head = self._current_chain_head_func() is not None
        while not self._stopped:
            try:
                if self._peering_mode == 'dynamic':
                    self.retry_dynamic_peering()
                elif self._peering_mode == 'static':
                    self.retry_static_peering()
                # This tests for a degenerate case where the node is connected
                # to peers, but at first connection no peer had a valid chain
                # head. Keep querying connected peers until a valid chain head
                # is received.
                has_chain_head = has_chain_head or \
                    self._current_chain_head_func() is not None
                if not has_chain_head:
                    peered_connections = self._get_peered_connections()
                    if peered_connections:
                        LOGGER.debug(
                            'Have not received a chain head from peers. '
                            'Requesting from %s',
                            peered_connections)
                        self._request_chain_head(peered_connections)

                time.sleep(self._check_frequency)
            except Exception:  # pylint: disable=broad-except
                # Deliberately broad: the topology thread must survive
                # any single failed refresh.
                LOGGER.exception("Unhandled exception during peer refresh")

    def stop(self):
        """Stop the loop and send a disconnect to every open connection."""
        self._stopped = True
        for connection_id in self._connection_statuses:
            try:
                if self._connection_statuses[connection_id] == \
                        PeerStatus.CLOSED:
                    continue

                msg = DisconnectMessage()
                self._network.send(
                    validator_pb2.Message.NETWORK_DISCONNECT,
                    msg.SerializeToString(),
                    connection_id)
                self._connection_statuses[connection_id] = PeerStatus.CLOSED
            except ValueError:
                # Connection has already been disconnected.
                pass

    def _get_peered_connections(self):
        # Connection ids of current peers whose status is PEER.
        peers = self._gossip.get_peers()

        return [conn_id for conn_id in peers
                if self._connection_statuses[conn_id] == PeerStatus.PEER]

    def _request_chain_head(self, peered_connections):
        """Request chain head from the given peer ids.

        Args:
            peered_connections (:list:str): a list of peer connection ids
                where the requests will be sent.
        """
        for conn_id in peered_connections:
            self._gossip.send_block_request("HEAD", conn_id)

    def retry_dynamic_peering(self):
        """One round of dynamic topology search: when below min_peers,
        query peers and seed endpoints for candidates, then attempt to
        peer with one randomly-chosen unpeered candidate."""
        self._refresh_peer_list(self._gossip.get_peers())
        peers = self._gossip.get_peers()
        peer_count = len(peers)
        if peer_count < self._min_peers:
            LOGGER.debug(
                "Number of peers (%s) below "
                "minimum peer threshold (%s). "
                "Doing topology search.",
                peer_count,
                self._min_peers)

            self._reset_candidate_peer_endpoints()
            self._refresh_peer_list(peers)
            # Cleans out any old connections that have disconnected
            self._refresh_connection_list()
            self._check_temp_endpoints()

            peers = self._gossip.get_peers()

            self._get_peers_of_peers(peers)
            self._get_peers_of_endpoints(
                peers,
                self._initial_seed_endpoints)

            # Wait for GOSSIP_GET_PEER_RESPONSE messages to arrive
            time.sleep(self._response_duration)

            peered_endpoints = list(peers.values())

            with self._lock:
                unpeered_candidates = list(
                    set(self._candidate_peer_endpoints)
                    - set(peered_endpoints)
                    - set([self._endpoint]))

            LOGGER.debug(
                "Peers are: %s. "
                "Unpeered candidates are: %s",
                peered_endpoints,
                unpeered_candidates)

            if unpeered_candidates:
                self._attempt_to_peer_with_endpoint(
                    random.choice(unpeered_candidates))

    def retry_static_peering(self):
        """One round of static peering: retry each configured endpoint
        with exponential backoff, dropping endpoints that exhaust their
        retries."""
        with self._lock:
            # Endpoints that have reached their retry count and should be
            # removed
            to_remove = []
            for endpoint in self._initial_peer_endpoints:
                connection_id = None
                try:
                    connection_id = \
                        self._network.get_connection_id_by_endpoint(endpoint)
                except KeyError:
                    pass

                static_peer_info = self._static_peer_status[endpoint]
                if connection_id is not None:
                    if connection_id in self._connection_statuses:
                        # Endpoint is already a Peer
                        if self._connection_statuses[connection_id] == \
                                PeerStatus.PEER:
                            # reset static peering info
                            self._static_peer_status[endpoint] = \
                                StaticPeerInfo(
                                    time=0,
                                    retry_threshold=INITIAL_RETRY_FREQUENCY,
                                    count=0)
                            continue

                if (time.time() - static_peer_info.time) > \
                        static_peer_info.retry_threshold:
                    LOGGER.debug("Endpoint has not completed authorization in "
                                 "%s seconds: %s",
                                 static_peer_info.retry_threshold,
                                 endpoint)
                    if connection_id is not None:
                        # If the connection exists remove it before retrying to
                        # authorize.
                        try:
                            self._network.remove_connection(connection_id)
                        except KeyError:
                            pass

                    if static_peer_info.retry_threshold == \
                            MAXIMUM_STATIC_RETRY_FREQUENCY:
                        if static_peer_info.count >= MAXIMUM_STATIC_RETRIES:
                            # Unable to peer with endpoint
                            to_remove.append(endpoint)
                            continue
                        else:
                            # At maximum retry threshold, increment count
                            self._static_peer_status[endpoint] = \
                                StaticPeerInfo(
                                    time=time.time(),
                                    retry_threshold=min(
                                        static_peer_info.retry_threshold * 2,
                                        MAXIMUM_STATIC_RETRY_FREQUENCY),
                                    count=static_peer_info.count + 1)
                    else:
                        self._static_peer_status[endpoint] = \
                            StaticPeerInfo(
                                time=time.time(),
                                retry_threshold=min(
                                    static_peer_info.retry_threshold * 2,
                                    MAXIMUM_STATIC_RETRY_FREQUENCY),
                                count=0)

                    LOGGER.debug("attempting to peer with %s", endpoint)
                    self._network.add_outbound_connection(endpoint)
                    self._temp_endpoints[endpoint] = EndpointInfo(
                        EndpointStatus.PEERING,
                        time.time(),
                        INITIAL_RETRY_FREQUENCY)

            for endpoint in to_remove:
                # Endpoints that have reached their retry count and should be
                # removed
                self._initial_peer_endpoints.remove(endpoint)
                del self._static_peer_status[endpoint]

    def add_candidate_peer_endpoints(self, peer_endpoints):
        """Adds candidate endpoints to the list of endpoints to
        attempt to peer with.

        Args:
            peer_endpoints ([str]): A list of public uri's which the
                validator can attempt to peer with.
        """
        with self._lock:
            for endpoint in peer_endpoints:
                if endpoint not in self._candidate_peer_endpoints:
                    self._candidate_peer_endpoints.append(endpoint)

    def set_connection_status(self, connection_id, status):
        # status is a PeerStatus value.
        self._connection_statuses[connection_id] = status

    def remove_temp_endpoint(self, endpoint):
        """Forget an endpoint whose authorization will not complete."""
        with self._lock:
            if endpoint in self._temp_endpoints:
                del self._temp_endpoints[endpoint]

    def _check_temp_endpoints(self):
        """Re-attempt connections for endpoints that exceeded their
        authorization backoff, doubling the backoff each time."""
        with self._lock:
            for endpoint in self._temp_endpoints:
                endpoint_info = self._temp_endpoints[endpoint]
                if (time.time() - endpoint_info.time) > \
                        endpoint_info.retry_threshold:
                    LOGGER.debug("Endpoint has not completed authorization in "
                                 "%s seconds: %s",
                                 endpoint_info.retry_threshold,
                                 endpoint)
                    try:
                        # If the connection exists remove it before retrying to
                        # authorize. If the connection does not exist, a
                        # KeyError will be thrown.
                        conn_id = \
                            self._network.get_connection_id_by_endpoint(
                                endpoint)
                        self._network.remove_connection(conn_id)
                    except KeyError:
                        pass

                    self._network.add_outbound_connection(endpoint)
                    self._temp_endpoints[endpoint] = EndpointInfo(
                        endpoint_info.status,
                        time.time(),
                        min(endpoint_info.retry_threshold * 2,
                            MAXIMUM_RETRY_FREQUENCY))

    def _refresh_peer_list(self, peers):
        """Unregister any peer whose underlying connection went away."""
        for conn_id in peers:
            try:
                self._network.get_connection_id_by_endpoint(
                    peers[conn_id])
            except KeyError:
                LOGGER.debug("removing peer %s because "
                             "connection went away",
                             peers[conn_id])

                self._gossip.unregister_peer(conn_id)
                if conn_id in self._connection_statuses:
                    del self._connection_statuses[conn_id]

    def _refresh_connection_list(self):
        """Drop status bookkeeping for connections the network no longer
        holds."""
        with self._lock:
            closed_connections = []
            for connection_id in self._connection_statuses:
                if not self._network.has_connection(connection_id):
                    closed_connections.append(connection_id)

            for connection_id in closed_connections:
                del self._connection_statuses[connection_id]

    def _get_peers_of_peers(self, peers):
        """Send a GetPeersRequest to every current peer."""
        get_peers_request = GetPeersRequest()

        for conn_id in peers:
            try:
                self._network.send(
                    validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
                    get_peers_request.SerializeToString(),
                    conn_id)
            except ValueError:
                LOGGER.debug("Peer disconnected: %s", conn_id)

    def _get_peers_of_endpoints(self, peers, endpoints):
        """Request peer lists from the given (seed) endpoints, opening
        new outbound connections where none exist yet."""
        get_peers_request = GetPeersRequest()

        for endpoint in endpoints:
            conn_id = None
            try:
                conn_id = self._network.get_connection_id_by_endpoint(
                    endpoint)
            except KeyError:
                # If the connection does not exist, send a connection request
                with self._lock:
                    if endpoint in self._temp_endpoints:
                        del self._temp_endpoints[endpoint]

                    self._temp_endpoints[endpoint] = EndpointInfo(
                        EndpointStatus.TOPOLOGY,
                        time.time(),
                        INITIAL_RETRY_FREQUENCY)
                    self._network.add_outbound_connection(endpoint)

            # If the connection does exist, request peers.
            if conn_id is not None:
                if not self._network.is_connection_handshake_complete(conn_id):
                    # has not finished the authorization (trust/challenge)
                    # process yet.
                    continue
                elif conn_id in peers:
                    # connected and peered - we've already sent peer request
                    continue
                else:
                    # connected but not peered
                    if endpoint in self._temp_endpoints:
                        # Endpoint is not yet authorized, do not request peers
                        continue

                    try:
                        self._network.send(
                            validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
                            get_peers_request.SerializeToString(),
                            conn_id)
                    except ValueError:
                        LOGGER.debug("Connection disconnected: %s", conn_id)

    def _attempt_to_peer_with_endpoint(self, endpoint):
        """Send a peer registration to the endpoint, connecting first if
        no connection exists."""
        LOGGER.debug("Attempting to connect/peer with %s", endpoint)

        # check if the connection exists, if it does - send,
        # otherwise create it
        try:
            connection_id = \
                self._network.get_connection_id_by_endpoint(
                    endpoint)

            register_request = PeerRegisterRequest(
                endpoint=self._endpoint,
                protocol_version=NETWORK_PROTOCOL_VERSION)
            self._network.send(
                validator_pb2.Message.GOSSIP_REGISTER,
                register_request.SerializeToString(),
                connection_id,
                callback=partial(
                    self._peer_callback,
                    endpoint=endpoint,
                    connection_id=connection_id))
        except KeyError:
            # if the connection uri wasn't found in the network's
            # connections, it raises a KeyError and we need to add
            # a new outbound connection
            with self._lock:
                self._temp_endpoints[endpoint] = EndpointInfo(
                    EndpointStatus.PEERING,
                    time.time(),
                    INITIAL_RETRY_FREQUENCY)
            self._network.add_outbound_connection(endpoint)

    def _reset_candidate_peer_endpoints(self):
        with self._lock:
            self._candidate_peer_endpoints = []

    def _peer_callback(self, request, result, connection_id, endpoint=None):
        """Handle the acknowledgement of a GOSSIP_REGISTER request,
        registering the peer on OK and tearing the connection down
        otherwise."""
        with self._lock:
            ack = NetworkAcknowledgement()
            ack.ParseFromString(result.content)

            if ack.status == ack.ERROR:
                LOGGER.debug("Peering request to %s was NOT successful",
                             connection_id)
                self._remove_temporary_connection(connection_id)
            elif ack.status == ack.OK:
                LOGGER.debug("Peering request to %s was successful",
                             connection_id)
                if endpoint:
                    try:
                        self._gossip.register_peer(connection_id, endpoint)
                        self._connection_statuses[connection_id] = \
                            PeerStatus.PEER
                        self._gossip.send_block_request("HEAD", connection_id)
                    except PeeringException as e:
                        # Remove unsuccessful peer
                        LOGGER.warning('Unable to successfully peer with '
                                       'connection_id: %s, due to %s',
                                       connection_id, str(e))

                        self._remove_temporary_connection(connection_id)
                else:
                    LOGGER.debug("Cannot register peer with no endpoint for "
                                 "connection_id: %s",
                                 connection_id)
                    self._remove_temporary_connection(connection_id)

    def _remove_temporary_connection(self, connection_id):
        """Disconnect and forget a TEMP connection; PEER and unknown
        connections are left alone."""
        status = self._connection_statuses.get(connection_id)
        if status == PeerStatus.TEMP:
            LOGGER.debug("Closing connection to %s", connection_id)
            msg = DisconnectMessage()
            try:
                self._network.send(validator_pb2.Message.NETWORK_DISCONNECT,
                                   msg.SerializeToString(),
                                   connection_id)
            except ValueError:
                pass
            del self._connection_statuses[connection_id]
            self._network.remove_connection(connection_id)
        elif status == PeerStatus.PEER:
            LOGGER.debug("Connection close request for peer ignored: %s",
                         connection_id)
        elif status is None:
            LOGGER.debug("Connection close request for unknown connection "
                         "ignored: %s",
                         connection_id)

    def connect_success(self, connection_id):
        """
        Check to see if the successful connection is meant to be peered with.
        If not, it should be used to get the peers from the endpoint.
        """
        endpoint = self._network.connection_id_to_endpoint(connection_id)
        endpoint_info = self._temp_endpoints.get(endpoint)

        LOGGER.debug("Endpoint has completed authorization: %s (id: %s)",
                     endpoint,
                     connection_id)
        if endpoint_info is None:
            LOGGER.debug("Received unknown endpoint: %s", endpoint)

        elif endpoint_info.status == EndpointStatus.PEERING:
            self._connect_success_peering(connection_id, endpoint)

        elif endpoint_info.status == EndpointStatus.TOPOLOGY:
            self._connect_success_topology(connection_id)

        else:
            LOGGER.debug("Endpoint has unknown status: %s", endpoint)

        with self._lock:
            if endpoint in self._temp_endpoints:
                del self._temp_endpoints[endpoint]

    def _connect_success_peering(self, connection_id, endpoint):
        """Authorized connection was opened in order to peer: send the
        registration request."""
        LOGGER.debug("Connection to %s succeeded", connection_id)

        register_request = PeerRegisterRequest(
            endpoint=self._endpoint,
            protocol_version=NETWORK_PROTOCOL_VERSION)
        self._connection_statuses[connection_id] = PeerStatus.TEMP
        try:
            self._network.send(
                validator_pb2.Message.GOSSIP_REGISTER,
                register_request.SerializeToString(),
                connection_id,
                callback=partial(
                    self._peer_callback,
                    connection_id=connection_id,
                    endpoint=endpoint))
        except ValueError:
            LOGGER.debug("Connection disconnected: %s", connection_id)

    def _connect_success_topology(self, connection_id):
        """Authorized connection was opened only to gather peers: request
        the peer list, then drop the connection."""
        LOGGER.debug("Connection to %s succeeded for topology request",
                     connection_id)

        self._connection_statuses[connection_id] = PeerStatus.TEMP
        get_peers_request = GetPeersRequest()

        def callback(request, result):
            # request, result are ignored, but required by the callback
            self._remove_temporary_connection(connection_id)

        try:
            self._network.send(
                validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
                get_peers_request.SerializeToString(),
                connection_id,
                callback=callback)
        except ValueError:
            LOGGER.debug("Connection disconnected: %s", connection_id)
| 41.664894 | 80 | 0.578808 |
import logging
import copy
import time
import random
import os
import binascii
from threading import Lock
from functools import partial
from collections import namedtuple
from enum import Enum
from sawtooth_validator.concurrent.thread import InstrumentedThread
from sawtooth_validator.protobuf.network_pb2 import DisconnectMessage
from sawtooth_validator.protobuf.network_pb2 import GossipMessage
from sawtooth_validator.protobuf.network_pb2 import GossipBatchByBatchIdRequest
from sawtooth_validator.protobuf.network_pb2 import \
GossipBatchByTransactionIdRequest
from sawtooth_validator.protobuf.network_pb2 import GossipBlockRequest
from sawtooth_validator.protobuf import validator_pb2
from sawtooth_validator.protobuf.network_pb2 import PeerRegisterRequest
from sawtooth_validator.protobuf.network_pb2 import PeerUnregisterRequest
from sawtooth_validator.protobuf.network_pb2 import GetPeersRequest
from sawtooth_validator.protobuf.network_pb2 import GetPeersResponse
from sawtooth_validator.protobuf.network_pb2 import NetworkAcknowledgement
from sawtooth_validator.exceptions import PeeringException
LOGGER = logging.getLogger(__name__)
class PeerStatus(Enum):
    # CLOSED: the connection has been disconnected.
    # TEMP: a provisional connection (peering/topology exchange in flight).
    # PEER: a fully registered gossip peer.
    CLOSED = 1
    TEMP = 2
    PEER = 3
class EndpointStatus(Enum):
    # PEERING: the connection is being opened in order to peer.
    # TOPOLOGY: the connection is being opened only to request peer lists.
    PEERING = 1
    TOPOLOGY = 2
# (status, creation time, current backoff) for an endpoint whose
# connection/authorization has not completed yet.
EndpointInfo = namedtuple('EndpointInfo',
                          ['status', 'time', "retry_threshold"])

# Retry bookkeeping for a statically-configured peer endpoint.
StaticPeerInfo = namedtuple('StaticPeerInfo',
                            ['time', 'retry_threshold', 'count'])

# Backoff bounds, in seconds, for re-attempting connections.
INITIAL_RETRY_FREQUENCY = 10
MAXIMUM_RETRY_FREQUENCY = 300

# Static peers back off up to an hour and are dropped after this many
# retries at the maximum backoff.
MAXIMUM_STATIC_RETRY_FREQUENCY = 3600
MAXIMUM_STATIC_RETRIES = 24

# Default for the "sawtooth.gossip.time_to_live" setting (see
# Gossip.get_time_to_live).
TIME_TO_LIVE = 3

# Version sent in PeerRegisterRequest messages.
NETWORK_PROTOCOL_VERSION = 1
class Gossip(object):
    """The gossip overlay network: tracks registered peers and
    broadcasts/sends blocks and batches to them.  Topology maintenance
    is delegated to a ConnectionManager thread created in start().
    """

    def __init__(self, network,
                 settings_cache,
                 current_chain_head_func,
                 current_root_func,
                 endpoint=None,
                 peering_mode='static',
                 initial_seed_endpoints=None,
                 initial_peer_endpoints=None,
                 minimum_peer_connectivity=3,
                 maximum_peer_connectivity=10,
                 topology_check_frequency=1
                 ):
        """
        Args:
            network (network.Interconnect): The underlying network.
            settings_cache: Used to look up the on-chain
                "sawtooth.gossip.time_to_live" setting.
            current_chain_head_func (function): Returns the current
                chain head.
            current_root_func (function): Returns the state root used
                when reading settings.
            endpoint (str): This validator's publicly reachable
                zmq-style endpoint URI, or None.
            peering_mode (str): Either 'static' or 'dynamic'.
            initial_seed_endpoints ([str]): Endpoints queried for
                candidate peer lists (dynamic mode).
            initial_peer_endpoints ([str]): Endpoints to peer with
                directly.
            minimum_peer_connectivity (int): Peer count below which a
                topology search is attempted.
            maximum_peer_connectivity (int): Maximum number of peers
                accepted in register_peer.
            topology_check_frequency (int): Seconds between topology
                checks.
        """
        self._peering_mode = peering_mode
        self._lock = Lock()
        self._network = network
        self._endpoint = endpoint
        self._initial_seed_endpoints = initial_seed_endpoints \
            if initial_seed_endpoints else []
        self._initial_peer_endpoints = initial_peer_endpoints \
            if initial_peer_endpoints else []
        self._minimum_peer_connectivity = minimum_peer_connectivity
        self._maximum_peer_connectivity = maximum_peer_connectivity
        self._topology_check_frequency = topology_check_frequency
        self._settings_cache = settings_cache
        self._current_chain_head_func = current_chain_head_func
        self._current_root_func = current_root_func
        # ConnectionManager thread; created in start().
        self._topology = None
        # connection_id -> endpoint for every registered peer.
        self._peers = {}

    def send_peers(self, connection_id):
        """Reply to a peers request: send this validator's peer endpoint
        list (plus its own endpoint) to the given connection."""
        with self._lock:
            peer_endpoints = list(self._peers.values())
            if self._endpoint:
                peer_endpoints.append(self._endpoint)
            peers_response = GetPeersResponse(peer_endpoints=peer_endpoints)
            try:
                self._network.send(
                    validator_pb2.Message.GOSSIP_GET_PEERS_RESPONSE,
                    peers_response.SerializeToString(),
                    connection_id,
                    one_way=True)
            except ValueError:
                LOGGER.debug("Connection disconnected: %s", connection_id)

    def add_candidate_peer_endpoints(self, peer_endpoints):
        """Forward newly discovered candidate endpoints to the topology
        thread, if it exists."""
        if self._topology:
            self._topology.add_candidate_peer_endpoints(peer_endpoints)
        else:
            LOGGER.debug("Could not add peer endpoints to topology. "
                         "ConnectionManager does not exist.")

    def get_peers(self):
        """Return a snapshot copy of {connection_id: endpoint} peers."""
        with self._lock:
            return copy.copy(self._peers)

    @property
    def endpoint(self):
        # This validator's publicly reachable endpoint URI (may be None).
        return self._endpoint

    def register_peer(self, connection_id, endpoint):
        """Register a new peer, up to the configured maximum.

        Raises:
            PeeringException: if the maximum peer count is reached.
        """
        with self._lock:
            if len(self._peers) < self._maximum_peer_connectivity:
                self._peers[connection_id] = endpoint
                self._topology.set_connection_status(connection_id,
                                                     PeerStatus.PEER)
                LOGGER.debug("Added connection_id %s with endpoint %s, "
                             "connected identities are now %s",
                             connection_id, endpoint, self._peers)
            else:
                raise PeeringException(
                    "At maximum configured number of peers: {} "
                    "Rejecting peering request from {}.".format(
                        self._maximum_peer_connectivity,
                        endpoint))

    def unregister_peer(self, connection_id):
        """Remove a registered peer and demote its connection to TEMP."""
        with self._lock:
            if connection_id in self._peers:
                del self._peers[connection_id]
                LOGGER.debug("Removed connection_id %s, "
                             "connected identities are now %s",
                             connection_id, self._peers)
                self._topology.set_connection_status(connection_id,
                                                     PeerStatus.TEMP)
            else:
                LOGGER.warning("Connection unregister failed as connection "
                               "was not registered: %s",
                               connection_id)

    def get_time_to_live(self):
        """Return the gossip time-to-live from on-chain settings, falling
        back to the TIME_TO_LIVE module default."""
        time_to_live = \
            self._settings_cache.get_setting(
                "sawtooth.gossip.time_to_live",
                self._current_root_func(),
                default_value=TIME_TO_LIVE
            )
        return int(time_to_live)

    def broadcast_block(self, block, exclude=None, time_to_live=None):
        """Broadcast a block to all peers (except those in exclude)."""
        if time_to_live is None:
            time_to_live = self.get_time_to_live()
        gossip_message = GossipMessage(
            content_type=GossipMessage.BLOCK,
            content=block.SerializeToString(),
            time_to_live=time_to_live)
        self.broadcast(
            gossip_message, validator_pb2.Message.GOSSIP_MESSAGE, exclude)

    def broadcast_block_request(self, block_id):
        """Ask all peers for the block with the given block id."""
        time_to_live = self.get_time_to_live()
        block_request = GossipBlockRequest(
            block_id=block_id,
            nonce=binascii.b2a_hex(os.urandom(16)),
            time_to_live=time_to_live)
        self.broadcast(block_request,
                       validator_pb2.Message.GOSSIP_BLOCK_REQUEST)

    def send_block_request(self, block_id, connection_id):
        """Ask a single connection for the block with the given id
        ("HEAD" requests the current chain head)."""
        time_to_live = self.get_time_to_live()
        block_request = GossipBlockRequest(
            block_id=block_id,
            nonce=binascii.b2a_hex(os.urandom(16)),
            time_to_live=time_to_live)
        self.send(validator_pb2.Message.GOSSIP_BLOCK_REQUEST,
                  block_request.SerializeToString(),
                  connection_id,
                  one_way=True)

    def broadcast_batch(self, batch, exclude=None, time_to_live=None):
        """Broadcast a batch to all peers (except those in exclude)."""
        if time_to_live is None:
            time_to_live = self.get_time_to_live()
        gossip_message = GossipMessage(
            content_type=GossipMessage.BATCH,
            content=batch.SerializeToString(),
            time_to_live=time_to_live)
        self.broadcast(
            gossip_message, validator_pb2.Message.GOSSIP_MESSAGE, exclude)

    def broadcast_batch_by_transaction_id_request(self, transaction_ids):
        """Ask all peers for the batches containing the given
        transaction ids."""
        time_to_live = self.get_time_to_live()
        batch_request = GossipBatchByTransactionIdRequest(
            ids=transaction_ids,
            nonce=binascii.b2a_hex(os.urandom(16)),
            time_to_live=time_to_live)
        self.broadcast(
            batch_request,
            validator_pb2.Message.GOSSIP_BATCH_BY_TRANSACTION_ID_REQUEST)

    def broadcast_batch_by_batch_id_request(self, batch_id):
        """Ask all peers for the batch with the given batch id."""
        time_to_live = self.get_time_to_live()
        batch_request = GossipBatchByBatchIdRequest(
            id=batch_id,
            nonce=binascii.b2a_hex(os.urandom(16)),
            time_to_live=time_to_live)
        self.broadcast(
            batch_request,
            validator_pb2.Message.GOSSIP_BATCH_BY_BATCH_ID_REQUEST)

    def send(self, message_type, message, connection_id, one_way=False):
        """Sends a message via the network.

        Args:
            message_type (str): The type of the message.
            message (bytes): The message to be sent.
            connection_id (str): The connection to send it to.
            one_way (bool): Whether the send expects no reply.
        """
        try:
            self._network.send(message_type, message, connection_id,
                               one_way=one_way)
        except ValueError:
            LOGGER.debug("Connection %s is no longer valid. "
                         "Removing from list of peers.",
                         connection_id)
            if connection_id in self._peers:
                del self._peers[connection_id]

    def broadcast(self, gossip_message, message_type, exclude=None):
        """Broadcast gossip messages.

        Broadcast the message to all peers unless they are in the excluded
        list.

        Args:
            gossip_message: The message to be broadcast.
            message_type: Type of the message.
            exclude: A list of connection_ids that should be excluded from
                this broadcast.
        """
        with self._lock:
            if exclude is None:
                exclude = []
            for connection_id in self._peers.copy():
                if connection_id not in exclude and \
                        self._network.is_connection_handshake_complete(
                            connection_id):
                    self.send(
                        message_type,
                        gossip_message.SerializeToString(),
                        connection_id,
                        one_way=True)

    def connect_success(self, connection_id):
        """Notify topology that a connection has been properly
        authorized."""
        if self._topology:
            self._topology.connect_success(connection_id)

    def remove_temp_endpoint(self, endpoint):
        """Remove a temporary endpoint that never finished
        authorization."""
        if self._topology:
            self._topology.remove_temp_endpoint(endpoint)

    def start(self):
        """Create the ConnectionManager topology thread and start it."""
        self._topology = ConnectionManager(
            gossip=self,
            network=self._network,
            endpoint=self._endpoint,
            current_chain_head_func=self._current_chain_head_func,
            initial_peer_endpoints=self._initial_peer_endpoints,
            initial_seed_endpoints=self._initial_seed_endpoints,
            peering_mode=self._peering_mode,
            min_peers=self._minimum_peer_connectivity,
            max_peers=self._maximum_peer_connectivity,
            check_frequency=self._topology_check_frequency)
        self._topology.start()

    def stop(self):
        """Send an unregister request to every peer and stop the
        topology thread."""
        for peer in self.get_peers():
            request = PeerUnregisterRequest()
            try:
                self._network.send(validator_pb2.Message.GOSSIP_UNREGISTER,
                                   request.SerializeToString(),
                                   peer)
            except ValueError:
                pass
        if self._topology:
            self._topology.stop()
class ConnectionManager(InstrumentedThread):
    def __init__(self, gossip, network, endpoint,
                 current_chain_head_func,
                 initial_peer_endpoints, initial_seed_endpoints,
                 peering_mode, min_peers=3, max_peers=10,
                 check_frequency=1):
        """Constructor for the ConnectionManager class.

        Args:
            gossip (gossip.Gossip): The gossip overlay network.
            network (network.Interconnect): The underlying network.
            endpoint (str): A zmq-style endpoint uri representing
                this validator's publically reachable endpoint.
            current_chain_head_func (function): Returns the current chain head.
            initial_peer_endpoints ([str]): A list of static peers
                to attempt to connect and peer with.
            initial_seed_endpoints ([str]): A list of endpoints to
                connect to and get candidate peer lists to attempt
                to reach min_peers threshold.
            peering_mode (str): Either 'static' or 'dynamic'.
            min_peers (int): The minimum number of peers required to stop
                attempting candidate connections.
            max_peers (int): The maximum number of active peer connections
                to allow.
            check_frequency (int): How often to attempt dynamic connectivity.
        """
        super().__init__(name="ConnectionManager")
        self._lock = Lock()
        self._stopped = False
        self._gossip = gossip
        self._network = network
        self._endpoint = endpoint
        self._current_chain_head_func = current_chain_head_func
        self._initial_peer_endpoints = initial_peer_endpoints
        self._initial_seed_endpoints = initial_seed_endpoints
        self._peering_mode = peering_mode
        self._min_peers = min_peers
        self._max_peers = max_peers
        self._check_frequency = check_frequency
        self._candidate_peer_endpoints = []
        # Seconds to wait for responses to arrive
        self._response_duration = 2
        # connection_id -> PeerStatus for tracked connections
        self._connection_statuses = {}
        # endpoint -> EndpointInfo for connections still authorizing
        self._temp_endpoints = {}
        # endpoint -> StaticPeerInfo retry bookkeeping (static mode)
        self._static_peer_status = {}
def start(self):
for endpoint in self._initial_peer_endpoints:
self._static_peer_status[endpoint] = \
StaticPeerInfo(
time=0,
retry_threshold=INITIAL_RETRY_FREQUENCY,
count=0)
super().start()
    def run(self):
        """Topology maintenance loop; runs until stop() is called."""
        has_chain_head = self._current_chain_head_func() is not None
        while not self._stopped:
            try:
                if self._peering_mode == 'dynamic':
                    self.retry_dynamic_peering()
                elif self._peering_mode == 'static':
                    self.retry_static_peering()
                # Keep requesting a chain head from peers until one is
                # received; covers the case where no peer had a valid
                # head at first connection.
                has_chain_head = has_chain_head or \
                    self._current_chain_head_func() is not None
                if not has_chain_head:
                    peered_connections = self._get_peered_connections()
                    if peered_connections:
                        LOGGER.debug(
                            'Have not received a chain head from peers. '
                            'Requesting from %s',
                            peered_connections)
                        self._request_chain_head(peered_connections)
                time.sleep(self._check_frequency)
            except Exception:
                # Deliberately broad: the topology thread must survive
                # any single failed refresh.
                LOGGER.exception("Unhandled exception during peer refresh")
def stop(self):
self._stopped = True
for connection_id in self._connection_statuses:
try:
if self._connection_statuses[connection_id] == \
PeerStatus.CLOSED:
continue
msg = DisconnectMessage()
self._network.send(
validator_pb2.Message.NETWORK_DISCONNECT,
msg.SerializeToString(),
connection_id)
self._connection_statuses[connection_id] = PeerStatus.CLOSED
except ValueError:
pass
def _get_peered_connections(self):
peers = self._gossip.get_peers()
return [conn_id for conn_id in peers
if self._connection_statuses[conn_id] == PeerStatus.PEER]
def _request_chain_head(self, peered_connections):
for conn_id in peered_connections:
self._gossip.send_block_request("HEAD", conn_id)
    def retry_dynamic_peering(self):
        """One round of dynamic topology search: when below min_peers,
        query peers and seed endpoints for candidates, then attempt to
        peer with one randomly-chosen unpeered candidate."""
        self._refresh_peer_list(self._gossip.get_peers())
        peers = self._gossip.get_peers()
        peer_count = len(peers)
        if peer_count < self._min_peers:
            LOGGER.debug(
                "Number of peers (%s) below "
                "minimum peer threshold (%s). "
                "Doing topology search.",
                peer_count,
                self._min_peers)
            self._reset_candidate_peer_endpoints()
            self._refresh_peer_list(peers)
            # Clean out any old connections that have disconnected.
            self._refresh_connection_list()
            self._check_temp_endpoints()
            peers = self._gossip.get_peers()
            self._get_peers_of_peers(peers)
            self._get_peers_of_endpoints(
                peers,
                self._initial_seed_endpoints)
            # Wait for GOSSIP_GET_PEERS responses to arrive.
            time.sleep(self._response_duration)
            peered_endpoints = list(peers.values())
            with self._lock:
                # Candidates minus existing peers and our own endpoint.
                unpeered_candidates = list(
                    set(self._candidate_peer_endpoints)
                    - set(peered_endpoints)
                    - set([self._endpoint]))
            LOGGER.debug(
                "Peers are: %s. "
                "Unpeered candidates are: %s",
                peered_endpoints,
                unpeered_candidates)
            if unpeered_candidates:
                self._attempt_to_peer_with_endpoint(
                    random.choice(unpeered_candidates))
    def retry_static_peering(self):
        """One round of static peering: retry each configured endpoint
        with exponential backoff, dropping endpoints that exhaust their
        retries."""
        with self._lock:
            # Endpoints that have exhausted their retries and should be
            # removed from the static list.
            to_remove = []
            for endpoint in self._initial_peer_endpoints:
                connection_id = None
                try:
                    connection_id = \
                        self._network.get_connection_id_by_endpoint(endpoint)
                except KeyError:
                    pass
                static_peer_info = self._static_peer_status[endpoint]
                if connection_id is not None:
                    if connection_id in self._connection_statuses:
                        # Endpoint is already a peer: reset its retry
                        # bookkeeping and move on.
                        if self._connection_statuses[connection_id] == \
                                PeerStatus.PEER:
                            self._static_peer_status[endpoint] = \
                                StaticPeerInfo(
                                    time=0,
                                    retry_threshold=INITIAL_RETRY_FREQUENCY,
                                    count=0)
                            continue
                if (time.time() - static_peer_info.time) > \
                        static_peer_info.retry_threshold:
                    LOGGER.debug("Endpoint has not completed authorization in "
                                 "%s seconds: %s",
                                 static_peer_info.retry_threshold,
                                 endpoint)
                    if connection_id is not None:
                        # If the connection exists, remove it before
                        # retrying to authorize.
                        try:
                            self._network.remove_connection(connection_id)
                        except KeyError:
                            pass
                    if static_peer_info.retry_threshold == \
                            MAXIMUM_STATIC_RETRY_FREQUENCY:
                        if static_peer_info.count >= MAXIMUM_STATIC_RETRIES:
                            # Unable to peer with endpoint; give up.
                            to_remove.append(endpoint)
                            continue
                        else:
                            # At maximum retry threshold; increment count.
                            self._static_peer_status[endpoint] = \
                                StaticPeerInfo(
                                    time=time.time(),
                                    retry_threshold=min(
                                        static_peer_info.retry_threshold * 2,
                                        MAXIMUM_STATIC_RETRY_FREQUENCY),
                                    count=static_peer_info.count + 1)
                    else:
                        # Double the backoff, capped at the maximum.
                        self._static_peer_status[endpoint] = \
                            StaticPeerInfo(
                                time=time.time(),
                                retry_threshold=min(
                                    static_peer_info.retry_threshold * 2,
                                    MAXIMUM_STATIC_RETRY_FREQUENCY),
                                count=0)
                    LOGGER.debug("attempting to peer with %s", endpoint)
                    self._network.add_outbound_connection(endpoint)
                    self._temp_endpoints[endpoint] = EndpointInfo(
                        EndpointStatus.PEERING,
                        time.time(),
                        INITIAL_RETRY_FREQUENCY)
            for endpoint in to_remove:
                # Endpoints that have reached their retry count are
                # removed from both tracking structures.
                self._initial_peer_endpoints.remove(endpoint)
                del self._static_peer_status[endpoint]
def add_candidate_peer_endpoints(self, peer_endpoints):
with self._lock:
for endpoint in peer_endpoints:
if endpoint not in self._candidate_peer_endpoints:
self._candidate_peer_endpoints.append(endpoint)
def set_connection_status(self, connection_id, status):
self._connection_statuses[connection_id] = status
def remove_temp_endpoint(self, endpoint):
with self._lock:
if endpoint in self._temp_endpoints:
del self._temp_endpoints[endpoint]
    def _check_temp_endpoints(self):
        """Re-attempt connections for endpoints that exceeded their
        authorization backoff, doubling the backoff each time."""
        with self._lock:
            for endpoint in self._temp_endpoints:
                endpoint_info = self._temp_endpoints[endpoint]
                if (time.time() - endpoint_info.time) > \
                        endpoint_info.retry_threshold:
                    LOGGER.debug("Endpoint has not completed authorization in "
                                 "%s seconds: %s",
                                 endpoint_info.retry_threshold,
                                 endpoint)
                    try:
                        # If a stale connection exists, remove it before
                        # retrying; a missing connection raises KeyError.
                        conn_id = \
                            self._network.get_connection_id_by_endpoint(
                                endpoint)
                        self._network.remove_connection(conn_id)
                    except KeyError:
                        pass
                    self._network.add_outbound_connection(endpoint)
                    # Reassigning an existing key is safe while iterating.
                    self._temp_endpoints[endpoint] = EndpointInfo(
                        endpoint_info.status,
                        time.time(),
                        min(endpoint_info.retry_threshold * 2,
                            MAXIMUM_RETRY_FREQUENCY))
def _refresh_peer_list(self, peers):
for conn_id in peers:
try:
self._network.get_connection_id_by_endpoint(
peers[conn_id])
except KeyError:
LOGGER.debug("removing peer %s because "
"connection went away",
peers[conn_id])
self._gossip.unregister_peer(conn_id)
if conn_id in self._connection_statuses:
del self._connection_statuses[conn_id]
def _refresh_connection_list(self):
with self._lock:
closed_connections = []
for connection_id in self._connection_statuses:
if not self._network.has_connection(connection_id):
closed_connections.append(connection_id)
for connection_id in closed_connections:
del self._connection_statuses[connection_id]
def _get_peers_of_peers(self, peers):
get_peers_request = GetPeersRequest()
for conn_id in peers:
try:
self._network.send(
validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
get_peers_request.SerializeToString(),
conn_id)
except ValueError:
LOGGER.debug("Peer disconnected: %s", conn_id)
    def _get_peers_of_endpoints(self, peers, endpoints):
        """Request peer lists from the given (seed) endpoints, opening
        new outbound connections where none exist yet."""
        get_peers_request = GetPeersRequest()
        for endpoint in endpoints:
            conn_id = None
            try:
                conn_id = self._network.get_connection_id_by_endpoint(
                    endpoint)
            except KeyError:
                # No connection yet: record a TOPOLOGY attempt and open
                # an outbound connection.
                with self._lock:
                    if endpoint in self._temp_endpoints:
                        del self._temp_endpoints[endpoint]
                    self._temp_endpoints[endpoint] = EndpointInfo(
                        EndpointStatus.TOPOLOGY,
                        time.time(),
                        INITIAL_RETRY_FREQUENCY)
                    self._network.add_outbound_connection(endpoint)
            # If the connection does exist, request peers.
            if conn_id is not None:
                if not self._network.is_connection_handshake_complete(conn_id):
                    # Authorization (trust/challenge) has not finished.
                    continue
                elif conn_id in peers:
                    # Connected and peered - peer request already sent.
                    continue
                else:
                    # connected but not peered
                    if endpoint in self._temp_endpoints:
                        # Endpoint is not yet authorized, do not request peers
                        continue
                    try:
                        self._network.send(
                            validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
                            get_peers_request.SerializeToString(),
                            conn_id)
                    except ValueError:
                        LOGGER.debug("Connection disconnected: %s", conn_id)
def _attempt_to_peer_with_endpoint(self, endpoint):
LOGGER.debug("Attempting to connect/peer with %s", endpoint)
# check if the connection exists, if it does - send,
# otherwise create it
try:
connection_id = \
self._network.get_connection_id_by_endpoint(
endpoint)
register_request = PeerRegisterRequest(
endpoint=self._endpoint,
protocol_version=NETWORK_PROTOCOL_VERSION)
self._network.send(
validator_pb2.Message.GOSSIP_REGISTER,
register_request.SerializeToString(),
connection_id,
callback=partial(
self._peer_callback,
endpoint=endpoint,
connection_id=connection_id))
except KeyError:
# if the connection uri wasn't found in the network's
# connections, it raises a KeyError and we need to add
# a new outbound connection
with self._lock:
self._temp_endpoints[endpoint] = EndpointInfo(
EndpointStatus.PEERING,
time.time(),
INITIAL_RETRY_FREQUENCY)
self._network.add_outbound_connection(endpoint)
    def _reset_candidate_peer_endpoints(self):
        # Clear the endpoints collected during the last topology probe so
        # the next probing round starts from an empty candidate list.
        with self._lock:
            self._candidate_peer_endpoints = []
    def _peer_callback(self, request, result, connection_id, endpoint=None):
        """Handle the acknowledgement for a previously sent peering request.

        Args:
            request: The original outbound message (unused here).
            result: The response; its content is a serialized
                NetworkAcknowledgement.
            connection_id: Connection the register request went over.
            endpoint (str, optional): Remote endpoint; required to actually
                register the peer on success.
        """
        with self._lock:
            ack = NetworkAcknowledgement()
            ack.ParseFromString(result.content)
            if ack.status == ack.ERROR:
                LOGGER.debug("Peering request to %s was NOT successful",
                             connection_id)
                self._remove_temporary_connection(connection_id)
            elif ack.status == ack.OK:
                LOGGER.debug("Peering request to %s was successful",
                             connection_id)
                if endpoint:
                    try:
                        self._gossip.register_peer(connection_id, endpoint)
                        self._connection_statuses[connection_id] = \
                            PeerStatus.PEER
                        # Immediately ask the new peer for its chain head.
                        self._gossip.send_block_request("HEAD", connection_id)
                    except PeeringException as e:
                        # Remove unsuccessful peer
                        LOGGER.warning('Unable to successfully peer with '
                                       'connection_id: %s, due to %s',
                                       connection_id, str(e))
                        self._remove_temporary_connection(connection_id)
                else:
                    # Without an endpoint there is nothing to register the
                    # peer under, so drop the temporary connection instead.
                    LOGGER.debug("Cannot register peer with no endpoint for "
                                 "connection_id: %s",
                                 connection_id)
                    self._remove_temporary_connection(connection_id)
def _remove_temporary_connection(self, connection_id):
status = self._connection_statuses.get(connection_id)
if status == PeerStatus.TEMP:
LOGGER.debug("Closing connection to %s", connection_id)
msg = DisconnectMessage()
try:
self._network.send(validator_pb2.Message.NETWORK_DISCONNECT,
msg.SerializeToString(),
connection_id)
except ValueError:
pass
del self._connection_statuses[connection_id]
self._network.remove_connection(connection_id)
elif status == PeerStatus.PEER:
LOGGER.debug("Connection close request for peer ignored: %s",
connection_id)
elif status is None:
LOGGER.debug("Connection close request for unknown connection "
"ignored: %s",
connection_id)
    def connect_success(self, connection_id):
        """Dispatch follow-up work for a connection that finished authorization.

        The endpoint's recorded status decides the follow-up: PEERING
        connections get a peer-register request, TOPOLOGY connections get a
        get-peers request. The temporary endpoint record is dropped in every
        case.
        """
        endpoint = self._network.connection_id_to_endpoint(connection_id)
        endpoint_info = self._temp_endpoints.get(endpoint)
        LOGGER.debug("Endpoint has completed authorization: %s (id: %s)",
                     endpoint,
                     connection_id)
        if endpoint_info is None:
            LOGGER.debug("Received unknown endpoint: %s", endpoint)
        elif endpoint_info.status == EndpointStatus.PEERING:
            self._connect_success_peering(connection_id, endpoint)
        elif endpoint_info.status == EndpointStatus.TOPOLOGY:
            self._connect_success_topology(connection_id)
        else:
            LOGGER.debug("Endpoint has unknown status: %s", endpoint)
        with self._lock:
            if endpoint in self._temp_endpoints:
                del self._temp_endpoints[endpoint]
def _connect_success_peering(self, connection_id, endpoint):
LOGGER.debug("Connection to %s succeeded", connection_id)
register_request = PeerRegisterRequest(
endpoint=self._endpoint,
protocol_version=NETWORK_PROTOCOL_VERSION)
self._connection_statuses[connection_id] = PeerStatus.TEMP
try:
self._network.send(
validator_pb2.Message.GOSSIP_REGISTER,
register_request.SerializeToString(),
connection_id,
callback=partial(
self._peer_callback,
connection_id=connection_id,
endpoint=endpoint))
except ValueError:
LOGGER.debug("Connection disconnected: %s", connection_id)
    def _connect_success_topology(self, connection_id):
        """Request the peer list over a freshly authorized TOPOLOGY connection."""
        LOGGER.debug("Connection to %s succeeded for topology request",
                     connection_id)
        self._connection_statuses[connection_id] = PeerStatus.TEMP
        get_peers_request = GetPeersRequest()
        def callback(request, result):
            # request, result are ignored, but required by the callback
            # Topology connections are one-shot: close once the reply arrives.
            self._remove_temporary_connection(connection_id)
        try:
            self._network.send(
                validator_pb2.Message.GOSSIP_GET_PEERS_REQUEST,
                get_peers_request.SerializeToString(),
                connection_id,
                callback=callback)
        except ValueError:
            LOGGER.debug("Connection disconnected: %s", connection_id)
| true | true |
f7248bd68fe7de142bd9557e71df9eec370b4208 | 4,604 | py | Python | colossalai/nn/optimizer/lamb.py | xdjiangkai/ColossalAI | 4a3d3446b04065fa1c89b78cba673e96115c6325 | [
"Apache-2.0"
] | 1 | 2022-03-12T04:49:19.000Z | 2022-03-12T04:49:19.000Z | colossalai/nn/optimizer/lamb.py | xdjiangkai/ColossalAI | 4a3d3446b04065fa1c89b78cba673e96115c6325 | [
"Apache-2.0"
] | null | null | null | colossalai/nn/optimizer/lamb.py | xdjiangkai/ColossalAI | 4a3d3446b04065fa1c89b78cba673e96115c6325 | [
"Apache-2.0"
] | 1 | 2022-01-06T17:16:32.000Z | 2022-01-06T17:16:32.000Z | """
Adapted from the pytorch-lamb library at https://github.com/cybertronai/pytorch-lamb
"""
import torch
from torch.optim import Optimizer
from colossalai.registry import OPTIMIZERS
@OPTIMIZERS.register_module
class Lamb(Optimizer):
    r"""Implements Lamb algorithm.

    It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-6)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        adam (bool, optional): always use trust ratio = 1, which turns this into
            Adam. Useful for comparison purposes.

    Raises:
        ValueError: if ``lr``, ``eps`` or either entry of ``betas`` is out of
            its valid range.

    .. _Large Batch Optimization for Deep Learning\: Training BERT in 76 minutes:
        https://arxiv.org/abs/1904.00962
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
                 weight_decay=0, adam=False):
        # Validate hyperparameters before handing them to the base class.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(
                "Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(
                "Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay)
        self.adam = adam
        super(Lamb, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.

        Returns:
            The loss returned by ``closure``, or ``None`` when no closure is
            given.
        """
        loss = None
        if closure is not None:
            loss = closure()

        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        'Lamb does not support sparse gradients, consider SparseAdam instead.')

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = 0
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p.data)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p.data)

                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']

                state['step'] += 1

                # Decay the first and second moment running average coefficient
                # m_t
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                # v_t
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

                # Paper v3 does not use debiasing.
                # bias_correction1 = 1 - beta1 ** state['step']
                # bias_correction2 = 1 - beta2 ** state['step']
                # Apply bias to lr to avoid broadcast.
                # * math.sqrt(bias_correction2) / bias_correction1
                step_size = group['lr']

                weight_norm = p.data.pow(2).sum().sqrt()

                adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
                if group['weight_decay'] != 0:
                    adam_step.add_(p.data, alpha=group['weight_decay'])

                adam_norm = adam_step.pow(2).sum().sqrt()
                # Trust ratio scales the update by ||w|| / ||update||; fall
                # back to 1 when either norm vanishes.
                if weight_norm == 0 or adam_norm == 0:
                    trust_ratio = 1
                else:
                    trust_ratio = weight_norm / adam_norm
                state['weight_norm'] = weight_norm
                state['adam_norm'] = adam_norm
                state['trust_ratio'] = trust_ratio
                if self.adam:
                    # Plain Adam for comparison: ignore the trust ratio.
                    trust_ratio = 1

                p.data.add_(adam_step, alpha=-step_size * trust_ratio)

        return loss
| 39.350427 | 103 | 0.553649 |
import torch
from torch.optim import Optimizer
from colossalai.registry import OPTIMIZERS
@OPTIMIZERS.register_module
class Lamb(Optimizer):
    """Implements the LAMB optimizer (https://arxiv.org/abs/1904.00962).

    Args:
        params (iterable): parameters to optimize or dicts defining
            parameter groups.
        lr (float, optional): learning rate (default: 1e-3).
        betas (Tuple[float, float], optional): coefficients for the running
            averages of the gradient and its square (default: (0.9, 0.999)).
        eps (float, optional): denominator term for numerical stability
            (default: 1e-6).
        weight_decay (float, optional): weight decay / L2 penalty (default: 0).
        adam (bool, optional): force trust ratio = 1, reducing the update to
            plain Adam; useful for comparisons (default: False).

    Raises:
        ValueError: if ``lr``, ``eps`` or either entry of ``betas`` is out of
            its valid range.
    """

    def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
                 weight_decay=0, adam=False):
        # Validate hyperparameters before handing them to the base class.
        if not 0.0 <= lr:
            raise ValueError("Invalid learning rate: {}".format(lr))
        if not 0.0 <= eps:
            raise ValueError("Invalid epsilon value: {}".format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(
                "Invalid beta parameter at index 0: {}".format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(
                "Invalid beta parameter at index 1: {}".format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps,
                        weight_decay=weight_decay)
        self.adam = adam
        super(Lamb, self).__init__(params, defaults)

    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional): re-evaluates the model and returns
                the loss.

        Returns:
            The loss returned by ``closure``, or ``None``.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError(
                        'Lamb does not support sparse gradients, consider SparseAdam instead.')
                state = self.state[p]
                # Lazily initialize the per-parameter moment estimates.
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # First (m_t) and second (v_t) moment updates, in place.
                exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
                # Paper v3 does not use bias correction.
                step_size = group['lr']
                weight_norm = p.data.pow(2).sum().sqrt()
                adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
                if group['weight_decay'] != 0:
                    adam_step.add_(p.data, alpha=group['weight_decay'])
                adam_norm = adam_step.pow(2).sum().sqrt()
                # Trust ratio ||w|| / ||update||, with a safe fallback of 1.
                if weight_norm == 0 or adam_norm == 0:
                    trust_ratio = 1
                else:
                    trust_ratio = weight_norm / adam_norm
                state['weight_norm'] = weight_norm
                state['adam_norm'] = adam_norm
                state['trust_ratio'] = trust_ratio
                if self.adam:
                    # Plain Adam for comparison: ignore the trust ratio.
                    trust_ratio = 1
                p.data.add_(adam_step, alpha=-step_size * trust_ratio)
        return loss
| true | true |
f7248e344028eb2f0e09ee718bbf90134b69c45e | 29,546 | py | Python | pyqubo/array.py | OpenJij/pyqubo | 47190d3391c83c1c84636ab8f8bff67c8f935dc0 | [
"Apache-2.0"
] | null | null | null | pyqubo/array.py | OpenJij/pyqubo | 47190d3391c83c1c84636ab8f8bff67c8f935dc0 | [
"Apache-2.0"
] | null | null | null | pyqubo/array.py | OpenJij/pyqubo | 47190d3391c83c1c84636ab8f8bff67c8f935dc0 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Recruit Communications Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .core import Spin, Binary, Express
import dimod
from dimod.decorators import vartype_argument
import numpy as np
from operator import mul, add
from six.moves import reduce
class Array:
"""Multi-dimensional array.
Args:
bit_list (list/:class:`numpy.ndarray`): The object from which a new array is created.
Accepted input:
* (Nested) list of :class:`Express`, :class:`Array`, int or float.
* numpy.ndarray
Attributes:
shape (tuple[int]): Shape of this array.
Example:
Create a new array with Binary.
>>> from pyqubo import Array, Binary
>>> Array.create('x', shape=(2, 2), vartype='BINARY')
Array([[Binary(x[0][0]), Binary(x[0][1])],
[Binary(x[1][0]), Binary(x[1][1])]])
Create a new array from a nested list of :class:`Express`.
>>> array = Array([[Binary('x0'), Binary('x1')], [Binary('x2'), Binary('x3')]])
>>> array
Array([[Binary(x0), Binary(x1)],
[Binary(x2), Binary(x3)]])
Get the shape of the array.
>>> array.shape
(2, 2)
Access an element with index.
>>> array[0, 0] # = array[(0, 0)]
Binary(x0)
Use slice ":" to select a subset of the array.
>>> array[:, 1] # = array[(slice(None), 1)]
Array([Binary(x1), Binary(x3)])
>>> sum(array[:, 1])
(Binary(x1)+Binary(x3))
Use list or tuple to select a subset of the array.
>>> array[[0, 1], 1]
Array([Binary(x1), Binary(x3)])
>>> array[(0, 1), 1]
Array([Binary(x1), Binary(x3)])
Create an array from numpy array.
>>> import numpy as np
>>> Array(np.array([[1, 2], [3, 4]]))
Array([[1, 2],
[3, 4]])
Create an array from list of :class:`Array`.
>>> Array([Array([1, 2]), Array([3, 4])])
Array([[1, 2],
[3, 4]])
"""
def __init__(self, bit_list):
if isinstance(bit_list, np.ndarray):
self.shape = bit_list.shape
self.bit_list = bit_list.tolist()
elif isinstance(bit_list, list):
def get_shape(l):
if isinstance(l, list) or isinstance(l, Array) or isinstance(l, np.ndarray):
length = len(l)
shape_set = {get_shape(e) for e in l}
if len(shape_set) == 1:
sub_shape = shape_set.pop()
return tuple([length] + list(sub_shape))
else:
raise ValueError('Cannot determine the shape of input nested list.')
else:
return tuple()
def normalize_type(l):
if isinstance(l, list):
return [normalize_type(e) for e in l]
elif isinstance(l, Array):
return [normalize_type(e) for e in l.bit_list]
elif isinstance(l, np.ndarray):
return [normalize_type(e) for e in l.tolist()]
else:
return l
self.shape = get_shape(bit_list)
self.bit_list = normalize_type(bit_list)
else:
raise TypeError('argument should be ndarray or list')
def __len__(self):
return self.shape[0]
def __getitem__(self, key):
"""Get a subset of this array.
Args:
key (int/tuple[int]): Index of array.
Returns:
:class:`Express`/:class:`Array`/int/float
Example:
>>> array = Array.create('x', (2, 3, 2), 'BINARY')
>>> array
Array([[[Binary(x[0][0][0]), Binary(x[0][0][1])],
[Binary(x[0][1][0]), Binary(x[0][1][1])],
[Binary(x[0][2][0]), Binary(x[0][2][1])]],
[[Binary(x[1][0][0]), Binary(x[1][0][1])],
[Binary(x[1][1][0]), Binary(x[1][1][1])],
[Binary(x[1][2][0]), Binary(x[1][2][1])]]])
>>> array[0, 1, 1]
Binary(x[0][1][1])
>>> array[:, :, 1]
"""
if isinstance(key, int):
key = key,
elif not isinstance(key, tuple):
raise TypeError("Key should be int or tuple of int")
def get_item(l, index):
if len(index) > 1:
current_index = index[0]
if isinstance(current_index, int):
return get_item(l[current_index], index[1:])
elif isinstance(current_index, list) or isinstance(current_index, tuple):
return [get_item(l[i], index[1:]) for i in current_index]
else:
return [get_item(e, index[1:]) for e in l[current_index]]
else:
return l[index[0]]
item = get_item(self.bit_list, key)
if isinstance(item, list):
return Array(item)
else:
return item
def __repr__(self):
nest_depth = len(self.shape)
offset = len("Array(")
def format_nested_list(nested_list, nest_count):
if isinstance(nested_list[0], list):
return '[{body}]'.format(
body=',{line_feed}{indent}'.format(
indent=' ' * (nest_count + offset),
line_feed='\n' * (nest_depth - nest_count)
).join([format_nested_list(sub_list, nest_count+1) for sub_list in nested_list])
)
else:
return '[%s]' % ', '.join(map(str, nested_list))
return 'Array({body})'.format(body=format_nested_list(self.bit_list, 1))
def __eq__(self, other):
if not isinstance(other, Array):
return False
else:
return self.bit_list == other.bit_list
def __ne__(self, other):
return not self.__eq__(other)
# math operation
def __neg__(self):
minus_one = Array.fill(-1, self.shape)
return self * minus_one
def __radd__(self, other):
"""It is called when `other(number) + self`"""
return self.__add__(other)
def __add__(self, other):
"""It is called when `self + other(any object)`"""
return self.add(other)
def __rsub__(self, other):
"""It is called when `other(number) - self`"""
return (-self).add(other)
def __sub__(self, other):
"""It is called when `self - other(any object)`"""
return self.subtract(other)
def __rmul__(self, other):
"""It is called when `other(number) * self`"""
return self.__mul__(other)
def __mul__(self, other):
"""It is called when `self * other(any object)`"""
return self.mul(other)
def __div__(self, other):
"""It is called when `self / other(any object)`"""
return self.div(other)
def __rdiv__(self, other):
"""It is called when `other(number) / self`"""
raise ValueError("Number cannot be divided by Expression.")
def __truediv__(self, other): # pragma: no cover
"""division in Python3"""
return self.__div__(other)
def __rtruediv__(self, other): # pragma: no cover
"""It is called when `other(number) / self`"""
return self.__rdiv__(other)
def __matmul__(self, other): # pragma: no cover
return self.matmul(other)
def add(self, other):
"""Returns a sum of self and other.
Args:
other (:class:`Array`/:class:`ndarray`/int/float): Addend.
Returns:
:class:`Array`
Example:
>>> from pyqubo import Array, Binary
>>> import numpy as np
>>> array_a = Array([[Binary('a'), Binary('b')], [Binary('c'), 2]])
>>> array_b = Array([[Binary('d'), 1], [Binary('f'), Binary('g')]])
>>> array_a.add(array_b)
Array([[(Binary(a)+Binary(d)), (Binary(b)+Num(1))],
[(Binary(c)+Binary(f)), (Binary(g)+Num(2))]])
>>> array_a + array_b
Array([[(Binary(a)+Binary(d)), (Binary(b)+Num(1))],
[(Binary(c)+Binary(f)), (Binary(g)+Num(2))]])
Sum of self and scalar value.
>>> array_a + 5
Array([[(Binary(a)+Num(5)), (Binary(b)+Num(5))],
[(Binary(c)+Num(5)), 7]])
Sum of self and numpy ndarray.
>>> array_a + np.array([[1, 2], [3, 4]])
Array([[(Binary(a)+Num(1)), (Binary(b)+Num(2))],
[(Binary(c)+Num(3)), 6]])
"""
return self._pairwise_op_with_type_check(other, lambda x, y: x + y)
def subtract(self, other):
"""Returns a difference between other and self.
Args:
other (:class:`Array`/:class:`ndarray`/int/float): Subtrahend.
Returns:
:class:`Array`
Example:
>>> from pyqubo import Array, Binary
>>> import numpy as np
>>> array_a = Array([[Binary('a'), Binary('b')], [Binary('c'), 2]])
>>> array_b = Array([[Binary('d'), 1], [Binary('f'), Binary('g')]])
>>> array_a.subtract(array_b)
Array([[(Binary(a)+(Binary(d)*Num(-1))), (Binary(b)+Num(-1))],
[(Binary(c)+(Binary(f)*Num(-1))), ((Binary(g)*Num(-1))+Num(2))]])
>>> array_a - array_b
Array([[(Binary(a)+(Binary(d)*Num(-1))), (Binary(b)+Num(-1))],
[(Binary(c)+(Binary(f)*Num(-1))), ((Binary(g)*Num(-1))+Num(2))]])
Difference of self and scalar value.
>>> array_a - 5
Array([[(Binary(a)+Num(-5)), (Binary(b)+Num(-5))],
[(Binary(c)+Num(-5)), -3]])
Difference of self and numpy ndarray.
>>> array_a - np.array([[1, 2], [3, 4]])
Array([[(Binary(a)+Num(-1)), (Binary(b)+Num(-2))],
[(Binary(c)+Num(-3)), -2]])
"""
return self._pairwise_op_with_type_check(other, lambda x, y: x - y)
def mul(self, other):
"""Returns a multiplicity of self by other.
Args:
other (:class:`Array`/:class:`ndarray`/int/float): Factor.
Returns:
:class:`Array`
Example:
>>> from pyqubo import Array, Binary
>>> import numpy as np
>>> array_a = Array([[Binary('a'), Binary('b')], [Binary('c'), 2]])
>>> array_b = Array([[Binary('d'), 1], [Binary('f'), Binary('g')]])
>>> array_a.mul(array_b)
Array([[(Binary(a)*Binary(d)), (Binary(b)*Num(1))],
[(Binary(c)*Binary(f)), (Binary(g)*Num(2))]])
>>> array_a * array_b
Array([[(Binary(a)*Binary(d)), (Binary(b)*Num(1))],
[(Binary(c)*Binary(f)), (Binary(g)*Num(2))]])
Product of self and scalar value.
>>> array_a * 5
Array([[(Binary(a)*Num(5)), (Binary(b)*Num(5))],
[(Binary(c)*Num(5)), 10]])
Product of self and numpy ndarray.
>>> array_a * np.array([[1, 2], [3, 4]])
Array([[(Binary(a)*Num(1)), (Binary(b)*Num(2))],
[(Binary(c)*Num(3)), 8]])
"""
return self._pairwise_op_with_type_check(other, lambda x, y: x * y)
def div(self, other):
"""Returns division of self by other.
Args:
other (int/float): Divisor.
Returns:
:class:`Array`
Example:
>>> from pyqubo import Array, Binary
>>> array_a = Array([[Binary('a'), Binary('b')], [Binary('c'), 2]])
>>> array_a / 5
Array([[(Binary(a)*Num(0.2)), (Binary(b)*Num(0.2))],
[(Binary(c)*Num(0.2)), 0.4]])
"""
if not isinstance(other, Array):
return self * (other ** -1)
else:
raise ValueError("Expression cannot be divided by Expression.")
@staticmethod
@vartype_argument('vartype')
def create(name, shape, vartype):
"""Create a new array with Spins or Binary.
Args:
name (str): Name of the matrix. It is used as a part of the label of variables.
For example, if the name is 'x',
the label of `(i, j)` th variable will be ``x[i][j]``.
shape (int/tuple[int]): Dimensions of the array.
vartype (:class:`dimod.Vartype`/str/set, optional):
Variable type of the solution.
Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
Example:
>>> from pyqubo import Array
>>> array = Array.create('x', shape=(2, 2), vartype='BINARY')
>>> array
Array([[Binary(x[0][0]), Binary(x[0][1])],
[Binary(x[1][0]), Binary(x[1][1])]])
>>> array[0]
Array([Binary(x[0][0]), Binary(x[0][1])])
"""
if isinstance(shape, int):
shape = shape,
if vartype == dimod.BINARY:
var_class = Binary
else:
var_class = Spin
def var_name(_name, index):
return "{name}{index_repr}".format(
name=_name, index_repr=''.join(['[%d]' % i for i in index]))
def create_structure(index):
return {var_name(name, index): tuple([name] + index)}
def generator(index):
return var_class(var_name(name, index), create_structure(index))
return Array._create_with_generator(shape, generator)
@staticmethod
def fill(obj, shape):
"""Create a new array with the given shape, all filled with the given object.
Args:
obj (int/float/:class:`Express`): The object with which a new array is filled.
shape (tuple[int]): Shape of the array.
Returns:
:class:`Array`: Created array.
Example:
>>> from pyqubo import Array, Binary
>>> Array.fill(Binary('a'), shape=(2, 3))
Array([[Binary(a), Binary(a), Binary(a)],
[Binary(a), Binary(a), Binary(a)]])
"""
return Array._create_with_generator(shape, lambda _: obj)
@staticmethod
def _create_with_generator(shape, generator):
"""Returns an array with objects which `generator` created.
Args:
shape (tuple[int]): Shape of the array.
generator (list[int] =>:class:`Express`): Function to generate :class:`Express`:.
Type of the argument of the generator is ``list[int]``.
Returns:
:class:`Array`: Created array.
"""
_shape_list = list(shape)
def create_internal(shape_list, index):
if len(shape_list) > 1:
length = shape_list[0]
return [create_internal(shape_list[1:], index + [i]) for i in range(length)]
else:
length = shape_list[0]
return [generator(index+[i]) for i in range(length)]
return Array(create_internal(_shape_list, []))
def _pairwise_op_with_type_check(self, other, operation):
"""Pairwise operation with type check.
Args:
other (:class:`Array`/:class:`ndarray`/int/float): The other object in operation.
operation (:class:`Express`, :class:`Express` => :class:`Express`): Operation.
Returns:
:class:`Array`
"""
if isinstance(other, np.ndarray):
other = Array(other)
elif isinstance(other, int) or isinstance(other, float) or isinstance(other, Express):
other = Array.fill(other, self.shape)
elif not isinstance(other, Array):
raise TypeError('Operation of Array cannot be done with type:{type}'
.format(type=type(other)))
return self._pairwise_op(other, operation)
def _pairwise_op(self, other, operation):
"""Pairwise operation
Args:
other (:class:`Array`): The other object in operation.
operation (:class:`Express`, :class:`Express` => :class:`Express`): Operation
Returns:
:class:`Array`
"""
if not isinstance(other, Array): # pragma: no cover
raise TypeError('Type of `other` is not a `Array` instance.')
elif not self.shape == other.shape:
raise ValueError('Shape of other is not same as that of self.')
else:
def operate(l1, l2):
if isinstance(l1, list):
return [operate(e1, e2) for e1, e2 in zip(l1, l2)]
else:
return operation(l1, l2)
return Array(operate(self.bit_list, other.bit_list))
@property
def T(self):
"""Returns a transposed array.
Example:
>>> from pyqubo import Array
>>> array = Array.create('x', shape=(2, 3), vartype='BINARY')
>>> array
Array([[Binary(x[0][0]), Binary(x[0][1]), Binary(x[0][2])],
[Binary(x[1][0]), Binary(x[1][1]), Binary(x[1][2])]])
>>> array.T
Array([[Binary(x[0][0]), Binary(x[1][0])],
[Binary(x[0][1]), Binary(x[1][1])],
[Binary(x[0][2]), Binary(x[1][2])]])
"""
def generator(index):
return self[tuple(index[::-1])]
return Array._create_with_generator(self.shape[::-1], generator)
def dot(self, other):
"""Returns a dot product of two arrays.
Args:
other (:class:`Array`): Array.
Returns:
:class:`Express`/:class:`Array`
Example:
Dot calculation falls into four patterns.
1. If both `self` and `other` are 1-D arrays, it is inner product of vectors.
>>> from pyqubo import Array, Binary
>>> array_a = Array([Binary('a'), Binary('b')])
>>> array_b = Array([Binary('c'), Binary('d')])
>>> array_a.dot(array_b)
((Binary(a)*Binary(c))+(Binary(b)*Binary(d)))
2. If `self` is an N-D array and `other` is a 1-D array,\
it is a sum product over the last axis of `self` and `other`.
>>> array_a = Array([[Binary('a'), Binary('b')], [Binary('c'), Binary('d')]])
>>> array_b = Array([Binary('e'), Binary('f')])
>>> array_a.dot(array_b)
Array([((Binary(a)*Binary(e))+(Binary(b)*Binary(f))), \
((Binary(c)*Binary(e))+(Binary(d)*Binary(f)))])
3. If both `self` and `other` are 2-D arrays, it is matrix multiplication.
>>> array_a = Array([[Binary('a'), Binary('b')], [Binary('c'), Binary('d')]])
>>> array_b = Array([[Binary('e'), Binary('f')], [Binary('g'), Binary('h')]])
>>> array_a.dot(array_b)
Array([[((Binary(a)*Binary(e))+(Binary(b)*Binary(g))), \
((Binary(a)*Binary(f))+(Binary(b)*Binary(h)))],
[((Binary(c)*Binary(e))+(Binary(d)*Binary(g))), \
((Binary(c)*Binary(f))+(Binary(d)*Binary(h)))]])
4. If `self` is an N-D array and `other` is an M-D array (where N, M>=2),\
it is a sum product over the last axis of `self` and\
the second-to-last axis of `other`. If N = M = 3,\
(i, j, k, m) element of a dot product of `self` and `other` is:
.. code-block:: python
dot(self, other)[i,j,k,m] = sum(self[i,j,:] * other[k,:,m])
>>> array_a = Array.create('a', shape=(3, 2, 4), vartype='BINARY')
>>> array_a.shape
(3, 2, 4)
>>> array_b = Array.create('b', shape=(5, 4, 3), vartype='BINARY')
>>> array_b.shape
(5, 4, 3)
>>> i, j, k, m = (1, 1, 3, 2)
>>> array_a.dot(array_b)[i, j, k, m] == sum(array_a[i, j, :] * array_b[k, :, m])
True
Dot product with list.
>>> array_a = Array([Binary('a'), Binary('b')])
>>> array_b = [3, 4]
>>> array_a.dot(array_b)
((Binary(a)*Num(3))+(Binary(b)*Num(4)))
"""
if isinstance(other, np.ndarray) or isinstance(other, list):
other = Array(other)
if not isinstance(other, Array):
raise TypeError("Type of argument should be Array")
# pattern 1 (see docstring)
if len(self.shape) == 1 and len(other.shape) == 1 and self.shape[0] == other.shape[0]:
return sum(self.mul(other))
# pattern 2
elif len(self.shape) == 2 and len(other.shape) == 1:
return Array([sum(v * other) for v in self])
# pattern 3 and 4
else:
return self._dot_matrix(other)
def _dot_matrix(self, other):
"""Returns a dot product of N-D array self and M-D array other (where N, M>=2).
"""
assert isinstance(other, Array), "Type should be Array, not {type}".format(type=type(other))
assert self.shape[-1] == other.shape[-2],\
"self.shape[-1] should be equal other.shape[-2].\n" +\
"For more details, see https://pyqubo.readthedocs.io/en/latest/reference/array.html"
vector_indices = slice(0, self.shape[-1], None)
new_shape = self.shape[:-1] + other.shape[:-2] + (other.shape[-1],)
def generator(index):
half = len(self.shape) - 1
index_self = tuple(index[:half]) + (vector_indices,)
index_other = tuple(index[half:-1]) + (vector_indices,) + (index[-1],)
vector_self = self[index_self]
vector_other = other[index_other]
return sum(vector_self * vector_other)
return Array._create_with_generator(new_shape, generator)
def matmul(self, other):
"""Returns a matrix product of two arrays.
Note:
You can use operator symbol '@' instead of :obj:`matmul()`
in Python 3.5 or later version.
>>> from pyqubo import Array
>>> array_a = Array.create('a', shape=(2, 4), vartype='BINARY')
>>> array_b = Array.create('b', shape=(4, 3), vartype='BINARY')
>>> array_a @ array_b == array_a.matmul(array_b)
True
Args:
other (:class:`Array`/:class:`numpy.ndarray`/list):
Returns:
:class:`Array`/:class:`Express`
Example:
Matrix product of two arrays falls into 3 patterns.
1. If either of the arguments is 1-D array,
it is treated as a matrix where one is added to its dimension.
>>> from pyqubo import Array, Binary
>>> array_a = Array([[Binary('a'), Binary('b')], [Binary('c'), Binary('d')]])
>>> array_b = Array([Binary('e'), Binary('f')])
>>> array_a.matmul(array_b)
Array([((Binary(a)*Binary(e))+(Binary(b)*Binary(f))), \
((Binary(c)*Binary(e))+(Binary(d)*Binary(f)))])
2. If both arguments are 2-D array, conventional matrix product is calculated.
>>> array_a = Array([[Binary('a'), Binary('b')], [Binary('c'), Binary('d')]])
>>> array_b = Array([[Binary('e'), Binary('f')], [Binary('g'), Binary('h')]])
>>> array_a.matmul(array_b)
Array([[((Binary(a)*Binary(e))+(Binary(b)*Binary(g))), \
((Binary(a)*Binary(f))+(Binary(b)*Binary(h)))],
[((Binary(c)*Binary(e))+(Binary(d)*Binary(g))), \
((Binary(c)*Binary(f))+(Binary(d)*Binary(h)))]])
3. If either argument is N-D (where N > 2), it is treated as an array whose element is a
2-D matrix of last two indices. In this example, `array_a` is treated as if
it is a vector whose elements are two matrices of shape (2, 3).
>>> array_a = Array.create('a', shape=(2, 2, 3), vartype='BINARY')
>>> array_b = Array.create('b', shape=(3, 2), vartype='BINARY')
>>> (array_a @ array_b)[0] == array_a[0].matmul(array_b)
True
"""
if isinstance(other, np.ndarray) or isinstance(other, list):
other = Array(other)
assert isinstance(other, Array), "Type should be Array, not {type}".format(type=type(other))
# pattern 1 (see docstring)
if len(self.shape) == 1 or len(other.shape) == 1:
return self.dot(other)
# pattern 2 and 3
else:
return self._matmul_matrix(other)
def _matmul_matrix(self, other):
assert isinstance(other, Array), "Type should be Array, not {type}".format(type=type(other))
assert len(self.shape) >= 2 and len(other.shape) >= 2, "Shape should be greater than 2"
assert self.shape[-1] == other.shape[-2], \
"self.shape[-1] should be equal other.shape[-2].\n" + \
"For more details, see https://pyqubo.readthedocs.io/en/latest/reference/array.html"
self_shape_len = len(self.shape)
other_shape_len = len(other.shape)
common_len = min(self_shape_len, other_shape_len)
for s1, s2 in zip(self.shape[-common_len:-2], other.shape[-common_len:-2]):
assert s1 == s2, "Shape doesn't match."
longer_shape = self.shape if self_shape_len > other_shape_len else other.shape
new_shape = longer_shape[:-2] + (self.shape[-2], other.shape[-1])
def generator(index):
mat_index_self = tuple(index[-self_shape_len:][:-2])
mat_index_other = tuple(index[-other_shape_len:][:-2])
mat_self = self[mat_index_self] if mat_index_self != () else self
mat_other = other[mat_index_other] if mat_index_other != () else other
j = index[-1]
i = index[-2]
return mat_self[i, :].dot(mat_other[:, j])
return Array._create_with_generator(new_shape, generator)
@staticmethod
def _calc_steps(shape):
"""Returns steps of shape.
Step is used to create an 1-dim index from n-dim index like
>>> steps = Array._calc_steps(shape)
>>> one_dim_index = sum(step * i for step, i in zip(steps, n_dim_index))
"""
steps = []
tmp_d = 1
for d in shape[::-1]:
steps.append(tmp_d)
tmp_d *= d
steps = steps[::-1]
return steps
    def reshape(self, new_shape):
        """Returns a reshaped array.

        Elements keep their row-major (C-style) order; the total number of
        elements must be unchanged.

        Args:
            new_shape (tuple[int]): New shape.

        Example:
            >>> from pyqubo import Array
            >>> array = Array.create('x', shape=(2, 3), vartype='BINARY')
            >>> array
            Array([[Binary(x[0][0]), Binary(x[0][1]), Binary(x[0][2])],
                   [Binary(x[1][0]), Binary(x[1][1]), Binary(x[1][2])]])
            >>> array.reshape((3, 2, 1))
            Array([[[Binary(x[0][0])],
                    [Binary(x[0][1])]],\
                   [[Binary(x[0][2])],
                    [Binary(x[1][0])]],\
                   [[Binary(x[1][1])],
                    [Binary(x[1][2])]]])
        """
        assert reduce(mul, self.shape) == reduce(mul, new_shape),\
            "cannot reshape array of size {p} into shape {new_shape}".format(
                p=reduce(mul, self.shape), new_shape=new_shape)
        def calc_one_dim_array(nested_list):
            # Flatten the nested bit list into one row-major list.
            if isinstance(nested_list, list):
                return reduce(add, [calc_one_dim_array(e) for e in nested_list])
            else:
                return [nested_list]
        # create an 1-dim array from the n-dim array
        one_dim_array = calc_one_dim_array(self.bit_list)
        new_steps = Array._calc_steps(new_shape)
        def generator(index):
            # create an index for 1-dim array from the given index
            one_dim_index = sum(step * i for step, i in zip(new_steps, index))
            return one_dim_array[one_dim_index]
        return Array._create_with_generator(new_shape, generator)
| 36.978723 | 100 | 0.504874 |
from .core import Spin, Binary, Express
import dimod
from dimod.decorators import vartype_argument
import numpy as np
from operator import mul, add
from six.moves import reduce
class Array:
    """An n-dimensional array of expressions with numpy-like operations.

    Wraps a nested Python list (``bit_list``) of :class:`Express` objects or
    numbers and provides element-wise arithmetic, ``dot``/``matmul``,
    transpose (``T``) and ``reshape``.

    Attributes:
        shape (tuple[int]): Size of each dimension.
        bit_list (list): Nested list holding the elements.
    """
    def __init__(self, bit_list):
        """Build an Array from a ``numpy.ndarray`` or a (nested) list.

        Raises:
            ValueError: If the nested list is ragged (siblings disagree in shape).
            TypeError: If *bit_list* is neither an ndarray nor a list.
        """
        if isinstance(bit_list, np.ndarray):
            self.shape = bit_list.shape
            self.bit_list = bit_list.tolist()
        elif isinstance(bit_list, list):
            def get_shape(l):
                # Recursively derive the shape; all siblings must agree.
                if isinstance(l, list) or isinstance(l, Array) or isinstance(l, np.ndarray):
                    length = len(l)
                    shape_set = {get_shape(e) for e in l}
                    if len(shape_set) == 1:
                        sub_shape = shape_set.pop()
                        return tuple([length] + list(sub_shape))
                    else:
                        raise ValueError('Cannot determine the shape of input nested list.')
                else:
                    return tuple()
            def normalize_type(l):
                # Convert nested Array/ndarray elements into plain lists.
                if isinstance(l, list):
                    return [normalize_type(e) for e in l]
                elif isinstance(l, Array):
                    return [normalize_type(e) for e in l.bit_list]
                elif isinstance(l, np.ndarray):
                    return [normalize_type(e) for e in l.tolist()]
                else:
                    return l
            self.shape = get_shape(bit_list)
            self.bit_list = normalize_type(bit_list)
        else:
            raise TypeError('argument should be ndarray or list')
    def __len__(self):
        """Length of the first dimension."""
        return self.shape[0]
    def __getitem__(self, key):
        """Index with an int or a tuple of int/list/slice, numpy-style.

        Returns a sub-:class:`Array` when the result is still nested,
        otherwise the single element itself.
        """
        if isinstance(key, int):
            key = key,
        elif not isinstance(key, tuple):
            raise TypeError("Key should be int or tuple of int")
        def get_item(l, index):
            if len(index) > 1:
                current_index = index[0]
                if isinstance(current_index, int):
                    return get_item(l[current_index], index[1:])
                elif isinstance(current_index, list) or isinstance(current_index, tuple):
                    # Fancy indexing: gather the listed positions.
                    return [get_item(l[i], index[1:]) for i in current_index]
                else:
                    # Assumed to be a slice; descend into every selected element.
                    return [get_item(e, index[1:]) for e in l[current_index]]
            else:
                return l[index[0]]
        item = get_item(self.bit_list, key)
        if isinstance(item, list):
            return Array(item)
        else:
            return item
    def __repr__(self):
        """Multi-line ``Array([...])`` representation aligned like numpy's."""
        nest_depth = len(self.shape)
        offset = len("Array(")
        def format_nested_list(nested_list, nest_count):
            if isinstance(nested_list[0], list):
                return '[{body}]'.format(
                    body=',{line_feed}{indent}'.format(
                        indent=' ' * (nest_count + offset),
                        line_feed='\n' * (nest_depth - nest_count)
                    ).join([format_nested_list(sub_list, nest_count+1) for sub_list in nested_list])
                )
            else:
                return '[%s]' % ', '.join(map(str, nested_list))
        return 'Array({body})'.format(body=format_nested_list(self.bit_list, 1))
    def __eq__(self, other):
        """Element-wise structural equality with another Array."""
        if not isinstance(other, Array):
            return False
        else:
            return self.bit_list == other.bit_list
    def __ne__(self, other):
        return not self.__eq__(other)
    def __neg__(self):
        # Negation is implemented as element-wise multiplication by -1.
        minus_one = Array.fill(-1, self.shape)
        return self * minus_one
    def __radd__(self, other):
        return self.__add__(other)
    def __add__(self, other):
        return self.add(other)
    def __rsub__(self, other):
        # other - self == (-self) + other
        return (-self).add(other)
    def __sub__(self, other):
        return self.subtract(other)
    def __rmul__(self, other):
        return self.__mul__(other)
    def __mul__(self, other):
        return self.mul(other)
    def __div__(self, other):
        return self.div(other)
    def __rdiv__(self, other):
        raise ValueError("Number cannot be divided by Expression.")
    def __truediv__(self, other):
        return self.__div__(other)
    def __rtruediv__(self, other):
        return self.__rdiv__(other)
    def __matmul__(self, other):
        # Supports the `a @ b` operator.
        return self.matmul(other)
    def add(self, other):
        """Element-wise addition."""
        return self._pairwise_op_with_type_check(other, lambda x, y: x + y)
    def subtract(self, other):
        """Element-wise subtraction."""
        return self._pairwise_op_with_type_check(other, lambda x, y: x - y)
    def mul(self, other):
        """Element-wise multiplication."""
        return self._pairwise_op_with_type_check(other, lambda x, y: x * y)
    def div(self, other):
        """Element-wise division by a scalar; dividing by an Array is invalid."""
        if not isinstance(other, Array):
            return self * (other ** -1)
        else:
            raise ValueError("Expression cannot be divided by Expression.")
    @staticmethod
    @vartype_argument('vartype')
    def create(name, shape, vartype):
        """Create an Array of Binary or Spin variables named ``name[i]...``.

        Args:
            name (str): Base name of the variables.
            shape (int or tuple[int]): Shape of the array.
            vartype: dimod vartype ('BINARY' or 'SPIN').
        """
        if isinstance(shape, int):
            shape = shape,
        if vartype == dimod.BINARY:
            var_class = Binary
        else:
            var_class = Spin
        def var_name(_name, index):
            # e.g. name 'x' with index [0, 1] -> 'x[0][1]'.
            return "{name}{index_repr}".format(
                name=_name, index_repr=''.join(['[%d]' % i for i in index]))
        def create_structure(index):
            return {var_name(name, index): tuple([name] + index)}
        def generator(index):
            return var_class(var_name(name, index), create_structure(index))
        return Array._create_with_generator(shape, generator)
    @staticmethod
    def fill(obj, shape):
        """Return an Array of the given shape with every element set to *obj*."""
        return Array._create_with_generator(shape, lambda _: obj)
    @staticmethod
    def _create_with_generator(shape, generator):
        """Build an Array by calling ``generator(index)`` for every n-dim index."""
        _shape_list = list(shape)
        def create_internal(shape_list, index):
            if len(shape_list) > 1:
                length = shape_list[0]
                return [create_internal(shape_list[1:], index + [i]) for i in range(length)]
            else:
                length = shape_list[0]
                return [generator(index+[i]) for i in range(length)]
        return Array(create_internal(_shape_list, []))
    def _pairwise_op_with_type_check(self, other, operation):
        """Coerce *other* (ndarray/scalar/Express) to an Array, then apply op."""
        if isinstance(other, np.ndarray):
            other = Array(other)
        elif isinstance(other, int) or isinstance(other, float) or isinstance(other, Express):
            # Broadcast a scalar/expression to the full shape.
            other = Array.fill(other, self.shape)
        elif not isinstance(other, Array):
            raise TypeError('Operation of Array cannot be done with type:{type}'
                            .format(type=type(other)))
        return self._pairwise_op(other, operation)
    def _pairwise_op(self, other, operation):
        """Apply *operation* element-wise to two same-shape Arrays."""
        if not isinstance(other, Array):
            raise TypeError('Type of `other` is not a `Array` instance.')
        elif not self.shape == other.shape:
            raise ValueError('Shape of other is not same as that of self.')
        else:
            def operate(l1, l2):
                if isinstance(l1, list):
                    return [operate(e1, e2) for e1, e2 in zip(l1, l2)]
                else:
                    return operation(l1, l2)
            return Array(operate(self.bit_list, other.bit_list))
    @property
    def T(self):
        """Transposed array (all axes reversed)."""
        def generator(index):
            return self[tuple(index[::-1])]
        return Array._create_with_generator(self.shape[::-1], generator)
    def dot(self, other):
        """Dot product following numpy.dot: vector-vector, matrix-vector,
        or the general tensor contraction via :meth:`_dot_matrix`."""
        if isinstance(other, np.ndarray) or isinstance(other, list):
            other = Array(other)
        if not isinstance(other, Array):
            raise TypeError("Type of argument should be Array")
        if len(self.shape) == 1 and len(other.shape) == 1 and self.shape[0] == other.shape[0]:
            return sum(self.mul(other))
        elif len(self.shape) == 2 and len(other.shape) == 1:
            return Array([sum(v * other) for v in self])
        else:
            return self._dot_matrix(other)
    def _dot_matrix(self, other):
        """General numpy.dot contraction: sum over the last axis of *self*
        and the second-to-last axis of *other*."""
        assert isinstance(other, Array), "Type should be Array, not {type}".format(type=type(other))
        assert self.shape[-1] == other.shape[-2],\
            "self.shape[-1] should be equal other.shape[-2].\n" +\
            "For more details, see https://pyqubo.readthedocs.io/en/latest/reference/array.html"
        # Slice selecting the whole contracted axis.
        vector_indices = slice(0, self.shape[-1], None)
        new_shape = self.shape[:-1] + other.shape[:-2] + (other.shape[-1],)
        def generator(index):
            half = len(self.shape) - 1
            index_self = tuple(index[:half]) + (vector_indices,)
            index_other = tuple(index[half:-1]) + (vector_indices,) + (index[-1],)
            vector_self = self[index_self]
            vector_other = other[index_other]
            return sum(vector_self * vector_other)
        return Array._create_with_generator(new_shape, generator)
    def matmul(self, other):
        """Matrix product following ``numpy.matmul`` dispatch rules."""
        if isinstance(other, np.ndarray) or isinstance(other, list):
            other = Array(other)
        assert isinstance(other, Array), "Type should be Array, not {type}".format(type=type(other))
        # 1-dim operands fall back to dot; otherwise batched matrix multiply.
        if len(self.shape) == 1 or len(other.shape) == 1:
            return self.dot(other)
        else:
            return self._matmul_matrix(other)
    def _matmul_matrix(self, other):
        """Batched matrix-matrix product over the last two dimensions."""
        assert isinstance(other, Array), "Type should be Array, not {type}".format(type=type(other))
        assert len(self.shape) >= 2 and len(other.shape) >= 2, "Shape should be greater than 2"
        assert self.shape[-1] == other.shape[-2], \
            "self.shape[-1] should be equal other.shape[-2].\n" + \
            "For more details, see https://pyqubo.readthedocs.io/en/latest/reference/array.html"
        self_shape_len = len(self.shape)
        other_shape_len = len(other.shape)
        # Shared leading (batch) dimensions must agree.
        common_len = min(self_shape_len, other_shape_len)
        for s1, s2 in zip(self.shape[-common_len:-2], other.shape[-common_len:-2]):
            assert s1 == s2, "Shape doesn't match."
        longer_shape = self.shape if self_shape_len > other_shape_len else other.shape
        new_shape = longer_shape[:-2] + (self.shape[-2], other.shape[-1])
        def generator(index):
            # Batch indices for each operand; () means no batch dims.
            mat_index_self = tuple(index[-self_shape_len:][:-2])
            mat_index_other = tuple(index[-other_shape_len:][:-2])
            mat_self = self[mat_index_self] if mat_index_self != () else self
            mat_other = other[mat_index_other] if mat_index_other != () else other
            j = index[-1]
            i = index[-2]
            # Row i of self dotted with column j of other.
            return mat_self[i, :].dot(mat_other[:, j])
        return Array._create_with_generator(new_shape, generator)
    @staticmethod
    def _calc_steps(shape):
        """Return the stride of each dimension, mapping an n-dim index to a
        flat one via ``sum(step * i for step, i in zip(steps, index))``."""
        steps = []
        tmp_d = 1
        for d in shape[::-1]:
            steps.append(tmp_d)
            tmp_d *= d
        steps = steps[::-1]
        return steps
    def reshape(self, new_shape):
        """Return a reshaped Array keeping row-major element order.

        Args:
            new_shape (tuple[int]): Target shape with the same element count.
        """
        assert reduce(mul, self.shape) == reduce(mul, new_shape),\
            "cannot reshape array of size {p} into shape {new_shape}".format(
                p=reduce(mul, self.shape), new_shape=new_shape)
        def calc_one_dim_array(nested_list):
            # Flatten the nested bit list into one row-major list.
            if isinstance(nested_list, list):
                return reduce(add, [calc_one_dim_array(e) for e in nested_list])
            else:
                return [nested_list]
        # create an 1-dim array from the n-dim array
        one_dim_array = calc_one_dim_array(self.bit_list)
        new_steps = Array._calc_steps(new_shape)
        def generator(index):
            # create an index for 1-dim array from the given index
            one_dim_index = sum(step * i for step, i in zip(new_steps, index))
            return one_dim_array[one_dim_index]
        return Array._create_with_generator(new_shape, generator)
| true | true |
f7248e4c92268c1a8e4f9a3e78264a4a28b054ff | 533 | py | Python | scripts/flash/plot_hitscores.py | JunCEEE/hummingbird | 0b1bdf5023b92090f31d9bc857e0854a805cf2cd | [
"BSD-2-Clause"
] | 14 | 2016-02-18T23:10:12.000Z | 2021-07-30T09:19:56.000Z | scripts/flash/plot_hitscores.py | JunCEEE/hummingbird | 0b1bdf5023b92090f31d9bc857e0854a805cf2cd | [
"BSD-2-Clause"
] | 66 | 2015-11-18T15:39:45.000Z | 2015-12-06T16:06:20.000Z | scripts/flash/plot_hitscores.py | JunCEEE/hummingbird | 0b1bdf5023b92090f31d9bc857e0854a805cf2cd | [
"BSD-2-Clause"
] | 13 | 2016-07-07T13:15:52.000Z | 2021-11-10T11:56:13.000Z | #!/usr/bin/env python
# Plot the per-event "lit pixel" hitscore of one FLASH run and save it as a PNG.
# Usage: plot_hitscores.py <run-number>
import h5py
import numpy as np
import matplotlib
# Non-interactive backend so the script runs without a display (batch nodes).
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
runnr = int(sys.argv[1])
filename = '/asap3/flash/gpfs/bl1/2017/data/11001733/processed/hummingbird/r%04d_ol1.h5' %runnr
with h5py.File(filename, 'r') as f:
    # One hitscore value per event, written by the Hummingbird online analysis.
    hitscore = f['entry_1/result_1/hitscore_litpixel'][:]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(hitscore, 'k.')
# Optional hitscore threshold line (2nd CLI argument) — deliberately disabled.
#ax.axhline(int(sys.argv[2]))
fig.savefig('../plots/r%04d_hitscore.png' %runnr, dpi=100, bbox_inches='tight')
| 28.052632 | 95 | 0.729831 |
# Render the hitscore trace of a single run and write it out as a PNG image.
import h5py
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys

# The run number is the first command-line argument.
run_number = int(sys.argv[1])
h5_path = '/asap3/flash/gpfs/bl1/2017/data/11001733/processed/hummingbird/r%04d_ol1.h5' % run_number
with h5py.File(h5_path, 'r') as h5_file:
    scores = h5_file['entry_1/result_1/hitscore_litpixel'][:]

figure = plt.figure()
axes = figure.add_subplot(111)
axes.plot(scores, 'k.')
figure.savefig('../plots/r%04d_hitscore.png' % run_number, dpi=100, bbox_inches='tight')
| true | true |
f7248ee621042e30291d461ffdf3dcab8f265bba | 106,231 | py | Python | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/aio/operations/_virtual_machines_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2021-09-07T18:39:05.000Z | 2021-09-07T18:39:05.000Z | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/aio/operations/_virtual_machines_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/compute/azure-mgmt-compute/azure/mgmt/compute/v2021_04_01/aio/operations/_virtual_machines_operations.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-03-04T06:21:56.000Z | 2022-03-04T06:21:56.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._virtual_machines_operations import build_assess_patches_request_initial, build_capture_request_initial, build_convert_to_managed_disks_request_initial, build_create_or_update_request_initial, build_deallocate_request_initial, build_delete_request_initial, build_generalize_request, build_get_request, build_install_patches_request_initial, build_instance_view_request, build_list_all_request, build_list_available_sizes_request, build_list_by_location_request, build_list_request, build_perform_maintenance_request_initial, build_power_off_request_initial, build_reapply_request_initial, build_redeploy_request_initial, build_reimage_request_initial, build_restart_request_initial, build_retrieve_boot_diagnostics_data_request, build_run_command_request_initial, build_simulate_eviction_request, build_start_request_initial, build_update_request_initial
# Signature of the optional `cls` callback callers may pass to customize
# deserialization: (pipeline_response, deserialized, response_headers) -> Any.
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachinesOperations:
"""VirtualMachinesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2021_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the pipeline client, configuration and (de)serializers
        injected by the service client that creates this operation group."""
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    @distributed_trace
    def list_by_location(
        self,
        location: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.VirtualMachineListResult"]:
        """Gets all the virtual machines under the specified subscription for the specified location.

        :param location: The location for which virtual machines under the subscription are queried.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VirtualMachineListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualMachineListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            # First page uses the operation URL template; later pages follow
            # the service-provided nextLink (always re-issued as a GET).
            if not next_link:
                
                request = build_list_by_location_request(
                    location=location,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_location.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                
                request = build_list_by_location_request(
                    location=location,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Pull the page's items and the continuation link out of the body.
            deserialized = self._deserialize("VirtualMachineListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response


        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/virtualMachines'}  # type: ignore
    async def _capture_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: "_models.VirtualMachineCaptureParameters",
        **kwargs: Any
    ) -> Optional["_models.VirtualMachineCaptureResult"]:
        """Issue the raw capture POST; returns the deserialized body on 200,
        ``None`` on 202 (operation accepted, result comes via polling)."""
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.VirtualMachineCaptureResult"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'VirtualMachineCaptureParameters')

        request = build_capture_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._capture_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualMachineCaptureResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture'}  # type: ignore
    @distributed_trace_async
    async def begin_capture(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: "_models.VirtualMachineCaptureParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.VirtualMachineCaptureResult"]:
        """Captures the VM by copying virtual hard disks of the VM and outputs a template that can be used
        to create similar VMs.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param parameters: Parameters supplied to the Capture Virtual Machine operation.
        :type parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineCaptureParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either VirtualMachineCaptureResult or the
         result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineCaptureResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualMachineCaptureResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only fire the initial request when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._capture_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into the result model.
            response = pipeline_response.http_response
            deserialized = self._deserialize('VirtualMachineCaptureResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Final state of this LRO is read from the Location header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: "_models.VirtualMachine",
        **kwargs: Any
    ) -> "_models.VirtualMachine":
        """Issue the raw PUT; 200 = updated, 201 = created, both carry a
        VirtualMachine body."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualMachine"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'VirtualMachine')

        request = build_create_or_update_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if response.status_code == 200:
            deserialized = self._deserialize('VirtualMachine', pipeline_response)

        if response.status_code == 201:
            deserialized = self._deserialize('VirtualMachine', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'}  # type: ignore
    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: "_models.VirtualMachine",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.VirtualMachine"]:
        """The operation to create or update a virtual machine. Please note some properties can be set
        only during virtual machine creation.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param parameters: Parameters supplied to the Create Virtual Machine operation.
        :type parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachine
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either VirtualMachine or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachine]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualMachine"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only fire the initial PUT when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into the VM model.
            response = pipeline_response.http_response
            deserialized = self._deserialize('VirtualMachine', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized


        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'}  # type: ignore
    async def _update_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: "_models.VirtualMachineUpdate",
        **kwargs: Any
    ) -> "_models.VirtualMachine":
        """Issue the raw PATCH; only 200 is accepted and the updated
        VirtualMachine body is returned."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualMachine"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]

        _json = self._serialize.body(parameters, 'VirtualMachineUpdate')

        request = build_update_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)

        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        deserialized = self._deserialize('VirtualMachine', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'}  # type: ignore
    @distributed_trace_async
    async def begin_update(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: "_models.VirtualMachineUpdate",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.VirtualMachine"]:
        """The operation to update a virtual machine.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param parameters: Parameters supplied to the Update Virtual Machine operation.
        :type parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineUpdate
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either VirtualMachine or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachine]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualMachine"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only fire the initial PATCH when not resuming from a saved token.
        if cont_token is None:
            raw_result = await self._update_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal LRO response into the VM model.
            response = pipeline_response.http_response
            deserialized = self._deserialize('VirtualMachine', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized


        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'}  # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
vm_name: str,
force_deletion: Optional[bool] = None,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
force_deletion=force_deletion,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
    @distributed_trace_async
    async def begin_delete(
        self,
        resource_group_name: str,
        vm_name: str,
        force_deletion: Optional[bool] = None,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """The operation to delete a virtual machine.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param force_deletion: Optional parameter to force delete virtual machines.
        :type force_deletion: bool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        # Per-call poll delay; falls back to the client-wide configured interval.
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial DELETE. cls is overridden so the raw
            # pipeline response is returned for the poller to drive the LRO from.
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                force_deletion=force_deletion,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # error_map only applies to the initial request; do not forward it to polling.
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Delete returns no body; invoke the user's custom deserializer, if any.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started LRO from its saved continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
@distributed_trace_async
async def get(
self,
resource_group_name: str,
vm_name: str,
expand: Optional[Union[str, "_models.InstanceViewTypes"]] = None,
**kwargs: Any
) -> "_models.VirtualMachine":
"""Retrieves information about the model view or the instance view of a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param expand: The expand expression to apply on the operation. 'InstanceView' retrieves a
snapshot of the runtime properties of the virtual machine that is managed by the platform and
can change outside of control plane operations. 'UserData' retrieves the UserData property as
part of the VM model view that was provided by the user during the VM Create/Update operation.
:type expand: str or ~azure.mgmt.compute.v2021_04_01.models.InstanceViewTypes
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachine, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachine
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachine"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'} # type: ignore
@distributed_trace_async
async def instance_view(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> "_models.VirtualMachineInstanceView":
"""Retrieves information about the run-time state of a virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineInstanceView, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineInstanceView
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineInstanceView"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_instance_view_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self.instance_view.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineInstanceView', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
instance_view.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/instanceView'} # type: ignore
async def _convert_to_managed_disks_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_convert_to_managed_disks_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._convert_to_managed_disks_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_convert_to_managed_disks_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks'} # type: ignore
    @distributed_trace_async
    async def begin_convert_to_managed_disks(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Converts virtual machine disks from blob-based to managed disks. Virtual machine must be
        stop-deallocated before invoking this operation.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        # Per-call poll delay; falls back to the client-wide configured interval.
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial request. cls is overridden so the raw
            # pipeline response is returned for the poller to drive the LRO from.
            raw_result = await self._convert_to_managed_disks_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # error_map only applies to the initial request; do not forward it to polling.
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Operation returns no body; invoke the user's custom deserializer, if any.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started LRO from its saved continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_convert_to_managed_disks.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks'} # type: ignore
async def _deallocate_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_deallocate_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._deallocate_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_deallocate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate'} # type: ignore
    @distributed_trace_async
    async def begin_deallocate(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Shuts down the virtual machine and releases the compute resources. You are not billed for the
        compute resources that this virtual machine uses.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        # Per-call poll delay; falls back to the client-wide configured interval.
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial request. cls is overridden so the raw
            # pipeline response is returned for the poller to drive the LRO from.
            raw_result = await self._deallocate_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # error_map only applies to the initial request; do not forward it to polling.
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Operation returns no body; invoke the user's custom deserializer, if any.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started LRO from its saved continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_deallocate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate'} # type: ignore
@distributed_trace_async
async def generalize(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
"""Sets the OS state of the virtual machine to generalized. It is recommended to sysprep the
virtual machine before performing this operation. :code:`<br>`For Windows, please refer to
`Create a managed image of a generalized VM in Azure
<https://docs.microsoft.com/azure/virtual-machines/windows/capture-image-resource>`_.:code:`<br>`For
Linux, please refer to `How to create an image of a virtual machine or VHD
<https://docs.microsoft.com/azure/virtual-machines/linux/capture-image>`_.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_generalize_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self.generalize.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
generalize.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize'} # type: ignore
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.VirtualMachineListResult"]:
        """Lists all of the virtual machines in the specified resource group. Use the nextLink property in
        the response to get the next page of virtual machines.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VirtualMachineListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            if not next_link:
                # First page: build the request from the operation's URL template.
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # Subsequent pages: use the service-provided nextLink as the URL.
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # nextLink pages are always fetched with GET.
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("VirtualMachineListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # Return the link to the next page (None when exhausted) plus this page's items.
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines'} # type: ignore
    @distributed_trace
    def list_all(
        self,
        status_only: Optional[str] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.VirtualMachineListResult"]:
        """Lists all of the virtual machines in the specified subscription. Use the nextLink property in
        the response to get the next page of virtual machines.

        :param status_only: statusOnly=true enables fetching run time status of all Virtual Machines in
         the subscription.
        :type status_only: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VirtualMachineListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            if not next_link:
                # First page: build the request from the operation's URL template.
                request = build_list_all_request(
                    subscription_id=self._config.subscription_id,
                    status_only=status_only,
                    template_url=self.list_all.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # Subsequent pages: use the service-provided nextLink as the URL.
                request = build_list_all_request(
                    subscription_id=self._config.subscription_id,
                    status_only=status_only,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                # nextLink pages are always fetched with GET.
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("VirtualMachineListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # Return the link to the next page (None when exhausted) plus this page's items.
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines'} # type: ignore
    @distributed_trace
    def list_available_sizes(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.VirtualMachineSizeListResult"]:
        """Lists all available virtual machine sizes to which the specified virtual machine can be
        resized.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either VirtualMachineSizeListResult or the result of
         cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineSizeListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineSizeListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        def prepare_request(next_link=None):
            if not next_link:
                # First page: build the request from the operation's URL template.
                request = build_list_available_sizes_request(
                    resource_group_name=resource_group_name,
                    vm_name=vm_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_available_sizes.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_available_sizes_request(
                    resource_group_name=resource_group_name,
                    vm_name=vm_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # This API is not paged: always signal that there is no next page.
            return None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_available_sizes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes'} # type: ignore
async def _power_off_initial(
self,
resource_group_name: str,
vm_name: str,
skip_shutdown: Optional[bool] = False,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_power_off_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
skip_shutdown=skip_shutdown,
template_url=self._power_off_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_power_off_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff'} # type: ignore
    @distributed_trace_async
    async def begin_power_off(
        self,
        resource_group_name: str,
        vm_name: str,
        skip_shutdown: Optional[bool] = False,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """The operation to power off (stop) a virtual machine. The virtual machine can be restarted with
        the same provisioned resources. You are still charged for this virtual machine.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param skip_shutdown: The parameter to request non-graceful VM shutdown. True value for this
         flag indicates non-graceful shutdown whereas false indicates otherwise. Default value for this
         flag is false if not specified.
        :type skip_shutdown: bool
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        # Per-call poll delay; falls back to the client-wide configured interval.
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial request. cls is overridden so the raw
            # pipeline response is returned for the poller to drive the LRO from.
            raw_result = await self._power_off_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                skip_shutdown=skip_shutdown,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # error_map only applies to the initial request; do not forward it to polling.
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Operation returns no body; invoke the user's custom deserializer, if any.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started LRO from its saved continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_power_off.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff'} # type: ignore
async def _reapply_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_reapply_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._reapply_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reapply_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reapply'} # type: ignore
    @distributed_trace_async
    async def begin_reapply(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """The operation to reapply a virtual machine's state.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        # Per-call poll delay; falls back to the client-wide configured interval.
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # Fresh operation: issue the initial request. cls is overridden so the raw
            # pipeline response is returned for the poller to drive the LRO from.
            raw_result = await self._reapply_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # error_map only applies to the initial request; do not forward it to polling.
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # Operation returns no body; invoke the user's custom deserializer, if any.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started LRO from its saved continuation token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reapply.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reapply'} # type: ignore
async def _restart_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_restart_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._restart_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_restart_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart'} # type: ignore
    @distributed_trace_async
    async def begin_restart(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """The operation to restart a virtual machine.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial HTTP request when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._restart_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                # Identity callback keeps the raw pipeline response for the poller.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # error_map was consumed by the initial call; drop it before polling sees kwargs.
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # This LRO returns no body; invoke the custom deserializer only if supplied.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart'}  # type: ignore
async def _start_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_start_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._start_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start'} # type: ignore
    @distributed_trace_async
    async def begin_start(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """The operation to start a virtual machine.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial HTTP request when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._start_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                # Identity callback keeps the raw pipeline response for the poller.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # error_map was consumed by the initial call; drop it before polling sees kwargs.
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # This LRO returns no body; invoke the custom deserializer only if supplied.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start'}  # type: ignore
async def _redeploy_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_redeploy_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._redeploy_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_redeploy_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy'} # type: ignore
    @distributed_trace_async
    async def begin_redeploy(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Shuts down the virtual machine, moves it to a new node, and powers it back on.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial HTTP request when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._redeploy_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                # Identity callback keeps the raw pipeline response for the poller.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # error_map was consumed by the initial call; drop it before polling sees kwargs.
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # This LRO returns no body; invoke the custom deserializer only if supplied.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_redeploy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy'}  # type: ignore
async def _reimage_initial(
self,
resource_group_name: str,
vm_name: str,
parameters: Optional["_models.VirtualMachineReimageParameters"] = None,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
if parameters is not None:
_json = self._serialize.body(parameters, 'VirtualMachineReimageParameters')
else:
_json = None
request = build_reimage_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._reimage_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_reimage_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reimage'} # type: ignore
    @distributed_trace_async
    async def begin_reimage(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: Optional["_models.VirtualMachineReimageParameters"] = None,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Reimages the virtual machine which has an ephemeral OS disk back to its initial state.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param parameters: Parameters supplied to the Reimage Virtual Machine operation.
        :type parameters: ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineReimageParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial HTTP request when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._reimage_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                parameters=parameters,
                content_type=content_type,
                # Identity callback keeps the raw pipeline response for the poller.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # error_map was consumed by the initial call; drop it before polling sees kwargs.
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # This LRO returns no body; invoke the custom deserializer only if supplied.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reimage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reimage'}  # type: ignore
@distributed_trace_async
async def retrieve_boot_diagnostics_data(
self,
resource_group_name: str,
vm_name: str,
sas_uri_expiration_time_in_minutes: Optional[int] = None,
**kwargs: Any
) -> "_models.RetrieveBootDiagnosticsDataResult":
"""The operation to retrieve SAS URIs for a virtual machine's boot diagnostic logs.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:param sas_uri_expiration_time_in_minutes: Expiration duration in minutes for the SAS URIs with
a value between 1 to 1440 minutes. :code:`<br>`:code:`<br>`NOTE: If not specified, SAS URIs
will be generated with a default expiration duration of 120 minutes.
:type sas_uri_expiration_time_in_minutes: int
:keyword callable cls: A custom type or function that will be passed the direct response
:return: RetrieveBootDiagnosticsDataResult, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2021_04_01.models.RetrieveBootDiagnosticsDataResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.RetrieveBootDiagnosticsDataResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_retrieve_boot_diagnostics_data_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
sas_uri_expiration_time_in_minutes=sas_uri_expiration_time_in_minutes,
template_url=self.retrieve_boot_diagnostics_data.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('RetrieveBootDiagnosticsDataResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
retrieve_boot_diagnostics_data.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/retrieveBootDiagnosticsData'} # type: ignore
async def _perform_maintenance_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_perform_maintenance_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._perform_maintenance_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_perform_maintenance_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance'} # type: ignore
    @distributed_trace_async
    async def begin_perform_maintenance(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """The operation to perform maintenance on a virtual machine.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial HTTP request when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._perform_maintenance_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                # Identity callback keeps the raw pipeline response for the poller.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # error_map was consumed by the initial call; drop it before polling sees kwargs.
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # This LRO returns no body; invoke the custom deserializer only if supplied.
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_perform_maintenance.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance'}  # type: ignore
@distributed_trace_async
async def simulate_eviction(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
"""The operation to simulate the eviction of spot virtual machine.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param vm_name: The name of the virtual machine.
:type vm_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_simulate_eviction_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self.simulate_eviction.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
simulate_eviction.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/simulateEviction'} # type: ignore
async def _assess_patches_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> Optional["_models.VirtualMachineAssessPatchesResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualMachineAssessPatchesResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_assess_patches_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._assess_patches_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineAssessPatchesResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_assess_patches_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/assessPatches'} # type: ignore
    @distributed_trace_async
    async def begin_assess_patches(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.VirtualMachineAssessPatchesResult"]:
        """Assess patches on the VM.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either VirtualMachineAssessPatchesResult or
         the result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineAssessPatchesResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualMachineAssessPatchesResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial HTTP request when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._assess_patches_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                # Identity callback keeps the raw pipeline response for the poller.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # error_map was consumed by the initial call; drop it before polling sees kwargs.
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # NOTE(review): ``response`` is assigned but unused here (generated code).
            response = pipeline_response.http_response
            deserialized = self._deserialize('VirtualMachineAssessPatchesResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # This LRO publishes its final result at the Location header URL.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_assess_patches.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/assessPatches'}  # type: ignore
async def _install_patches_initial(
self,
resource_group_name: str,
vm_name: str,
install_patches_input: "_models.VirtualMachineInstallPatchesParameters",
**kwargs: Any
) -> Optional["_models.VirtualMachineInstallPatchesResult"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.VirtualMachineInstallPatchesResult"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(install_patches_input, 'VirtualMachineInstallPatchesParameters')
request = build_install_patches_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
content_type=content_type,
json=_json,
template_url=self._install_patches_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('VirtualMachineInstallPatchesResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_install_patches_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/installPatches'} # type: ignore
    @distributed_trace_async
    async def begin_install_patches(
        self,
        resource_group_name: str,
        vm_name: str,
        install_patches_input: "_models.VirtualMachineInstallPatchesParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.VirtualMachineInstallPatchesResult"]:
        """Installs patches on the VM.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param install_patches_input: Input for InstallPatches as directly received by the API.
        :type install_patches_input:
         ~azure.mgmt.compute.v2021_04_01.models.VirtualMachineInstallPatchesParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either VirtualMachineInstallPatchesResult
         or the result of cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.VirtualMachineInstallPatchesResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.VirtualMachineInstallPatchesResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial HTTP request when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = await self._install_patches_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                install_patches_input=install_patches_input,
                content_type=content_type,
                # Identity callback keeps the raw pipeline response for the poller.
                cls=lambda x,y,z: x,
                **kwargs
            )
        # error_map was consumed by the initial call; drop it before polling sees kwargs.
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            # NOTE(review): ``response`` is assigned but unused here (generated code).
            response = pipeline_response.http_response
            deserialized = self._deserialize('VirtualMachineInstallPatchesResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # This LRO publishes its final result at the Location header URL.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_install_patches.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/installPatches'}  # type: ignore
    async def _run_command_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: "_models.RunCommandInput",
        **kwargs: Any
    ) -> Optional["_models.RunCommandResult"]:
        """Send the initial POST for the Run Command long-running operation.

        Serializes ``parameters`` as ``RunCommandInput`` and posts it to the VM's
        ``.../runCommand`` endpoint. Returns the deserialized ``RunCommandResult``
        when the service answers 200, or ``None`` when it answers 202 (operation
        accepted; the poller in :meth:`begin_run_command` retrieves the result).

        :raises ~azure.core.exceptions.HttpResponseError: on any other status code.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[Optional["_models.RunCommandResult"]]
        # Default mapping of well-known status codes to azure-core exceptions;
        # callers may extend/override via the 'error_map' keyword.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        _json = self._serialize.body(parameters, 'RunCommandInput')
        request = build_run_command_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._run_command_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # 202 carries no body worth deserializing here; leave the result as None.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('RunCommandResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    _run_command_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand'}  # type: ignore
    @distributed_trace_async
    async def begin_run_command(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: "_models.RunCommandInput",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.RunCommandResult"]:
        """Run command on the VM.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param vm_name: The name of the virtual machine.
        :type vm_name: str
        :param parameters: Parameters supplied to the Run command operation.
        :type parameters: ~azure.mgmt.compute.v2021_04_01.models.RunCommandInput
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
         this operation to not poll, or pass in your own initialized polling object for a personal
         polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either RunCommandResult or the result of
         cls(response)
        :rtype:
         ~azure.core.polling.AsyncLROPoller[~azure.mgmt.compute.v2021_04_01.models.RunCommandResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")  # type: Optional[str]
        polling = kwargs.pop('polling', True)  # type: Union[bool, azure.core.polling.AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.RunCommandResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # First call: fire the initial request. cls=lambda returns the raw
            # pipeline response so the poller can read status/headers itself.
            raw_result = await self._run_command_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response into the user-facing model.
            response = pipeline_response.http_response
            deserialized = self._deserialize('RunCommandResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # 'final-state-via: location' tells ARM polling to fetch the terminal
        # resource from the Location header (azure-core LRO option).
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously saved poller instead of starting a new operation.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_run_command.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand'}  # type: ignore
| 46.167319 | 873 | 0.668468 |
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._virtual_machines_operations import build_assess_patches_request_initial, build_capture_request_initial, build_convert_to_managed_disks_request_initial, build_create_or_update_request_initial, build_deallocate_request_initial, build_delete_request_initial, build_generalize_request, build_get_request, build_install_patches_request_initial, build_instance_view_request, build_list_all_request, build_list_available_sizes_request, build_list_by_location_request, build_list_request, build_perform_maintenance_request_initial, build_power_off_request_initial, build_reapply_request_initial, build_redeploy_request_initial, build_reimage_request_initial, build_restart_request_initial, build_retrieve_boot_diagnostics_data_request, build_run_command_request_initial, build_simulate_eviction_request, build_start_request_initial, build_update_request_initial
T = TypeVar('T')
# Optional response hook: receives the pipeline response, the deserialized body and
# the response headers, and may return a caller-defined type in place of the model.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class VirtualMachinesOperations:
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    @distributed_trace
    def list_by_location(
        self,
        location: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.VirtualMachineListResult"]:
        """Return an async pager over all VMs in the subscription for the given location.

        :param location: The location for which virtual machines are queried.
        :type location: str
        :return: An iterator-like instance of VirtualMachineListResult pages.
        :rtype: ~azure.core.async_paging.AsyncItemPaged
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page: templated operation URL. Later pages: follow next_link
            # verbatim, forcing GET since the link is an opaque absolute URL.
            if not next_link:
                request = build_list_by_location_request(
                    location=location,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_by_location.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_by_location_request(
                    location=location,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Turn one HTTP page into (next_link, items).
            deserialized = self._deserialize("VirtualMachineListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_location.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/virtualMachines'}
    async def _capture_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: "_models.VirtualMachineCaptureParameters",
        **kwargs: Any
    ) -> Optional["_models.VirtualMachineCaptureResult"]:
        """Send the initial POST for the capture long-running operation.

        Returns the deserialized ``VirtualMachineCaptureResult`` on 200, or ``None``
        on 202 (operation accepted; :meth:`begin_capture` polls for the result).
        """
        cls = kwargs.pop('cls', None)
        # Well-known status codes mapped to azure-core exceptions; extendable via 'error_map'.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")
        _json = self._serialize.body(parameters, 'VirtualMachineCaptureParameters')
        request = build_capture_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._capture_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualMachineCaptureResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _capture_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture'}
    @distributed_trace_async
    async def begin_capture(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: "_models.VirtualMachineCaptureParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.VirtualMachineCaptureResult"]:
        """Capture the VM as a long-running operation.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :param parameters: Parameters supplied to the Capture Virtual Machine operation.
        :keyword str continuation_token: Resume a poller from a previously saved state.
        :keyword polling: True (default AsyncARMPolling), False (no polling) or a
         custom :class:`~azure.core.polling.AsyncPollingMethod`.
        :return: AsyncLROPoller resolving to VirtualMachineCaptureResult (or cls(response)).
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            # cls=lambda hands the raw pipeline response to the poller.
            raw_result = await self._capture_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the terminal response for the caller.
            response = pipeline_response.http_response
            deserialized = self._deserialize('VirtualMachineCaptureResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # 'final-state-via: location': terminal resource is fetched per the Location header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_capture.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/capture'}
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: "_models.VirtualMachine",
        **kwargs: Any
    ) -> "_models.VirtualMachine":
        """Send the initial PUT for the create-or-update long-running operation.

        Both 200 (updated) and 201 (created) deserialize to a ``VirtualMachine``.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")
        _json = self._serialize.body(parameters, 'VirtualMachine')
        request = build_create_or_update_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._create_or_update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualMachine', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('VirtualMachine', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'}
    @distributed_trace_async
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: "_models.VirtualMachine",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.VirtualMachine"]:
        """Create or update a virtual machine as a long-running operation.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :param parameters: Parameters supplied to the Create Virtual Machine operation.
        :keyword str continuation_token: Resume a poller from a previously saved state.
        :keyword polling: True (default AsyncARMPolling), False, or a custom polling method.
        :return: AsyncLROPoller resolving to VirtualMachine (or cls(response)).
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('VirtualMachine', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Default ARM polling (no special final-state option for PUT operations here).
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'}
    async def _update_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: "_models.VirtualMachineUpdate",
        **kwargs: Any
    ) -> "_models.VirtualMachine":
        """Send the initial PATCH for the update long-running operation.

        Only a 200 response is accepted; its body deserializes to ``VirtualMachine``.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")
        _json = self._serialize.body(parameters, 'VirtualMachineUpdate')
        request = build_update_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._update_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('VirtualMachine', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'}
    @distributed_trace_async
    async def begin_update(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: "_models.VirtualMachineUpdate",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.VirtualMachine"]:
        """Update a virtual machine as a long-running operation.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :param parameters: Parameters supplied to the Update Virtual Machine operation.
        :keyword str continuation_token: Resume a poller from a previously saved state.
        :keyword polling: True (default AsyncARMPolling), False, or a custom polling method.
        :return: AsyncLROPoller resolving to VirtualMachine (or cls(response)).
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        content_type = kwargs.pop('content_type', "application/json")
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            raw_result = await self._update_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('VirtualMachine', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'}
    async def _delete_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        force_deletion: Optional[bool] = None,
        **kwargs: Any
    ) -> None:
        """Send the initial DELETE for the delete long-running operation.

        Accepts 200/202/204; there is no response body to deserialize.

        :param force_deletion: Optional flag forwarded to the request builder.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_delete_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            force_deletion=force_deletion,
            template_url=self._delete_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'}
    @distributed_trace_async
    async def begin_delete(
        self,
        resource_group_name: str,
        vm_name: str,
        force_deletion: Optional[bool] = None,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Delete a virtual machine as a long-running operation.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :param force_deletion: Optional flag forwarded to the delete request.
        :keyword str continuation_token: Resume a poller from a previously saved state.
        :keyword polling: True (default AsyncARMPolling), False, or a custom polling method.
        :return: AsyncLROPoller resolving to None (or cls(response)).
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                force_deletion=force_deletion,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # No body: only invoke the optional response hook.
            if cls:
                return cls(pipeline_response, None, {})

        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'}
@distributed_trace_async
async def get(
self,
resource_group_name: str,
vm_name: str,
expand: Optional[Union[str, "_models.InstanceViewTypes"]] = None,
**kwargs: Any
) -> "_models.VirtualMachine":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
expand=expand,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachine', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}'}
@distributed_trace_async
async def instance_view(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> "_models.VirtualMachineInstanceView":
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_instance_view_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self.instance_view.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineInstanceView', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
instance_view.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/instanceView'}
    async def _convert_to_managed_disks_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial POST for the convert-to-managed-disks long-running operation.

        Accepts 200/202; no response body is deserialized.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_convert_to_managed_disks_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            template_url=self._convert_to_managed_disks_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _convert_to_managed_disks_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks'}
    @distributed_trace_async
    async def begin_convert_to_managed_disks(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Convert the VM's disks to managed disks as a long-running operation.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :keyword str continuation_token: Resume a poller from a previously saved state.
        :keyword polling: True (default AsyncARMPolling), False, or a custom polling method.
        :return: AsyncLROPoller resolving to None (or cls(response)).
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            raw_result = await self._convert_to_managed_disks_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # No body: only invoke the optional response hook.
            if cls:
                return cls(pipeline_response, None, {})

        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_convert_to_managed_disks.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/convertToManagedDisks'}
async def _deallocate_initial(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_deallocate_request_initial(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self._deallocate_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_deallocate_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate'}
    @distributed_trace_async
    async def begin_deallocate(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Deallocate the virtual machine as a long-running operation.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :keyword str continuation_token: Resume a poller from a previously saved state.
        :keyword polling: True (default AsyncARMPolling), False, or a custom polling method.
        :return: AsyncLROPoller resolving to None (or cls(response)).
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        if cont_token is None:
            raw_result = await self._deallocate_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)

        def get_long_running_output(pipeline_response):
            # No body: only invoke the optional response hook.
            if cls:
                return cls(pipeline_response, None, {})

        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_deallocate.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/deallocate'}
@distributed_trace_async
async def generalize(
self,
resource_group_name: str,
vm_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None)
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_generalize_request(
resource_group_name=resource_group_name,
vm_name=vm_name,
subscription_id=self._config.subscription_id,
template_url=self.generalize.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
generalize.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/generalize'}
    @distributed_trace
    def list(
        self,
        resource_group_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.VirtualMachineListResult"]:
        """Return an async pager over all VMs in the specified resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :return: An iterator-like instance of VirtualMachineListResult pages.
        :rtype: ~azure.core.async_paging.AsyncItemPaged
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the templated URL; later pages follow next_link with GET.
            if not next_link:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_request(
                    resource_group_name=resource_group_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Turn one HTTP page into (next_link, items).
            deserialized = self._deserialize("VirtualMachineListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines'}
    @distributed_trace
    def list_all(
        self,
        status_only: Optional[str] = None,
        **kwargs: Any
    ) -> AsyncIterable["_models.VirtualMachineListResult"]:
        """Return an async pager over all VMs in the subscription.

        :param status_only: Optional filter forwarded to the request builder.
        :type status_only: str
        :return: An iterator-like instance of VirtualMachineListResult pages.
        :rtype: ~azure.core.async_paging.AsyncItemPaged
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            # First page uses the templated URL; later pages follow next_link with GET.
            if not next_link:
                request = build_list_all_request(
                    subscription_id=self._config.subscription_id,
                    status_only=status_only,
                    template_url=self.list_all.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_all_request(
                    subscription_id=self._config.subscription_id,
                    status_only=status_only,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            # Turn one HTTP page into (next_link, items).
            deserialized = self._deserialize("VirtualMachineListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/virtualMachines'}
    @distributed_trace
    def list_available_sizes(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.VirtualMachineSizeListResult"]:
        """Return an async pager over the VM sizes the given virtual machine can be resized to.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :return: An iterator-like instance of VirtualMachineSizeListResult pages.
        :rtype: ~azure.core.async_paging.AsyncItemPaged
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))

        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_available_sizes_request(
                    resource_group_name=resource_group_name,
                    vm_name=vm_name,
                    subscription_id=self._config.subscription_id,
                    template_url=self.list_available_sizes.metadata['url'],
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = build_list_available_sizes_request(
                    resource_group_name=resource_group_name,
                    vm_name=vm_name,
                    subscription_id=self._config.subscription_id,
                    template_url=next_link,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        async def extract_data(pipeline_response):
            deserialized = self._deserialize("VirtualMachineSizeListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # This API returns a single page: next link is always None.
            return None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_available_sizes.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/vmSizes'}
    async def _power_off_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        skip_shutdown: Optional[bool] = False,
        **kwargs: Any
    ) -> None:
        """Send the initial powerOff request for the LRO driven by begin_power_off.

        :param skip_shutdown: Forwarded to the request builder.
            NOTE(review): presumably True skips the graceful OS shutdown — confirm
            against the Compute REST API docs.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_power_off_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            skip_shutdown=skip_shutdown,
            template_url=self._power_off_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = completed synchronously, 202 = accepted for async processing.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _power_off_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff'}
    @distributed_trace_async
    async def begin_power_off(
        self,
        resource_group_name: str,
        vm_name: str,
        skip_shutdown: Optional[bool] = False,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Begin the long-running power-off operation for a virtual machine.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :param skip_shutdown: Forwarded to the initial powerOff request.
        :return: An AsyncLROPoller yielding None on completion.
        """
        # polling: True -> default ARM polling, False -> no polling, else a custom polling method.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        # Only issue the initial request when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._power_off_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                skip_shutdown=skip_shutdown,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_power_off.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/powerOff'}
    async def _reapply_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial reapply request for the LRO driven by begin_reapply."""
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_reapply_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            template_url=self._reapply_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = completed synchronously, 202 = accepted for async processing.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _reapply_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reapply'}
    @distributed_trace_async
    async def begin_reapply(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Begin the long-running reapply operation for a virtual machine.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :return: An AsyncLROPoller yielding None on completion.
        """
        # polling: True -> default ARM polling, False -> no polling, else a custom polling method.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        # Only issue the initial request when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._reapply_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reapply.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reapply'}
    async def _restart_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial restart request for the LRO driven by begin_restart."""
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_restart_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            template_url=self._restart_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = completed synchronously, 202 = accepted for async processing.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _restart_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart'}
    @distributed_trace_async
    async def begin_restart(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Begin the long-running restart operation for a virtual machine.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :return: An AsyncLROPoller yielding None on completion.
        """
        # polling: True -> default ARM polling, False -> no polling, else a custom polling method.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        # Only issue the initial request when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._restart_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_restart.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/restart'}
    async def _start_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial start request for the LRO driven by begin_start."""
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_start_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            template_url=self._start_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = completed synchronously, 202 = accepted for async processing.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start'}
    @distributed_trace_async
    async def begin_start(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Begin the long-running start operation for a virtual machine.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :return: An AsyncLROPoller yielding None on completion.
        """
        # polling: True -> default ARM polling, False -> no polling, else a custom polling method.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        # Only issue the initial request when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._start_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/start'}
    async def _redeploy_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial redeploy request for the LRO driven by begin_redeploy."""
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_redeploy_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            template_url=self._redeploy_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = completed synchronously, 202 = accepted for async processing.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _redeploy_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy'}
    @distributed_trace_async
    async def begin_redeploy(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Begin the long-running redeploy operation for a virtual machine.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :return: An AsyncLROPoller yielding None on completion.
        """
        # polling: True -> default ARM polling, False -> no polling, else a custom polling method.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        # Only issue the initial request when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._redeploy_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_redeploy.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/redeploy'}
    async def _reimage_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: Optional["_models.VirtualMachineReimageParameters"] = None,
        **kwargs: Any
    ) -> None:
        """Send the initial reimage request for the LRO driven by begin_reimage.

        :param parameters: Optional reimage parameters; serialized to JSON when given,
            otherwise the request is sent without a body.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")
        if parameters is not None:
            _json = self._serialize.body(parameters, 'VirtualMachineReimageParameters')
        else:
            _json = None
        request = build_reimage_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._reimage_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = completed synchronously, 202 = accepted for async processing.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _reimage_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reimage'}
    @distributed_trace_async
    async def begin_reimage(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: Optional["_models.VirtualMachineReimageParameters"] = None,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Begin the long-running reimage operation for a virtual machine.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :param parameters: Optional reimage parameters forwarded to the initial request.
        :return: An AsyncLROPoller yielding None on completion.
        """
        content_type = kwargs.pop('content_type', "application/json")
        # polling: True -> default ARM polling, False -> no polling, else a custom polling method.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        # Only issue the initial request when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._reimage_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_reimage.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/reimage'}
    @distributed_trace_async
    async def retrieve_boot_diagnostics_data(
        self,
        resource_group_name: str,
        vm_name: str,
        sas_uri_expiration_time_in_minutes: Optional[int] = None,
        **kwargs: Any
    ) -> "_models.RetrieveBootDiagnosticsDataResult":
        """Retrieve SAS URIs for the virtual machine's boot diagnostics data.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :param sas_uri_expiration_time_in_minutes: Optional expiry for the returned SAS URIs.
        :return: The deserialized RetrieveBootDiagnosticsDataResult.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_retrieve_boot_diagnostics_data_request(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            sas_uri_expiration_time_in_minutes=sas_uri_expiration_time_in_minutes,
            template_url=self.retrieve_boot_diagnostics_data.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('RetrieveBootDiagnosticsDataResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    retrieve_boot_diagnostics_data.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/retrieveBootDiagnosticsData'}
    async def _perform_maintenance_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> None:
        """Send the initial performMaintenance request for the LRO driven by
        begin_perform_maintenance."""
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_perform_maintenance_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            template_url=self._perform_maintenance_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = completed synchronously, 202 = accepted for async processing.
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _perform_maintenance_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance'}
    @distributed_trace_async
    async def begin_perform_maintenance(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller[None]:
        """Begin the long-running perform-maintenance operation for a virtual machine.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :return: An AsyncLROPoller yielding None on completion.
        """
        # polling: True -> default ARM polling, False -> no polling, else a custom polling method.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        # Only issue the initial request when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._perform_maintenance_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_perform_maintenance.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/performMaintenance'}
    @distributed_trace_async
    async def simulate_eviction(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> None:
        """Simulate the eviction of the specified virtual machine (no LRO; single request).

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_simulate_eviction_request(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            template_url=self.simulate_eviction.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # The service responds 204 No Content on success.
        if response.status_code not in [204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    simulate_eviction.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/simulateEviction'}
    async def _assess_patches_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> Optional["_models.VirtualMachineAssessPatchesResult"]:
        """Send the initial assessPatches request for the LRO driven by
        begin_assess_patches.

        :return: The deserialized result when the service answers 200 immediately,
            None when it answers 202 (operation still in progress).
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        request = build_assess_patches_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            template_url=self._assess_patches_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualMachineAssessPatchesResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _assess_patches_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/assessPatches'}
    @distributed_trace_async
    async def begin_assess_patches(
        self,
        resource_group_name: str,
        vm_name: str,
        **kwargs: Any
    ) -> AsyncLROPoller["_models.VirtualMachineAssessPatchesResult"]:
        """Begin the long-running patch-assessment operation for a virtual machine.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :return: An AsyncLROPoller yielding VirtualMachineAssessPatchesResult.
        """
        # polling: True -> default ARM polling, False -> no polling, else a custom polling method.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        # Only issue the initial request when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._assess_patches_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('VirtualMachineAssessPatchesResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # The final result is fetched from the Location header ('final-state-via': 'location').
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_assess_patches.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/assessPatches'}
    async def _install_patches_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        install_patches_input: "_models.VirtualMachineInstallPatchesParameters",
        **kwargs: Any
    ) -> Optional["_models.VirtualMachineInstallPatchesResult"]:
        """Send the initial installPatches request for the LRO driven by
        begin_install_patches.

        :param install_patches_input: Patch-installation parameters, serialized to JSON.
        :return: The deserialized result on 200, None on 202 (still in progress).
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")
        _json = self._serialize.body(install_patches_input, 'VirtualMachineInstallPatchesParameters')
        request = build_install_patches_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._install_patches_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('VirtualMachineInstallPatchesResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _install_patches_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/installPatches'}
    @distributed_trace_async
    async def begin_install_patches(
        self,
        resource_group_name: str,
        vm_name: str,
        install_patches_input: "_models.VirtualMachineInstallPatchesParameters",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.VirtualMachineInstallPatchesResult"]:
        """Begin the long-running patch-installation operation for a virtual machine.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :param install_patches_input: Patch-installation parameters forwarded to the initial request.
        :return: An AsyncLROPoller yielding VirtualMachineInstallPatchesResult.
        """
        content_type = kwargs.pop('content_type', "application/json")
        # polling: True -> default ARM polling, False -> no polling, else a custom polling method.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        # Only issue the initial request when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._install_patches_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                install_patches_input=install_patches_input,
                content_type=content_type,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('VirtualMachineInstallPatchesResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # The final result is fetched from the Location header ('final-state-via': 'location').
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_install_patches.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/installPatches'}
    async def _run_command_initial(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: "_models.RunCommandInput",
        **kwargs: Any
    ) -> Optional["_models.RunCommandResult"]:
        """Send the initial runCommand request for the LRO driven by begin_run_command.

        :param parameters: The run-command input, serialized to JSON.
        :return: The deserialized RunCommandResult on 200, None on 202 (still in progress).
        """
        cls = kwargs.pop('cls', None)
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        content_type = kwargs.pop('content_type', "application/json")
        _json = self._serialize.body(parameters, 'RunCommandInput')
        request = build_run_command_request_initial(
            resource_group_name=resource_group_name,
            vm_name=vm_name,
            subscription_id=self._config.subscription_id,
            content_type=content_type,
            json=_json,
            template_url=self._run_command_initial.metadata['url'],
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize('RunCommandResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _run_command_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand'}
    @distributed_trace_async
    async def begin_run_command(
        self,
        resource_group_name: str,
        vm_name: str,
        parameters: "_models.RunCommandInput",
        **kwargs: Any
    ) -> AsyncLROPoller["_models.RunCommandResult"]:
        """Begin the long-running run-command operation on a virtual machine.

        :param resource_group_name: The name of the resource group.
        :param vm_name: The name of the virtual machine.
        :param parameters: The run-command input forwarded to the initial request.
        :return: An AsyncLROPoller yielding RunCommandResult.
        """
        content_type = kwargs.pop('content_type', "application/json")
        # polling: True -> default ARM polling, False -> no polling, else a custom polling method.
        polling = kwargs.pop('polling', True)
        cls = kwargs.pop('cls', None)
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)
        # Only issue the initial request when not resuming from a continuation token.
        if cont_token is None:
            raw_result = await self._run_command_initial(
                resource_group_name=resource_group_name,
                vm_name=vm_name,
                parameters=parameters,
                content_type=content_type,
                cls=lambda x,y,z: x,  # keep the raw pipeline response for the poller
                **kwargs
            )
        kwargs.pop('error_map', None)
        def get_long_running_output(pipeline_response):
            response = pipeline_response.http_response
            deserialized = self._deserialize('RunCommandResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # The final result is fetched from the Location header ('final-state-via': 'location').
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_run_command.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/virtualMachines/{vmName}/runCommand'}
| true | true |
f7248f210691f99763f6311365ed9eb869ed4aa4 | 20,439 | py | Python | mlrun/runtimes/pod.py | Michaelliv/mlrun | f155836f71e86cfcc573bcf1aa35762d72feeb5a | [
"Apache-2.0"
] | null | null | null | mlrun/runtimes/pod.py | Michaelliv/mlrun | f155836f71e86cfcc573bcf1aa35762d72feeb5a | [
"Apache-2.0"
] | null | null | null | mlrun/runtimes/pod.py | Michaelliv/mlrun | f155836f71e86cfcc573bcf1aa35762d72feeb5a | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import typing
import uuid
from enum import Enum
from kfp.dsl import ContainerOp, _container_op
from kubernetes import client
import mlrun.errors
import mlrun.utils.regex
from ..config import config as mlconf
from ..utils import logger, normalize_name, update_in, verify_field_regex
from .base import BaseRuntime, FunctionSpec
from .utils import (
apply_kfp,
generate_resources,
get_item_name,
get_resource_labels,
set_named_item,
)
class KubeResourceSpec(FunctionSpec):
    """Function spec for runtimes that materialize as Kubernetes pods.

    Extends :class:`FunctionSpec` with pod-level settings: volumes and
    volume mounts, environment variables, resource requests/limits,
    replica count, image-pull configuration and scheduling constraints
    (node name, node selector, affinity, priority class).
    """
    def __init__(
        self,
        command=None,
        args=None,
        image=None,
        mode=None,
        volumes=None,
        volume_mounts=None,
        env=None,
        resources=None,
        default_handler=None,
        pythonpath=None,
        entry_points=None,
        description=None,
        workdir=None,
        replicas=None,
        image_pull_policy=None,
        service_account=None,
        build=None,
        image_pull_secret=None,
        node_name=None,
        node_selector=None,
        affinity=None,
        mount_applied=False,
        priority_class_name=None,
    ):
        super().__init__(
            command=command,
            args=args,
            image=image,
            mode=mode,
            build=build,
            entry_points=entry_points,
            description=description,
            workdir=workdir,
            default_handler=default_handler,
            pythonpath=pythonpath,
            mount_applied=mount_applied,
        )
        # volumes/mounts are stored internally as dicts (keyed by volume name /
        # mount hash) so repeated updates de-duplicate; the public properties
        # expose them as plain lists
        self._volumes = {}
        self._volume_mounts = {}
        self.volumes = volumes or []
        self.volume_mounts = volume_mounts or []
        self.env = env or []
        self.resources = resources or {}
        self.replicas = replicas
        self.image_pull_policy = image_pull_policy
        self.service_account = service_account
        self.image_pull_secret = image_pull_secret
        self.node_name = node_name
        # fall back to the config-level defaults when not explicitly given
        self.node_selector = (
            node_selector or mlrun.mlconf.get_default_function_node_selector()
        )
        self._affinity = affinity
        self.priority_class_name = (
            priority_class_name or mlrun.mlconf.default_function_priority_class_name
        )
    @property
    def volumes(self) -> list:
        # list view over the internal name-keyed dict
        return list(self._volumes.values())
    @volumes.setter
    def volumes(self, volumes):
        # setter replaces (does not merge) the current set of volumes
        self._volumes = {}
        if volumes:
            for vol in volumes:
                set_named_item(self._volumes, vol)
    @property
    def volume_mounts(self) -> list:
        # list view over the internal hash-keyed dict
        return list(self._volume_mounts.values())
    @volume_mounts.setter
    def volume_mounts(self, volume_mounts):
        # setter replaces (does not merge) the current set of mounts
        self._volume_mounts = {}
        if volume_mounts:
            for volume_mount in volume_mounts:
                self._set_volume_mount(volume_mount)
    @property
    def affinity(self) -> client.V1Affinity:
        return self._affinity
    @affinity.setter
    def affinity(self, affinity):
        # accepts either a V1Affinity instance or its dict representation
        self._affinity = self._transform_affinity_to_k8s_class_instance(affinity)
    def to_dict(self, fields=None, exclude=None):
        """Serialize the spec to a dict, sanitizing affinity separately
        since it is a kubernetes client object rather than plain data."""
        # NOTE(review): the caller-supplied ``exclude`` is not forwarded here,
        # only "affinity" is excluded - confirm this is intentional
        struct = super().to_dict(fields, exclude=["affinity"])
        api = client.ApiClient()
        struct["affinity"] = api.sanitize_for_serialization(self.affinity)
        return struct
    def update_vols_and_mounts(self, volumes, volume_mounts):
        """Merge additional volumes/mounts into the spec (de-duplicated)."""
        if volumes:
            for vol in volumes:
                set_named_item(self._volumes, vol)
        if volume_mounts:
            for volume_mount in volume_mounts:
                self._set_volume_mount(volume_mount)
    def _get_affinity_as_k8s_class_instance(self):
        # placeholder - not implemented; use
        # _transform_affinity_to_k8s_class_instance instead
        pass
    def _transform_affinity_to_k8s_class_instance(self, affinity):
        """Convert a dict-form affinity to a V1Affinity instance (pass-through
        for falsy values and objects that are already class instances)."""
        if not affinity:
            return None
        if isinstance(affinity, dict):
            api = client.ApiClient()
            # not ideal to use their private method, but looks like that's the only option
            # Taken from https://github.com/kubernetes-client/python/issues/977
            affinity = api._ApiClient__deserialize(affinity, "V1Affinity")
        return affinity
    def _get_sanitized_affinity(self):
        """
        When using methods like to_dict() on kubernetes class instances we're getting the attributes in snake_case
        Which is ok if we're using the kubernetes python package but not if for example we're creating CRDs that we
        apply directly. For that we need the sanitized (CamelCase) version.
        """
        if not self.affinity:
            return {}
        if isinstance(self.affinity, dict):
            # heuristic - if node_affinity is part of the dict it means to_dict on the kubernetes object performed,
            # there's nothing we can do at that point to transform it to the sanitized version
            if "node_affinity" in self.affinity:
                raise mlrun.errors.MLRunInvalidArgumentError(
                    "Affinity must be instance of kubernetes' V1Affinity class"
                )
            elif "nodeAffinity" in self.affinity:
                # then it's already the sanitized version
                return self.affinity
        api = client.ApiClient()
        return api.sanitize_for_serialization(self.affinity)
    def _set_volume_mount(self, volume_mount):
        # calculate volume mount hash
        # (name + subPath + mountPath uniquely identify a mount, so re-adding
        # the same mount overwrites instead of duplicating)
        volume_name = get_item_name(volume_mount, "name")
        volume_sub_path = get_item_name(volume_mount, "subPath")
        volume_mount_path = get_item_name(volume_mount, "mountPath")
        volume_mount_key = hash(f"{volume_name}-{volume_sub_path}-{volume_mount_path}")
        self._volume_mounts[volume_mount_key] = volume_mount
class AutoMountType(str, Enum):
    """Selects which storage auto-mount modifier is applied to a runtime."""
    none = "none"
    auto = "auto"
    v3io_credentials = "v3io_credentials"
    v3io_fuse = "v3io_fuse"
    pvc = "pvc"
    @classmethod
    def _missing_(cls, value):
        # Unknown values silently resolve to the default type
        return cls.default()
    @staticmethod
    def default():
        return AutoMountType.auto
    # Any modifier that configures a mount on a runtime should be included here.
    # These modifiers, if applied to the runtime, will suppress the auto-mount
    # functionality.
    @classmethod
    def all_mount_modifiers(cls):
        mount_modifiers = (
            mlrun.v3io_cred,
            mlrun.mount_v3io,
            mlrun.platforms.other.mount_pvc,
            mlrun.auto_mount,
        )
        return [modifier.__name__ for modifier in mount_modifiers]
    @staticmethod
    def _get_auto_modifier():
        # On Iguazio, v3io credentials are the natural choice
        if mlconf.igz_version != "":
            return mlrun.v3io_cred
        # Otherwise mount a PVC when one is configured, else do nothing
        if "MLRUN_PVC_MOUNT" in os.environ:
            return mlrun.platforms.other.mount_pvc
        if "pvc_name" in mlconf.get_storage_auto_mount_params():
            return mlrun.platforms.other.mount_pvc
        return None
    def get_modifier(self):
        """Return the mount modifier function matching this type (or None)."""
        if self == AutoMountType.none:
            return None
        if self == AutoMountType.v3io_credentials:
            return mlrun.v3io_cred
        if self == AutoMountType.v3io_fuse:
            return mlrun.mount_v3io
        if self == AutoMountType.pvc:
            return mlrun.platforms.other.mount_pvc
        # AutoMountType.auto: decide based on the environment
        return self._get_auto_modifier()
class KubeResource(BaseRuntime):
    """Base runtime for functions that execute as Kubernetes pods.

    Provides the shared pod-level API: environment variables, resource
    requests/limits, node selection, priority class, storage auto-mount
    and injection of project / Vault / Azure key-vault secrets.
    """
    kind = "job"
    # this runtime may be nested inside pipeline steps
    _is_nested = True
    def __init__(self, spec=None, metadata=None):
        super().__init__(metadata, spec)
        self.verbose = False
    @property
    def spec(self) -> KubeResourceSpec:
        return self._spec
    @spec.setter
    def spec(self, spec):
        # accept either a KubeResourceSpec instance or its dict form
        self._spec = self._verify_dict(spec, "spec", KubeResourceSpec)
    def to_dict(self, fields=None, exclude=None, strip=False):
        """Serialize to a dict; with strip=True drop volumes/mounts and
        blank out V3IO_* env values (which typically hold credentials)."""
        struct = super().to_dict(fields, exclude, strip=strip)
        api = client.ApiClient()
        struct = api.sanitize_for_serialization(struct)
        if strip:
            spec = struct["spec"]
            for attr in ["volumes", "volume_mounts"]:
                if attr in spec:
                    del spec[attr]
            if "env" in spec and spec["env"]:
                for ev in spec["env"]:
                    if ev["name"].startswith("V3IO_"):
                        ev["value"] = ""
        return struct
    def apply(self, modify):
        """Apply a KFP-style modifier function (e.g. a mount modifier) to this runtime."""
        # Kubeflow pipeline have a hook to add the component to the DAG on ContainerOp init
        # we remove the hook to suppress kubeflow op registration and return it after the apply()
        old_op_handler = _container_op._register_op_handler
        _container_op._register_op_handler = lambda x: self.metadata.name
        cop = ContainerOp("name", "image")
        _container_op._register_op_handler = old_op_handler
        return apply_kfp(modify, cop, self)
    def set_env_from_secret(self, name, secret=None, secret_key=None):
        """set pod environment var from secret"""
        # default the secret key to the env var name
        secret_key = secret_key or name
        value_from = client.V1EnvVarSource(
            secret_key_ref=client.V1SecretKeySelector(name=secret, key=secret_key)
        )
        return self._set_env(name, value_from=value_from)
    def set_env(self, name, value):
        """set pod environment var from value"""
        return self._set_env(name, value=str(value))
    def is_env_exists(self, name):
        """Check whether there is an environment variable define for the given key"""
        for env_var in self.spec.env:
            if get_item_name(env_var) == name:
                return True
        return False
    def _set_env(self, name, value=None, value_from=None):
        # replace an existing env var with the same name, otherwise append
        new_var = client.V1EnvVar(name=name, value=value, value_from=value_from)
        i = 0
        for v in self.spec.env:
            if get_item_name(v) == name:
                self.spec.env[i] = new_var
                return self
            i += 1
        self.spec.env.append(new_var)
        return self
    def set_envs(self, env_vars):
        """set pod environment var key/value dict"""
        for name, value in env_vars.items():
            self.set_env(name, value)
        return self
    def gpus(self, gpus, gpu_type="nvidia.com/gpu"):
        """set the number of GPUs to allocate (as a resource limit)"""
        update_in(self.spec.resources, ["limits", gpu_type], gpus)
    def with_limits(self, mem=None, cpu=None, gpus=None, gpu_type="nvidia.com/gpu"):
        """set pod cpu/memory/gpu limits"""
        self._verify_and_set_limits("resources", mem, cpu, gpus, gpu_type)
    def with_requests(self, mem=None, cpu=None):
        """set requested (desired) pod cpu/memory resources"""
        self._verify_and_set_requests("resources", mem, cpu)
    def with_node_selection(
        self,
        node_name: typing.Optional[str] = None,
        node_selector: typing.Optional[typing.Dict[str, str]] = None,
        affinity: typing.Optional[client.V1Affinity] = None,
    ):
        """
        Enables to control on which k8s node the job will run
        :param node_name: The name of the k8s node
        :param node_selector: Label selector, only nodes with matching labels will be eligible to be picked
        :param affinity: Expands the types of constraints you can express - see
                         https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
                         for details
        """
        if node_name:
            self.spec.node_name = node_name
        if node_selector:
            self.spec.node_selector = node_selector
        if affinity:
            self.spec.affinity = affinity
    def with_priority_class(self, name: typing.Optional[str] = None):
        """
        Enables to control the priority of the pod
        If not passed - will default to mlrun.mlconf.default_function_priority_class_name
        :param name: The name of the priority class
        :raises mlrun.errors.MLRunInvalidArgumentError: if the name is not one
            of the configured valid priority class names
        """
        if name is None:
            name = mlconf.default_function_priority_class_name
        valid_priority_class_names = self.list_valid_and_default_priority_class_names()[
            "valid_function_priority_class_names"
        ]
        if name not in valid_priority_class_names:
            message = "Priority class name not in available priority class names"
            logger.warning(
                message,
                priority_class_name=name,
                valid_priority_class_names=valid_priority_class_names,
            )
            raise mlrun.errors.MLRunInvalidArgumentError(message)
        self.spec.priority_class_name = name
    def list_valid_and_default_priority_class_names(self):
        """Return the configured default and valid pod priority class names."""
        return {
            "default_function_priority_class_name": mlconf.default_function_priority_class_name,
            "valid_function_priority_class_names": mlconf.get_valid_function_priority_class_names(),
        }
    def _verify_and_set_limits(
        self,
        resources_field_name,
        mem=None,
        cpu=None,
        gpus=None,
        gpu_type="nvidia.com/gpu",
    ):
        # validate each quantity against the k8s resource-quantity format
        # before writing it into the spec
        if mem:
            verify_field_regex(
                f"function.spec.{resources_field_name}.limits.memory",
                mem,
                mlrun.utils.regex.k8s_resource_quantity_regex,
            )
        if cpu:
            verify_field_regex(
                f"function.spec.{resources_field_name}.limits.cpu",
                cpu,
                mlrun.utils.regex.k8s_resource_quantity_regex,
            )
        if gpus:
            verify_field_regex(
                f"function.spec.{resources_field_name}.limits.gpus",
                gpus,
                mlrun.utils.regex.k8s_resource_quantity_regex,
            )
        update_in(
            getattr(self.spec, resources_field_name),
            "limits",
            generate_resources(mem=mem, cpu=cpu, gpus=gpus, gpu_type=gpu_type),
        )
    def _verify_and_set_requests(self, resources_field_name, mem=None, cpu=None):
        # validate quantities against the k8s resource-quantity format
        if mem:
            verify_field_regex(
                f"function.spec.{resources_field_name}.requests.memory",
                mem,
                mlrun.utils.regex.k8s_resource_quantity_regex,
            )
        if cpu:
            verify_field_regex(
                f"function.spec.{resources_field_name}.requests.cpu",
                cpu,
                mlrun.utils.regex.k8s_resource_quantity_regex,
            )
        update_in(
            getattr(self.spec, resources_field_name),
            "requests",
            generate_resources(mem=mem, cpu=cpu),
        )
    def _get_meta(self, runobj, unique=False):
        """Build the pod's V1ObjectMeta for a run; with unique=True append a
        random suffix to the name, otherwise rely on k8s generateName."""
        namespace = self._get_k8s().resolve_namespace()
        labels = get_resource_labels(self, runobj, runobj.spec.scrape_metrics)
        new_meta = client.V1ObjectMeta(namespace=namespace, labels=labels)
        name = runobj.metadata.name or "mlrun"
        norm_name = f"{normalize_name(name)}-"
        if unique:
            norm_name += uuid.uuid4().hex[:8]
            new_meta.name = norm_name
            runobj.set_label("mlrun/job", norm_name)
        else:
            new_meta.generate_name = norm_name
        return new_meta
    def _add_azure_vault_params_to_spec(self, k8s_secret_name=None):
        """Mount the Azure key-vault k8s secret into the pod (best-effort)."""
        secret_name = (
            k8s_secret_name or mlconf.secret_stores.azure_vault.default_secret_name
        )
        if not secret_name:
            logger.warning(
                "No k8s secret provided. Azure key vault will not be available"
            )
            return
        # We cannot use expanduser() here, since the user in question is the user running in the pod
        # itself (which is root) and not where this code is running. That's why this hacky replacement is needed.
        secret_path = mlconf.secret_stores.azure_vault.secret_path.replace("~", "/root")
        volumes = [
            {
                "name": "azure-vault-secret",
                "secret": {"defaultMode": 420, "secretName": secret_name},
            }
        ]
        volume_mounts = [{"name": "azure-vault-secret", "mountPath": secret_path}]
        self.spec.update_vols_and_mounts(volumes, volume_mounts)
    def _add_project_k8s_secrets_to_spec(self, secrets, runobj=None, project=None):
        """Expose project k8s secrets as env vars in the pod.

        ``secrets`` maps secret key -> env var name; when empty, all existing
        project secret keys are exposed.
        """
        project_name = project or runobj.metadata.project
        if project_name is None:
            logger.warning("No project provided. Cannot add k8s secrets")
            return
        secret_name = self._get_k8s().get_project_secret_name(project_name)
        existing_secret_keys = (
            self._get_k8s().get_project_secret_keys(project_name) or {}
        )
        # If no secrets were passed, we need all existing keys
        if not secrets:
            secrets = {
                key: self._secrets.k8s_env_variable_name_for_secret(key)
                for key in existing_secret_keys
            }
        for key, env_var_name in secrets.items():
            # only map keys that actually exist in the project secret
            if key in existing_secret_keys:
                self.set_env_from_secret(env_var_name, secret_name, key)
    def _add_vault_params_to_spec(self, runobj=None, project=None):
        """Mount the project's Vault token secret and set the Vault env vars."""
        project_name = project or runobj.metadata.project
        if project_name is None:
            logger.warning("No project provided. Cannot add vault parameters")
            return
        service_account_name = mlconf.secret_stores.vault.project_service_account_name.format(
            project=project_name
        )
        project_vault_secret_name = self._get_k8s().get_project_vault_secret_name(
            project_name, service_account_name
        )
        if project_vault_secret_name is None:
            logger.info(f"No vault secret associated with project {project_name}")
            return
        volumes = [
            {
                "name": "vault-secret",
                "secret": {"defaultMode": 420, "secretName": project_vault_secret_name},
            }
        ]
        # We cannot use expanduser() here, since the user in question is the user running in the pod
        # itself (which is root) and not where this code is running. That's why this hacky replacement is needed.
        token_path = mlconf.secret_stores.vault.token_path.replace("~", "/root")
        volume_mounts = [{"name": "vault-secret", "mountPath": token_path}]
        self.spec.update_vols_and_mounts(volumes, volume_mounts)
        self.spec.env.append(
            {
                "name": "MLRUN_SECRET_STORES__VAULT__ROLE",
                "value": f"project:{project_name}",
            }
        )
        # In case remote URL is different than local URL, use it. Else, use the local URL
        vault_url = mlconf.secret_stores.vault.remote_url
        if vault_url == "":
            vault_url = mlconf.secret_stores.vault.url
        self.spec.env.append(
            {"name": "MLRUN_SECRET_STORES__VAULT__URL", "value": vault_url}
        )
    def try_auto_mount_based_on_config(self):
        """Apply the configured storage auto-mount modifier, unless a mount
        was already applied explicitly or auto-mount is disabled."""
        if self.spec.mount_applied:
            logger.debug("Mount already applied - not performing auto-mount")
            return
        auto_mount_type = AutoMountType(mlconf.storage.auto_mount_type)
        modifier = auto_mount_type.get_modifier()
        if not modifier:
            logger.debug("Auto mount disabled due to user selection")
            return
        mount_params_dict = mlconf.get_storage_auto_mount_params()
        self.apply(modifier(**mount_params_dict))
def kube_resource_spec_to_pod_spec(
    kube_resource_spec: KubeResourceSpec, container: client.V1Container
):
    """Build a single-container, non-restarting V1PodSpec from the given spec."""
    # Only set a priority class when the cluster config declares valid names
    priority_class_name = None
    if len(mlconf.get_valid_function_priority_class_names()):
        priority_class_name = kube_resource_spec.priority_class_name
    return client.V1PodSpec(
        containers=[container],
        restart_policy="Never",
        volumes=kube_resource_spec.volumes,
        service_account=kube_resource_spec.service_account,
        node_name=kube_resource_spec.node_name,
        node_selector=kube_resource_spec.node_selector,
        affinity=kube_resource_spec.affinity,
        priority_class_name=priority_class_name,
    )
| 36.563506 | 131 | 0.632467 |
import os
import typing
import uuid
from enum import Enum
from kfp.dsl import ContainerOp, _container_op
from kubernetes import client
import mlrun.errors
import mlrun.utils.regex
from ..config import config as mlconf
from ..utils import logger, normalize_name, update_in, verify_field_regex
from .base import BaseRuntime, FunctionSpec
from .utils import (
apply_kfp,
generate_resources,
get_item_name,
get_resource_labels,
set_named_item,
)
class KubeResourceSpec(FunctionSpec):
    """Function spec for runtimes that materialize as Kubernetes pods.

    Extends :class:`FunctionSpec` with pod-level settings: volumes and
    volume mounts, environment variables, resource requests/limits,
    replica count, image-pull configuration and scheduling constraints
    (node name, node selector, affinity, priority class).
    """
    def __init__(
        self,
        command=None,
        args=None,
        image=None,
        mode=None,
        volumes=None,
        volume_mounts=None,
        env=None,
        resources=None,
        default_handler=None,
        pythonpath=None,
        entry_points=None,
        description=None,
        workdir=None,
        replicas=None,
        image_pull_policy=None,
        service_account=None,
        build=None,
        image_pull_secret=None,
        node_name=None,
        node_selector=None,
        affinity=None,
        mount_applied=False,
        priority_class_name=None,
    ):
        super().__init__(
            command=command,
            args=args,
            image=image,
            mode=mode,
            build=build,
            entry_points=entry_points,
            description=description,
            workdir=workdir,
            default_handler=default_handler,
            pythonpath=pythonpath,
            mount_applied=mount_applied,
        )
        # volumes/mounts are stored internally as dicts (keyed by volume name /
        # mount hash) so repeated updates de-duplicate; the public properties
        # expose them as plain lists
        self._volumes = {}
        self._volume_mounts = {}
        self.volumes = volumes or []
        self.volume_mounts = volume_mounts or []
        self.env = env or []
        self.resources = resources or {}
        self.replicas = replicas
        self.image_pull_policy = image_pull_policy
        self.service_account = service_account
        self.image_pull_secret = image_pull_secret
        self.node_name = node_name
        # fall back to the config-level defaults when not explicitly given
        self.node_selector = (
            node_selector or mlrun.mlconf.get_default_function_node_selector()
        )
        self._affinity = affinity
        self.priority_class_name = (
            priority_class_name or mlrun.mlconf.default_function_priority_class_name
        )
    @property
    def volumes(self) -> list:
        # list view over the internal name-keyed dict
        return list(self._volumes.values())
    @volumes.setter
    def volumes(self, volumes):
        # setter replaces (does not merge) the current set of volumes
        self._volumes = {}
        if volumes:
            for vol in volumes:
                set_named_item(self._volumes, vol)
    @property
    def volume_mounts(self) -> list:
        # list view over the internal hash-keyed dict
        return list(self._volume_mounts.values())
    @volume_mounts.setter
    def volume_mounts(self, volume_mounts):
        # setter replaces (does not merge) the current set of mounts
        self._volume_mounts = {}
        if volume_mounts:
            for volume_mount in volume_mounts:
                self._set_volume_mount(volume_mount)
    @property
    def affinity(self) -> client.V1Affinity:
        return self._affinity
    @affinity.setter
    def affinity(self, affinity):
        # accepts either a V1Affinity instance or its dict representation
        self._affinity = self._transform_affinity_to_k8s_class_instance(affinity)
    def to_dict(self, fields=None, exclude=None):
        """Serialize the spec to a dict, sanitizing affinity separately
        since it is a kubernetes client object rather than plain data."""
        # NOTE(review): the caller-supplied ``exclude`` is not forwarded here,
        # only "affinity" is excluded - confirm this is intentional
        struct = super().to_dict(fields, exclude=["affinity"])
        api = client.ApiClient()
        struct["affinity"] = api.sanitize_for_serialization(self.affinity)
        return struct
    def update_vols_and_mounts(self, volumes, volume_mounts):
        """Merge additional volumes/mounts into the spec (de-duplicated)."""
        if volumes:
            for vol in volumes:
                set_named_item(self._volumes, vol)
        if volume_mounts:
            for volume_mount in volume_mounts:
                self._set_volume_mount(volume_mount)
    def _get_affinity_as_k8s_class_instance(self):
        # placeholder - not implemented; use
        # _transform_affinity_to_k8s_class_instance instead
        pass
    def _transform_affinity_to_k8s_class_instance(self, affinity):
        """Convert a dict-form affinity to a V1Affinity instance (pass-through
        for falsy values and objects that are already class instances)."""
        if not affinity:
            return None
        if isinstance(affinity, dict):
            api = client.ApiClient()
            # not ideal to use their private method, but looks like that's the only option
            # Taken from https://github.com/kubernetes-client/python/issues/977
            affinity = api._ApiClient__deserialize(affinity, "V1Affinity")
        return affinity
    def _get_sanitized_affinity(self):
        """Return the CamelCase (sanitized) dict form of the affinity.

        to_dict() on kubernetes class instances produces snake_case attributes,
        which is unsuitable when creating CRDs applied directly to the cluster.
        """
        if not self.affinity:
            return {}
        if isinstance(self.affinity, dict):
            # heuristic - if node_affinity is part of the dict it means to_dict on the kubernetes object performed,
            # there's nothing we can do at that point to transform it to the sanitized version
            if "node_affinity" in self.affinity:
                raise mlrun.errors.MLRunInvalidArgumentError(
                    "Affinity must be instance of kubernetes' V1Affinity class"
                )
            elif "nodeAffinity" in self.affinity:
                # then it's already the sanitized version
                return self.affinity
        api = client.ApiClient()
        return api.sanitize_for_serialization(self.affinity)
    def _set_volume_mount(self, volume_mount):
        # name + subPath + mountPath uniquely identify a mount, so re-adding
        # the same mount overwrites instead of duplicating
        volume_name = get_item_name(volume_mount, "name")
        volume_sub_path = get_item_name(volume_mount, "subPath")
        volume_mount_path = get_item_name(volume_mount, "mountPath")
        volume_mount_key = hash(f"{volume_name}-{volume_sub_path}-{volume_mount_path}")
        self._volume_mounts[volume_mount_key] = volume_mount
class AutoMountType(str, Enum):
    """Selects which storage auto-mount modifier is applied to a runtime."""
    none = "none"
    auto = "auto"
    v3io_credentials = "v3io_credentials"
    v3io_fuse = "v3io_fuse"
    pvc = "pvc"
    @classmethod
    def _missing_(cls, value):
        # Unknown values silently resolve to the default type
        return cls.default()
    @staticmethod
    def default():
        return AutoMountType.auto
    # Any modifier that configures a mount on a runtime should be included here.
    # These modifiers, if applied to the runtime, will suppress the auto-mount
    # functionality.
    @classmethod
    def all_mount_modifiers(cls):
        mount_modifiers = (
            mlrun.v3io_cred,
            mlrun.mount_v3io,
            mlrun.platforms.other.mount_pvc,
            mlrun.auto_mount,
        )
        return [modifier.__name__ for modifier in mount_modifiers]
    @staticmethod
    def _get_auto_modifier():
        # On Iguazio, v3io credentials are the natural choice
        if mlconf.igz_version != "":
            return mlrun.v3io_cred
        # Otherwise mount a PVC when one is configured, else do nothing
        if "MLRUN_PVC_MOUNT" in os.environ:
            return mlrun.platforms.other.mount_pvc
        if "pvc_name" in mlconf.get_storage_auto_mount_params():
            return mlrun.platforms.other.mount_pvc
        return None
    def get_modifier(self):
        """Return the mount modifier function matching this type (or None)."""
        if self == AutoMountType.none:
            return None
        if self == AutoMountType.v3io_credentials:
            return mlrun.v3io_cred
        if self == AutoMountType.v3io_fuse:
            return mlrun.mount_v3io
        if self == AutoMountType.pvc:
            return mlrun.platforms.other.mount_pvc
        # AutoMountType.auto: decide based on the environment
        return self._get_auto_modifier()
class KubeResource(BaseRuntime):
    """Base runtime for functions that execute as Kubernetes pods.

    Provides the shared pod-level API: environment variables, resource
    requests/limits, node selection, priority class, storage auto-mount
    and injection of project / Vault / Azure key-vault secrets.
    """
    kind = "job"
    # this runtime may be nested inside pipeline steps
    _is_nested = True
    def __init__(self, spec=None, metadata=None):
        super().__init__(metadata, spec)
        self.verbose = False
    @property
    def spec(self) -> KubeResourceSpec:
        return self._spec
    @spec.setter
    def spec(self, spec):
        # accept either a KubeResourceSpec instance or its dict form
        self._spec = self._verify_dict(spec, "spec", KubeResourceSpec)
    def to_dict(self, fields=None, exclude=None, strip=False):
        """Serialize to a dict; with strip=True drop volumes/mounts and
        blank out V3IO_* env values (which typically hold credentials)."""
        struct = super().to_dict(fields, exclude, strip=strip)
        api = client.ApiClient()
        struct = api.sanitize_for_serialization(struct)
        if strip:
            spec = struct["spec"]
            for attr in ["volumes", "volume_mounts"]:
                if attr in spec:
                    del spec[attr]
            if "env" in spec and spec["env"]:
                for ev in spec["env"]:
                    if ev["name"].startswith("V3IO_"):
                        ev["value"] = ""
        return struct
    def apply(self, modify):
        """Apply a KFP-style modifier function (e.g. a mount modifier) to this runtime."""
        # Kubeflow pipelines hook ContainerOp init to add the component to the
        # DAG; temporarily remove the hook to suppress op registration
        old_op_handler = _container_op._register_op_handler
        _container_op._register_op_handler = lambda x: self.metadata.name
        cop = ContainerOp("name", "image")
        _container_op._register_op_handler = old_op_handler
        return apply_kfp(modify, cop, self)
    def set_env_from_secret(self, name, secret=None, secret_key=None):
        """Set a pod environment variable sourced from a k8s secret key."""
        # default the secret key to the env var name
        secret_key = secret_key or name
        value_from = client.V1EnvVarSource(
            secret_key_ref=client.V1SecretKeySelector(name=secret, key=secret_key)
        )
        return self._set_env(name, value_from=value_from)
    def set_env(self, name, value):
        """Set a pod environment variable from a literal value."""
        return self._set_env(name, value=str(value))
    def is_env_exists(self, name):
        """Check whether an environment variable is defined for the given key."""
        for env_var in self.spec.env:
            if get_item_name(env_var) == name:
                return True
        return False
    def _set_env(self, name, value=None, value_from=None):
        # replace an existing env var with the same name, otherwise append
        new_var = client.V1EnvVar(name=name, value=value, value_from=value_from)
        i = 0
        for v in self.spec.env:
            if get_item_name(v) == name:
                self.spec.env[i] = new_var
                return self
            i += 1
        self.spec.env.append(new_var)
        return self
    def set_envs(self, env_vars):
        """Set pod environment variables from a key/value dict."""
        for name, value in env_vars.items():
            self.set_env(name, value)
        return self
    def gpus(self, gpus, gpu_type="nvidia.com/gpu"):
        """Set the number of GPUs to allocate (as a resource limit)."""
        update_in(self.spec.resources, ["limits", gpu_type], gpus)
    def with_limits(self, mem=None, cpu=None, gpus=None, gpu_type="nvidia.com/gpu"):
        """Set pod cpu/memory/gpu limits."""
        self._verify_and_set_limits("resources", mem, cpu, gpus, gpu_type)
    def with_requests(self, mem=None, cpu=None):
        """Set requested (desired) pod cpu/memory resources."""
        self._verify_and_set_requests("resources", mem, cpu)
    def with_node_selection(
        self,
        node_name: typing.Optional[str] = None,
        node_selector: typing.Optional[typing.Dict[str, str]] = None,
        affinity: typing.Optional[client.V1Affinity] = None,
    ):
        """
        Control on which k8s node the job will run.
        :param node_name: The name of the k8s node
        :param node_selector: Label selector; only nodes with matching labels are eligible
        :param affinity: Expands the types of constraints you can express - see
                         https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity
                         for details
        """
        if node_name:
            self.spec.node_name = node_name
        if node_selector:
            self.spec.node_selector = node_selector
        if affinity:
            self.spec.affinity = affinity
    def with_priority_class(self, name: typing.Optional[str] = None):
        """
        Control the priority of the pod.
        If not passed - defaults to mlrun.mlconf.default_function_priority_class_name
        :param name: The name of the priority class
        :raises mlrun.errors.MLRunInvalidArgumentError: if the name is not one
            of the configured valid priority class names
        """
        if name is None:
            name = mlconf.default_function_priority_class_name
        valid_priority_class_names = self.list_valid_and_default_priority_class_names()[
            "valid_function_priority_class_names"
        ]
        if name not in valid_priority_class_names:
            message = "Priority class name not in available priority class names"
            logger.warning(
                message,
                priority_class_name=name,
                valid_priority_class_names=valid_priority_class_names,
            )
            raise mlrun.errors.MLRunInvalidArgumentError(message)
        self.spec.priority_class_name = name
    def list_valid_and_default_priority_class_names(self):
        """Return the configured default and valid pod priority class names."""
        return {
            "default_function_priority_class_name": mlconf.default_function_priority_class_name,
            "valid_function_priority_class_names": mlconf.get_valid_function_priority_class_names(),
        }
    def _verify_and_set_limits(
        self,
        resources_field_name,
        mem=None,
        cpu=None,
        gpus=None,
        gpu_type="nvidia.com/gpu",
    ):
        # validate each quantity against the k8s resource-quantity format
        # before writing it into the spec
        if mem:
            verify_field_regex(
                f"function.spec.{resources_field_name}.limits.memory",
                mem,
                mlrun.utils.regex.k8s_resource_quantity_regex,
            )
        if cpu:
            verify_field_regex(
                f"function.spec.{resources_field_name}.limits.cpu",
                cpu,
                mlrun.utils.regex.k8s_resource_quantity_regex,
            )
        if gpus:
            verify_field_regex(
                f"function.spec.{resources_field_name}.limits.gpus",
                gpus,
                mlrun.utils.regex.k8s_resource_quantity_regex,
            )
        update_in(
            getattr(self.spec, resources_field_name),
            "limits",
            generate_resources(mem=mem, cpu=cpu, gpus=gpus, gpu_type=gpu_type),
        )
    def _verify_and_set_requests(self, resources_field_name, mem=None, cpu=None):
        # validate quantities against the k8s resource-quantity format
        if mem:
            verify_field_regex(
                f"function.spec.{resources_field_name}.requests.memory",
                mem,
                mlrun.utils.regex.k8s_resource_quantity_regex,
            )
        if cpu:
            verify_field_regex(
                f"function.spec.{resources_field_name}.requests.cpu",
                cpu,
                mlrun.utils.regex.k8s_resource_quantity_regex,
            )
        update_in(
            getattr(self.spec, resources_field_name),
            "requests",
            generate_resources(mem=mem, cpu=cpu),
        )
    def _get_meta(self, runobj, unique=False):
        """Build the pod's V1ObjectMeta for a run; with unique=True append a
        random suffix to the name, otherwise rely on k8s generateName."""
        namespace = self._get_k8s().resolve_namespace()
        labels = get_resource_labels(self, runobj, runobj.spec.scrape_metrics)
        new_meta = client.V1ObjectMeta(namespace=namespace, labels=labels)
        name = runobj.metadata.name or "mlrun"
        norm_name = f"{normalize_name(name)}-"
        if unique:
            norm_name += uuid.uuid4().hex[:8]
            new_meta.name = norm_name
            runobj.set_label("mlrun/job", norm_name)
        else:
            new_meta.generate_name = norm_name
        return new_meta
    def _add_azure_vault_params_to_spec(self, k8s_secret_name=None):
        """Mount the Azure key-vault k8s secret into the pod (best-effort)."""
        secret_name = (
            k8s_secret_name or mlconf.secret_stores.azure_vault.default_secret_name
        )
        if not secret_name:
            logger.warning(
                "No k8s secret provided. Azure key vault will not be available"
            )
            return
        # cannot use expanduser() here - the relevant user is the one running
        # in the pod (root), not the user running this code
        secret_path = mlconf.secret_stores.azure_vault.secret_path.replace("~", "/root")
        volumes = [
            {
                "name": "azure-vault-secret",
                "secret": {"defaultMode": 420, "secretName": secret_name},
            }
        ]
        volume_mounts = [{"name": "azure-vault-secret", "mountPath": secret_path}]
        self.spec.update_vols_and_mounts(volumes, volume_mounts)
    def _add_project_k8s_secrets_to_spec(self, secrets, runobj=None, project=None):
        """Expose project k8s secrets as env vars in the pod.

        ``secrets`` maps secret key -> env var name; when empty, all existing
        project secret keys are exposed.
        """
        project_name = project or runobj.metadata.project
        if project_name is None:
            logger.warning("No project provided. Cannot add k8s secrets")
            return
        secret_name = self._get_k8s().get_project_secret_name(project_name)
        existing_secret_keys = (
            self._get_k8s().get_project_secret_keys(project_name) or {}
        )
        # If no secrets were passed, we need all existing keys
        if not secrets:
            secrets = {
                key: self._secrets.k8s_env_variable_name_for_secret(key)
                for key in existing_secret_keys
            }
        for key, env_var_name in secrets.items():
            # only map keys that actually exist in the project secret
            if key in existing_secret_keys:
                self.set_env_from_secret(env_var_name, secret_name, key)
    def _add_vault_params_to_spec(self, runobj=None, project=None):
        """Mount the project's Vault token secret and set the Vault env vars."""
        project_name = project or runobj.metadata.project
        if project_name is None:
            logger.warning("No project provided. Cannot add vault parameters")
            return
        service_account_name = mlconf.secret_stores.vault.project_service_account_name.format(
            project=project_name
        )
        project_vault_secret_name = self._get_k8s().get_project_vault_secret_name(
            project_name, service_account_name
        )
        if project_vault_secret_name is None:
            logger.info(f"No vault secret associated with project {project_name}")
            return
        volumes = [
            {
                "name": "vault-secret",
                "secret": {"defaultMode": 420, "secretName": project_vault_secret_name},
            }
        ]
        # We cannot use expanduser() here, since the user in question is the user running in the pod
        # itself (which is root) and not where this code is running. That's why this hacky replacement is needed.
        token_path = mlconf.secret_stores.vault.token_path.replace("~", "/root")
        volume_mounts = [{"name": "vault-secret", "mountPath": token_path}]
        self.spec.update_vols_and_mounts(volumes, volume_mounts)
        self.spec.env.append(
            {
                "name": "MLRUN_SECRET_STORES__VAULT__ROLE",
                "value": f"project:{project_name}",
            }
        )
        # prefer the remote URL when it differs from the local one
        vault_url = mlconf.secret_stores.vault.remote_url
        if vault_url == "":
            vault_url = mlconf.secret_stores.vault.url
        self.spec.env.append(
            {"name": "MLRUN_SECRET_STORES__VAULT__URL", "value": vault_url}
        )
    def try_auto_mount_based_on_config(self):
        """Apply the configured storage auto-mount modifier, unless a mount
        was already applied explicitly or auto-mount is disabled."""
        if self.spec.mount_applied:
            logger.debug("Mount already applied - not performing auto-mount")
            return
        auto_mount_type = AutoMountType(mlconf.storage.auto_mount_type)
        modifier = auto_mount_type.get_modifier()
        if not modifier:
            logger.debug("Auto mount disabled due to user selection")
            return
        mount_params_dict = mlconf.get_storage_auto_mount_params()
        self.apply(modifier(**mount_params_dict))
def kube_resource_spec_to_pod_spec(
    kube_resource_spec: KubeResourceSpec, container: client.V1Container
):
    """Build a single-container, non-restarting V1PodSpec from the given spec."""
    # Only set a priority class when the cluster config declares valid names
    priority_class_name = None
    if len(mlconf.get_valid_function_priority_class_names()):
        priority_class_name = kube_resource_spec.priority_class_name
    return client.V1PodSpec(
        containers=[container],
        restart_policy="Never",
        volumes=kube_resource_spec.volumes,
        service_account=kube_resource_spec.service_account,
        node_name=kube_resource_spec.node_name,
        node_selector=kube_resource_spec.node_selector,
        affinity=kube_resource_spec.affinity,
        priority_class_name=priority_class_name,
    )
| true | true |
f7248f61ab2a1f4fb14a9ffc0272aea78e49b1af | 7,677 | py | Python | var/spack/repos/builtin/packages/libint/package.py | rickgcv/spack | 4d4f7393a3522c1f690a2e9a9683bf0f8f3b43a6 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/libint/package.py | rickgcv/spack | 4d4f7393a3522c1f690a2e9a9683bf0f8f3b43a6 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | null | null | null | var/spack/repos/builtin/packages/libint/package.py | rickgcv/spack | 4d4f7393a3522c1f690a2e9a9683bf0f8f3b43a6 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2020-11-08T10:26:48.000Z | 2020-11-08T10:26:48.000Z | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
from spack import *
# Accepted values for the "tune" variant below: "none" disables tuning,
# the others tune the generated integrals library for a specific consumer
# package (CP2K or MOLGW) at a given maximum angular momentum (lmax).
TUNE_VARIANTS = (
    'none',
    'cp2k-lmax-4',
    'cp2k-lmax-5',
    'cp2k-lmax-6',
    'cp2k-lmax-7',
    'molgw-lmax-4',
    'molgw-lmax-5',
    'molgw-lmax-6',
    'molgw-lmax-7',
)
class Libint(AutotoolsPackage):
"""Libint is a high-performance library for computing
Gaussian integrals in quantum mechanics.
"""
homepage = "https://github.com/evaleev/libint"
url = "https://github.com/evaleev/libint/archive/v2.1.0.tar.gz"
version('2.6.0', sha256='4ae47e8f0b5632c3d2a956469a7920896708e9f0e396ec10071b8181e4c8d9fa')
version('2.4.2', sha256='86dff38065e69a3a51d15cfdc638f766044cb87e5c6682d960c14f9847e2eac3')
version('2.4.1', sha256='0513be124563fdbbc7cd3c7043e221df1bda236a037027ba9343429a27db8ce4')
version('2.4.0', sha256='52eb16f065406099dcfaceb12f9a7f7e329c9cfcf6ed9bfacb0cff7431dd6019')
version('2.2.0', sha256='f737d485f33ac819d7f28c6ce303b1f3a2296bfd2c14f7c1323f8c5d370bb0e3')
version('2.1.0', sha256='43c453a1663aa1c55294df89ff9ece3aefc8d1bbba5ea31dbfe71b2d812e24c8')
version('1.1.6', sha256='f201b0c621df678cfe8bdf3990796b8976ff194aba357ae398f2f29b0e2985a6')
version('1.1.5', sha256='ec8cd4a4ba1e1a98230165210c293632372f0e573acd878ed62e5ec6f8b6174b')
variant('fortran', default=False,
description='Build & install Fortran bindings')
variant('tune', default='none', multi=False,
values=TUNE_VARIANTS,
description='Tune libint for use with the given package')
# Build dependencies
depends_on('autoconf@2.52:', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
# Libint 2 dependencies
depends_on('boost', when='@2:')
depends_on('gmp', when='@2:')
for tvariant in TUNE_VARIANTS[1:]:
conflicts('tune={0}'.format(tvariant), when='@:2.5.99',
msg=('for versions prior to 2.6, tuning for specific'
'codes/configurations is not supported'))
def url_for_version(self, version):
base_url = "https://github.com/evaleev/libint/archive"
if version == Version('1.0.0'):
return "{0}/LIBINT_1_00.tar.gz".format(base_url)
elif version < Version('2.1.0'):
return "{0}/release-{1}.tar.gz".format(base_url, version.dashed)
else:
return "{0}/v{1}.tar.gz".format(base_url, version)
def autoreconf(self, spec, prefix):
libtoolize()
aclocal('-I', 'lib/autoconf')
autoconf()
if '@2.6.0:' in spec:
# skip tarball creation and removal of dir with generated code
filter_file(r'^(export::.*)\s+tgz$', r'\1', 'export/Makefile')
@property
def optflags(self):
flags = '-O2'
# Optimizations for the Intel compiler, suggested by CP2K
# See ../libxc/package.py for rationale and doc.
if '%intel' in self.spec:
flags += ' -xSSE4.2 -axAVX,CORE-AVX2 -ipo'
return flags
def setup_build_environment(self, env):
# Set optimization flags
env.set('CFLAGS', self.optflags)
env.set('CXXFLAGS', self.optflags)
# Change AR to xiar if we compile with Intel and we
# find the executable
if '%intel' in self.spec and which('xiar'):
env.set('AR', 'xiar')
def configure_args(self):
config_args = ['--enable-shared']
optflags = self.optflags
# Optimization flag names have changed in libint 2
if self.version < Version('2.0.0'):
config_args.extend([
'--with-cc-optflags={0}'.format(optflags),
'--with-cxx-optflags={0}'.format(optflags)
])
else:
config_args.extend([
'--with-cxx-optflags={0}'.format(optflags),
'--with-cxxgen-optflags={0}'.format(optflags)
])
# Options required by CP2K, removed in libint 2
if self.version < Version('2.0.0'):
config_args.extend([
'--with-libint-max-am=5',
'--with-libderiv-max-am1=4'
])
if '@2.6.0:' in self.spec:
config_args += ['--with-libint-exportdir=generated']
tune_value = self.spec.variants['tune'].value
if tune_value.startswith('cp2k'):
lmax = int(tune_value.split('-lmax-')[1])
config_args += [
'--enable-eri=1',
'--enable-eri2=1',
'--enable-eri3=1',
'--with-max-am={0}'.format(lmax),
'--with-eri-max-am={0},{1}'.format(lmax, lmax - 1),
'--with-eri2-max-am={0},{1}'.format(lmax + 2, lmax + 1),
'--with-eri3-max-am={0},{1}'.format(lmax + 2, lmax + 1),
'--with-opt-am=3',
# keep code-size at an acceptable limit,
# cf. https://github.com/evaleev/libint/wiki#program-specific-notes:
'--enable-generic-code',
'--disable-unrolling',
]
if tune_value.startswith('molgw'):
lmax = int(tune_value.split('-lmax-')[1])
config_args += [
'--enable-1body=1',
'--enable-eri=0',
'--enable-eri2=0',
'--enable-eri3=0',
'--with-multipole-max-order=0',
'--with-max-am={0}'.format(lmax),
'--with-eri-max-am={0}'.format(lmax),
'--with-eri2-max-am={0}'.format(lmax),
'--with-eri3-max-am={0}'.format(lmax),
'--with-opt-am=2',
'--enable-contracted-ints',
# keep code-size at an acceptable limit,
# cf. https://github.com/evaleev/libint/wiki#program-specific-notes:
'--enable-generic-code',
'--disable-unrolling',
]
return config_args
@property
def build_targets(self):
if '@2.6.0:' in self.spec:
return ['export']
return []
@when('@2.6.0:')
def install(self, spec, prefix):
"""
Starting from libint 2.6.0 we're using the 2-stage build
to get support for the Fortran bindings, required by some
packages (CP2K notably).
"""
# upstream says that using configure/make for the generated code
# is deprecated and one should use CMake, but with the currently
# recent 2.7.0.b1 it still doesn't work
with working_dir(os.path.join(self.build_directory, 'generated')):
# straight from the AutotoolsPackage class:
options = [
'--prefix={0}'.format(prefix),
'--enable-shared',
'--with-cxx-optflags={0}'.format(self.optflags),
]
if '+fortran' in spec:
options += ['--enable-fortran']
configure = Executable('./configure')
configure(*options)
make()
make('install')
def patch(self):
# Use Fortran compiler to link the Fortran example, not the C++
# compiler
if '+fortran' in self.spec and self.spec.satisfies('%nvhpc'):
filter_file('$(CXX) $(CXXFLAGS)', '$(FC) $(FCFLAGS)',
'export/fortran/Makefile', string=True)
| 37.44878 | 95 | 0.558421 |
import os
from spack import *
TUNE_VARIANTS = (
'none',
'cp2k-lmax-4',
'cp2k-lmax-5',
'cp2k-lmax-6',
'cp2k-lmax-7',
'molgw-lmax-4',
'molgw-lmax-5',
'molgw-lmax-6',
'molgw-lmax-7',
)
class Libint(AutotoolsPackage):
homepage = "https://github.com/evaleev/libint"
url = "https://github.com/evaleev/libint/archive/v2.1.0.tar.gz"
version('2.6.0', sha256='4ae47e8f0b5632c3d2a956469a7920896708e9f0e396ec10071b8181e4c8d9fa')
version('2.4.2', sha256='86dff38065e69a3a51d15cfdc638f766044cb87e5c6682d960c14f9847e2eac3')
version('2.4.1', sha256='0513be124563fdbbc7cd3c7043e221df1bda236a037027ba9343429a27db8ce4')
version('2.4.0', sha256='52eb16f065406099dcfaceb12f9a7f7e329c9cfcf6ed9bfacb0cff7431dd6019')
version('2.2.0', sha256='f737d485f33ac819d7f28c6ce303b1f3a2296bfd2c14f7c1323f8c5d370bb0e3')
version('2.1.0', sha256='43c453a1663aa1c55294df89ff9ece3aefc8d1bbba5ea31dbfe71b2d812e24c8')
version('1.1.6', sha256='f201b0c621df678cfe8bdf3990796b8976ff194aba357ae398f2f29b0e2985a6')
version('1.1.5', sha256='ec8cd4a4ba1e1a98230165210c293632372f0e573acd878ed62e5ec6f8b6174b')
variant('fortran', default=False,
description='Build & install Fortran bindings')
variant('tune', default='none', multi=False,
values=TUNE_VARIANTS,
description='Tune libint for use with the given package')
depends_on('autoconf@2.52:', type='build')
depends_on('automake', type='build')
depends_on('libtool', type='build')
depends_on('boost', when='@2:')
depends_on('gmp', when='@2:')
for tvariant in TUNE_VARIANTS[1:]:
conflicts('tune={0}'.format(tvariant), when='@:2.5.99',
msg=('for versions prior to 2.6, tuning for specific'
'codes/configurations is not supported'))
def url_for_version(self, version):
base_url = "https://github.com/evaleev/libint/archive"
if version == Version('1.0.0'):
return "{0}/LIBINT_1_00.tar.gz".format(base_url)
elif version < Version('2.1.0'):
return "{0}/release-{1}.tar.gz".format(base_url, version.dashed)
else:
return "{0}/v{1}.tar.gz".format(base_url, version)
def autoreconf(self, spec, prefix):
libtoolize()
aclocal('-I', 'lib/autoconf')
autoconf()
if '@2.6.0:' in spec:
filter_file(r'^(export::.*)\s+tgz$', r'\1', 'export/Makefile')
@property
def optflags(self):
flags = '-O2'
if '%intel' in self.spec:
flags += ' -xSSE4.2 -axAVX,CORE-AVX2 -ipo'
return flags
def setup_build_environment(self, env):
env.set('CFLAGS', self.optflags)
env.set('CXXFLAGS', self.optflags)
if '%intel' in self.spec and which('xiar'):
env.set('AR', 'xiar')
def configure_args(self):
config_args = ['--enable-shared']
optflags = self.optflags
if self.version < Version('2.0.0'):
config_args.extend([
'--with-cc-optflags={0}'.format(optflags),
'--with-cxx-optflags={0}'.format(optflags)
])
else:
config_args.extend([
'--with-cxx-optflags={0}'.format(optflags),
'--with-cxxgen-optflags={0}'.format(optflags)
])
if self.version < Version('2.0.0'):
config_args.extend([
'--with-libint-max-am=5',
'--with-libderiv-max-am1=4'
])
if '@2.6.0:' in self.spec:
config_args += ['--with-libint-exportdir=generated']
tune_value = self.spec.variants['tune'].value
if tune_value.startswith('cp2k'):
lmax = int(tune_value.split('-lmax-')[1])
config_args += [
'--enable-eri=1',
'--enable-eri2=1',
'--enable-eri3=1',
'--with-max-am={0}'.format(lmax),
'--with-eri-max-am={0},{1}'.format(lmax, lmax - 1),
'--with-eri2-max-am={0},{1}'.format(lmax + 2, lmax + 1),
'--with-eri3-max-am={0},{1}'.format(lmax + 2, lmax + 1),
'--with-opt-am=3',
                '--enable-generic-code',
'--disable-unrolling',
]
if tune_value.startswith('molgw'):
lmax = int(tune_value.split('-lmax-')[1])
config_args += [
'--enable-1body=1',
'--enable-eri=0',
'--enable-eri2=0',
'--enable-eri3=0',
'--with-multipole-max-order=0',
'--with-max-am={0}'.format(lmax),
'--with-eri-max-am={0}'.format(lmax),
'--with-eri2-max-am={0}'.format(lmax),
'--with-eri3-max-am={0}'.format(lmax),
'--with-opt-am=2',
'--enable-contracted-ints',
                '--enable-generic-code',
'--disable-unrolling',
]
return config_args
@property
def build_targets(self):
if '@2.6.0:' in self.spec:
return ['export']
return []
@when('@2.6.0:')
def install(self, spec, prefix):
with working_dir(os.path.join(self.build_directory, 'generated')):
# straight from the AutotoolsPackage class:
options = [
'--prefix={0}'.format(prefix),
'--enable-shared',
'--with-cxx-optflags={0}'.format(self.optflags),
]
if '+fortran' in spec:
options += ['--enable-fortran']
configure = Executable('./configure')
configure(*options)
make()
make('install')
def patch(self):
# Use Fortran compiler to link the Fortran example, not the C++
# compiler
if '+fortran' in self.spec and self.spec.satisfies('%nvhpc'):
filter_file('$(CXX) $(CXXFLAGS)', '$(FC) $(FCFLAGS)',
'export/fortran/Makefile', string=True)
| true | true |
f7248fd3c08c05e53ab12b8b1c1daa7aa98f5d00 | 11,277 | py | Python | real_time.py | TheoPantaz/Control-of-robotic-vehicle-via-brain-activity | 4cae5a69503659581f510c748f59f045d1f2b145 | [
"MIT"
] | null | null | null | real_time.py | TheoPantaz/Control-of-robotic-vehicle-via-brain-activity | 4cae5a69503659581f510c748f59f045d1f2b145 | [
"MIT"
] | null | null | null | real_time.py | TheoPantaz/Control-of-robotic-vehicle-via-brain-activity | 4cae5a69503659581f510c748f59f045d1f2b145 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 17 18:06:40 2020
@author: Kokkinos
lines for telnet communication: 31,32,136,139,149,152,201,204,212,215,296
"""
from threading import Thread
import numpy as np
import scipy.io as sio
from pylsl import StreamInlet, resolve_stream
from tkinter import *
import telnetlib
import pickle
import threading
from graphics import Graphics
class rt(Graphics):
def __init__(self, mode = 'IMvsall', tim_window = 4, vote_window = 4, overlap = 0,
IM_window = 2, HOST = "192.168.4.1"):
if mode == 'IMvsall' or 'Rvsall' or 'IMvsRest' or 'CSP_OVR' or'sync':
self.mode = mode
else:
raise ValueError('Inappropriate mode value')
# self.HOST = HOST
# self.tn = telnetlib.Telnet(self.HOST)
with open("visual_cues.txt") as f:
content = f.readlines()
content = [line.rstrip('\n') for line in content]
self.Fs = int(content[0]) # Sampling Frequency
self.Rdur = int(content[1]) # Rest visual cue duration
self.Prdur = int(content[3])
content = np.array(content)
self.vcN = len(content) # number of visual cues
idxs = np.where(content == 'REST')
self.RN = len(idxs[0]) # number of REST visual cues
idxs = np.where(content == 'PREPARE')
self.PRN = len(idxs[0])
self.IMN = len(content) - self.RN - self.PRN - 4 # number of Imaginary Movements visual cues
try:
self.IMdur = int(content[2])
self.recdur = self.RN * self.Rdur * self.Fs + self.IMN * self.IMdur * self.Fs + self.PRN * self.Prdur * self.Fs # duration of the recording
except:
IMdur = list(content[2].split(','))
self.IMdur = [int(i) for i in IMdur]
self.IMdur = [np.random.randint(IMdur[0],IMdur[1]) for i in range(self.IMN)]
self.recdur = self.RN * self.Rdur * self.Fs + sum(self.IMdur) * self.Fs + self.PRN * self.Prdur * self.Fs # duration of the recording
self.content = np.delete(content,np.s_[:4])
if self.mode == 'sync':
self.tim_window = self.IMdur * self.Fs
self.vote_window = self.IMdur * self.Fs
self.step = (self.IMdur + self.Prdur + self.Rdur) * self.Fs
self.IM_window = 1
self.IMdur = [self.IMdur] * self.IMN
else:
self.tim_window = tim_window * self.Fs
self.vote_window = vote_window * self.Fs
self.overlap = overlap
self.step = int(self.tim_window * (1 - self.overlap))
self.IM_window = IM_window
Graphics.__init__(self)
def load_bcis(self, filename):
with open(filename, 'rb') as train:
self.bcis = pickle.load(train)
return self
    def begin_stream(self):
        """Block until an EEG-typed LSL stream appears, then open an inlet."""
        print("looking for an EEG stream...")
        # resolve_stream blocks until at least one matching stream is found.
        self.streams = resolve_stream('type', 'EEG')
        # create a new inlet to read from the first stream that matched
        self.inlet = StreamInlet(self.streams[0])
def pred_im(self, chunk, cSTR):
self.pred = []
chunk = (np.array(chunk).T)/1000000000
chunk = chunk.reshape((1,chunk.shape[0],chunk.shape[1]))
for i, bci in enumerate(self.bcis[:-1]):
self.pred.append(bci.predict(chunk))
if self.pred[i] != 0:
self.vote[i] += 1
else:
self.vote[i] -= 1
self.pred.append(self.bcis[-1].predict(chunk))
if self.pred[-1] == 1:
self.vote[-1] += 1
else:
self.vote[-1] -= 1
if cSTR % self.vote_window == 0:
self.pred_decision()
def pred_decision(self):
if self.mode == 'IMvsall' or self.mode == 'IMvsRest':
if self.vote[0] <= 0 and self.vote[1] <= 0:
self.prediction.extend([0])
print("pred:rest")
elif self.vote[0] > 0:
self.prediction.extend([1])
if self.begin:
# self.tn.write(('1').encode('ascii'))
self.begin = False
else:
# self.tn.write(('4').encode('ascii'))
self.cIM += 1
print("pred:left")
else:
self.prediction.extend([2])
if self.begin:
# self.tn.write(('1').encode('ascii'))
self.begin = False
else:
# self.tn.write(('3').encode('ascii'))
self.cIM += 1
print("pred:right")
elif self.mode == 'Rvsall':
if self.vote[0] <= 0:
self.prediction.extend([0])
print("pred:rest")
elif self.vote[1] > 0:
self.prediction.extend([1])
if self.begin:
self.tn.write(('1').encode('ascii'))
self.begin = False
else:
self.tn.write(('4').encode('ascii'))
self.cIM += 1
print("pred:left")
else:
self.prediction.extend([2])
if self.begin:
self.tn.write(('1').encode('ascii'))
self.begin = False
else:
self.tn.write(('3').encode('ascii'))
self.cIM += 1
print("pred:right")
else:
self.prediction.extend([self.pred[-1]])
if self.pred[-1] == 0:
print("pred:rest")
elif self.pred[-1] == 1:
if self.begin:
# self.tn.write(('1').encode('ascii'))
self.begin = False
else:
# self.tn.write(('4').encode('ascii'))
self.cIM += 1
print("pred:left")
else:
if self.begin:
# self.tn.write(('1').encode('ascii'))
self.begin = False
else:
# self.tn.write(('3').encode('ascii'))
self.cIM += 1
print("pred:right")
self.vote = [0] * len(self.vote)
def main_loop(self):
self.load_bcis('train')
self.vote = [0,0,0]
self.pred = []
self.prediction = []
self.begin = True
self.cIM = 0
cSTR = 0
cVC = 0
cdur = 0
cIMdur = 0
dur = self.Prdur
buffer = []
while cSTR < self.recdur:
sample, timestamp = self.inlet.pull_sample()
buffer += [sample,]
if cdur % (dur * self.Fs) == 0:
if self.content[cVC] == 'REST':
print("REST")
self.delete_all()
cdur = 0
dur = self.Rdur
cVC = cVC+1
elif self.content[cVC] == 'LEFT':
print("LEFT")
self.left_arrow()
cdur = 0
try:
dur = self.IMdur[cIMdur]
cIMdur += 1
except:
dur = self.IMdur
cVC = cVC+1
elif self.content[cVC] == 'RIGHT':
print("RIGHT")
self.right_arrow()
cdur = 0
try:
dur = self.IMdur[cIMdur]
cIMdur += 1
except:
dur = self.IMdur
cVC = cVC+1
elif self.content[cVC]=='PREPARE':
self.Concentration_Cross()
cdur = 0
dur = self.Prdur
cVC = cVC+1
if cSTR > 0 and cSTR % self.step == 0: #and self.cIM == 0:
t1 = threading.Thread(target = self.pred_im, args=(buffer[-self.tim_window:],cSTR,))
t1.start()
# elif cSTR > 0 and cSTR % self.step == 0:
#
# if self.cIM == self.IM_window:
# self.cIM = 0
# else:
# self.cIM += 1
cSTR = cSTR + 1
cdur = cdur + 1
# self.tn.write(('0').encode('ascii'))
return buffer
def save_recording(self, buffer):
LABELS = []
trig = []
offset = (self.Rdur + self.Prdur) * self.Fs
trig += [offset,]
idxs=np.where(self.content=='REST')
self.content = np.delete(self.content,idxs)
idxs=np.where(self.content=='PREPARE')
self.content = np.delete(self.content,idxs)
try:
for i, IMdur in enumerate(self.IMdur):
trig += [IMdur * self.Fs + offset + trig[-1],]
LABELS += [0] * offset
if self.content[i] == 'LEFT':
LABELS += [1] * IMdur * self.Fs
else:
LABELS += [2] * IMdur * self.Fs
except:
for i, visual_cue in enumerate(self.content):
trig += [self.IMdur * self.Fs + offset + trig[-1],]
LABELS += [0] * offset
if visual_cue == 'LEFT':
LABELS += [1] * self.IMdur * self.Fs
else:
LABELS += [2] * self.IMdur * self.Fs
LABELS += [0] * self.Rdur * self. Fs
pred = [[pr] * self.vote_window for pr in self.prediction]
trig = np.array(trig)
trig = np.delete(trig,-1)
LABELS = np.array(LABELS)
buffer = np.array(buffer)
pred = np.array(pred).flatten()
# create matlab files
sio.savemat('trig.mat', {'trig':trig})
sio.savemat('rec.mat', {'rec':buffer})
sio.savemat('LABELS.mat', {'LABELS':LABELS})
sio.savemat('pred.mat', {'pred':pred})
if __name__ == '__main__':
b_c_i = rt(mode = 'CSP_OVR', tim_window = 4, vote_window = 8, overlap = 0.5,IM_window = 0)
b_c_i.load_bcis('train')
b_c_i.begin_stream()
buffer = b_c_i.main_loop()
b_c_i.save_recording(buffer)
| 31.412256 | 151 | 0.423162 |
from threading import Thread
import numpy as np
import scipy.io as sio
from pylsl import StreamInlet, resolve_stream
from tkinter import *
import telnetlib
import pickle
import threading
from graphics import Graphics
class rt(Graphics):
def __init__(self, mode = 'IMvsall', tim_window = 4, vote_window = 4, overlap = 0,
IM_window = 2, HOST = "192.168.4.1"):
if mode == 'IMvsall' or 'Rvsall' or 'IMvsRest' or 'CSP_OVR' or'sync':
self.mode = mode
else:
raise ValueError('Inappropriate mode value')
with open("visual_cues.txt") as f:
content = f.readlines()
content = [line.rstrip('\n') for line in content]
self.Fs = int(content[0])
self.Rdur = int(content[1])
self.Prdur = int(content[3])
content = np.array(content)
self.vcN = len(content)
idxs = np.where(content == 'REST')
self.RN = len(idxs[0])
idxs = np.where(content == 'PREPARE')
self.PRN = len(idxs[0])
self.IMN = len(content) - self.RN - self.PRN - 4
try:
self.IMdur = int(content[2])
self.recdur = self.RN * self.Rdur * self.Fs + self.IMN * self.IMdur * self.Fs + self.PRN * self.Prdur * self.Fs
except:
IMdur = list(content[2].split(','))
self.IMdur = [int(i) for i in IMdur]
self.IMdur = [np.random.randint(IMdur[0],IMdur[1]) for i in range(self.IMN)]
self.recdur = self.RN * self.Rdur * self.Fs + sum(self.IMdur) * self.Fs + self.PRN * self.Prdur * self.Fs
self.content = np.delete(content,np.s_[:4])
if self.mode == 'sync':
self.tim_window = self.IMdur * self.Fs
self.vote_window = self.IMdur * self.Fs
self.step = (self.IMdur + self.Prdur + self.Rdur) * self.Fs
self.IM_window = 1
self.IMdur = [self.IMdur] * self.IMN
else:
self.tim_window = tim_window * self.Fs
self.vote_window = vote_window * self.Fs
self.overlap = overlap
self.step = int(self.tim_window * (1 - self.overlap))
self.IM_window = IM_window
Graphics.__init__(self)
def load_bcis(self, filename):
with open(filename, 'rb') as train:
self.bcis = pickle.load(train)
return self
def begin_stream(self):
print("looking for an EEG stream...")
self.streams = resolve_stream('type', 'EEG')
self.inlet = StreamInlet(self.streams[0])
def pred_im(self, chunk, cSTR):
self.pred = []
chunk = (np.array(chunk).T)/1000000000
chunk = chunk.reshape((1,chunk.shape[0],chunk.shape[1]))
for i, bci in enumerate(self.bcis[:-1]):
self.pred.append(bci.predict(chunk))
if self.pred[i] != 0:
self.vote[i] += 1
else:
self.vote[i] -= 1
self.pred.append(self.bcis[-1].predict(chunk))
if self.pred[-1] == 1:
self.vote[-1] += 1
else:
self.vote[-1] -= 1
if cSTR % self.vote_window == 0:
self.pred_decision()
def pred_decision(self):
if self.mode == 'IMvsall' or self.mode == 'IMvsRest':
if self.vote[0] <= 0 and self.vote[1] <= 0:
self.prediction.extend([0])
print("pred:rest")
elif self.vote[0] > 0:
self.prediction.extend([1])
if self.begin:
self.begin = False
else:
self.cIM += 1
print("pred:left")
else:
self.prediction.extend([2])
if self.begin:
self.begin = False
else:
self.cIM += 1
print("pred:right")
elif self.mode == 'Rvsall':
if self.vote[0] <= 0:
self.prediction.extend([0])
print("pred:rest")
elif self.vote[1] > 0:
self.prediction.extend([1])
if self.begin:
self.tn.write(('1').encode('ascii'))
self.begin = False
else:
self.tn.write(('4').encode('ascii'))
self.cIM += 1
print("pred:left")
else:
self.prediction.extend([2])
if self.begin:
self.tn.write(('1').encode('ascii'))
self.begin = False
else:
self.tn.write(('3').encode('ascii'))
self.cIM += 1
print("pred:right")
else:
self.prediction.extend([self.pred[-1]])
if self.pred[-1] == 0:
print("pred:rest")
elif self.pred[-1] == 1:
if self.begin:
self.begin = False
else:
self.cIM += 1
print("pred:left")
else:
if self.begin:
self.begin = False
else:
self.cIM += 1
print("pred:right")
self.vote = [0] * len(self.vote)
def main_loop(self):
self.load_bcis('train')
self.vote = [0,0,0]
self.pred = []
self.prediction = []
self.begin = True
self.cIM = 0
cSTR = 0
cVC = 0
cdur = 0
cIMdur = 0
dur = self.Prdur
buffer = []
while cSTR < self.recdur:
sample, timestamp = self.inlet.pull_sample()
buffer += [sample,]
if cdur % (dur * self.Fs) == 0:
if self.content[cVC] == 'REST':
print("REST")
self.delete_all()
cdur = 0
dur = self.Rdur
cVC = cVC+1
elif self.content[cVC] == 'LEFT':
print("LEFT")
self.left_arrow()
cdur = 0
try:
dur = self.IMdur[cIMdur]
cIMdur += 1
except:
dur = self.IMdur
cVC = cVC+1
elif self.content[cVC] == 'RIGHT':
print("RIGHT")
self.right_arrow()
cdur = 0
try:
dur = self.IMdur[cIMdur]
cIMdur += 1
except:
dur = self.IMdur
cVC = cVC+1
elif self.content[cVC]=='PREPARE':
self.Concentration_Cross()
cdur = 0
dur = self.Prdur
cVC = cVC+1
if cSTR > 0 and cSTR % self.step == 0:
t1 = threading.Thread(target = self.pred_im, args=(buffer[-self.tim_window:],cSTR,))
t1.start()
cSTR = cSTR + 1
cdur = cdur + 1
return buffer
def save_recording(self, buffer):
LABELS = []
trig = []
offset = (self.Rdur + self.Prdur) * self.Fs
trig += [offset,]
idxs=np.where(self.content=='REST')
self.content = np.delete(self.content,idxs)
idxs=np.where(self.content=='PREPARE')
self.content = np.delete(self.content,idxs)
try:
for i, IMdur in enumerate(self.IMdur):
trig += [IMdur * self.Fs + offset + trig[-1],]
LABELS += [0] * offset
if self.content[i] == 'LEFT':
LABELS += [1] * IMdur * self.Fs
else:
LABELS += [2] * IMdur * self.Fs
except:
for i, visual_cue in enumerate(self.content):
trig += [self.IMdur * self.Fs + offset + trig[-1],]
LABELS += [0] * offset
if visual_cue == 'LEFT':
LABELS += [1] * self.IMdur * self.Fs
else:
LABELS += [2] * self.IMdur * self.Fs
LABELS += [0] * self.Rdur * self. Fs
pred = [[pr] * self.vote_window for pr in self.prediction]
trig = np.array(trig)
trig = np.delete(trig,-1)
LABELS = np.array(LABELS)
buffer = np.array(buffer)
pred = np.array(pred).flatten()
sio.savemat('trig.mat', {'trig':trig})
sio.savemat('rec.mat', {'rec':buffer})
sio.savemat('LABELS.mat', {'LABELS':LABELS})
sio.savemat('pred.mat', {'pred':pred})
if __name__ == '__main__':
b_c_i = rt(mode = 'CSP_OVR', tim_window = 4, vote_window = 8, overlap = 0.5,IM_window = 0)
b_c_i.load_bcis('train')
b_c_i.begin_stream()
buffer = b_c_i.main_loop()
b_c_i.save_recording(buffer)
| true | true |
f7249040b586c4a22845aeab9c8fe2659a7527ef | 1,126 | py | Python | python/6.net/3.Ext/1.shell_server.py | dunitian/BaseCode | 4855ef4c6dd7c95d7239d2048832d8acfe26e084 | [
"Apache-2.0"
] | 25 | 2018-06-13T08:13:44.000Z | 2020-11-19T14:02:11.000Z | python/6.net/3.Ext/1.shell_server.py | dunitian/BaseCode | 4855ef4c6dd7c95d7239d2048832d8acfe26e084 | [
"Apache-2.0"
] | null | null | null | python/6.net/3.Ext/1.shell_server.py | dunitian/BaseCode | 4855ef4c6dd7c95d7239d2048832d8acfe26e084 | [
"Apache-2.0"
] | 13 | 2018-06-13T08:13:38.000Z | 2022-01-06T06:45:07.000Z | from socket import socket
def main():
    """Minimal remote-shell controller over a raw TCP socket.

    Listens on port 8080, accepts a single client, then repeatedly reads a
    command from stdin, sends it to the client, and prints whatever the
    client returns.  NOTE: commands and replies travel unauthenticated and
    unencrypted — demo code only, never expose this beyond a lab network.
    """
    with socket() as tcp_socket:
        tcp_socket.bind(('', 8080))
        tcp_socket.listen()
        # Blocks until one client connects; only that client is served.
        client_socket, client_addr = tcp_socket.accept()
        with client_socket:
            # Message is Chinese for "bot <addr> is now online".
            print(f"[肉鸡{client_addr}已经上线:]\n")
            while True:
                cmd = input("$ ")
                client_socket.send(cmd.encode("utf-8"))
                # Single recv of up to 2 KiB; longer replies are truncated.
                data = client_socket.recv(2048)
                if data:
                    print(data.decode("utf-8"))
if __name__ == "__main__":
    main()
# from socketserver import ThreadingTCPServer, BaseRequestHandler
# class MyHandler(BaseRequestHandler):
# def handle(self):
# print(f"[肉鸡{self.client_address}已经上线:]\n")
# while True:
# cmd = input("$ ")
# self.request.send(cmd.encode("utf-8"))
# data = self.request.recv(2048)
# # if data:
# print(data.decode("utf-8"))
# if __name__ == "__main__":
# ThreadingTCPServer.allow_reuse_address = True
# with ThreadingTCPServer(('', 8080), MyHandler) as server:
# server.serve_forever()
| 29.631579 | 65 | 0.562167 | from socket import socket
def main():
with socket() as tcp_socket:
tcp_socket.bind(('', 8080))
tcp_socket.listen()
client_socket, client_addr = tcp_socket.accept()
with client_socket:
print(f"[肉鸡{client_addr}已经上线:]\n")
while True:
cmd = input("$ ")
client_socket.send(cmd.encode("utf-8"))
data = client_socket.recv(2048)
if data:
print(data.decode("utf-8"))
if __name__ == "__main__":
main()
| true | true |
f724904db04d50387041ae9ec1500db44a5daf4d | 17,114 | py | Python | src/models/trainer_ext.py | Katarina11/PreSumm | 616e72f038d512e9e9112af375d66a0b2e3db6cd | [
"MIT"
] | null | null | null | src/models/trainer_ext.py | Katarina11/PreSumm | 616e72f038d512e9e9112af375d66a0b2e3db6cd | [
"MIT"
] | null | null | null | src/models/trainer_ext.py | Katarina11/PreSumm | 616e72f038d512e9e9112af375d66a0b2e3db6cd | [
"MIT"
] | null | null | null | import os
import numpy as np
import torch
from tensorboardX import SummaryWriter
import distributed
from models.reporter_ext import ReportMgr, Statistics
from others.logging import logger
from others.utils import test_rouge, rouge_results_to_str
def _tally_parameters(model):
n_params = sum([p.nelement() for p in model.parameters()])
return n_params
def build_trainer(args, device_id, model, optim):
    """
    Build an extractive-summarization `Trainer` from parsed options.

    Args:
        args: parsed command-line options (accum_count, world_size,
            gpu_ranks, model_path, report_every, ...).
        device_id (int): index into args.gpu_ranks for this process,
            or a negative value to run on CPU.
        model: the extractive model to train (may be None when the
            trainer is only used for testing).
        optim: the optimizer wrapper used during training.

    Returns:
        Trainer: configured with gradient accumulation, GPU rank and a
        TensorBoard-backed report manager.
    """
    grad_accum_count = args.accum_count
    n_gpu = args.world_size
    if device_id >= 0:
        gpu_rank = int(args.gpu_ranks[device_id])
    else:
        # CPU fallback: no GPUs, rank 0.
        gpu_rank = 0
        n_gpu = 0
    print('gpu_rank %d' % gpu_rank)

    # TensorBoard events are written next to the model checkpoints.
    tensorboard_log_dir = args.model_path

    writer = SummaryWriter(tensorboard_log_dir, comment="Unmt")

    report_manager = ReportMgr(args.report_every, start_time=-1, tensorboard_writer=writer)

    trainer = Trainer(args, model, optim, grad_accum_count, n_gpu, gpu_rank, report_manager)

    if (model):
        n_params = _tally_parameters(model)
        logger.info('* number of parameters: %d' % n_params)

    return trainer
class Trainer(object):
"""
Class that controls the training process.
Args:
model(:py:class:`onmt.models.model.NMTModel`): translation model
to train
train_loss(:obj:`onmt.utils.loss.LossComputeBase`):
training loss computation
valid_loss(:obj:`onmt.utils.loss.LossComputeBase`):
training loss computation
optim(:obj:`onmt.utils.optimizers.Optimizer`):
the optimizer responsible for update
trunc_size(int): length of truncated back propagation through time
shard_size(int): compute loss in shards of this size for efficiency
data_type(string): type of the source input: [text|img|audio]
norm_method(string): normalization methods: [sents|tokens]
grad_accum_count(int): accumulate gradients this many times.
report_manager(:obj:`onmt.utils.ReportMgrBase`):
the object that creates reports, or None
model_saver(:obj:`onmt.models.ModelSaverBase`): the saver is
used to save a checkpoint.
Thus nothing will be saved if this parameter is None
"""
    def __init__(self, args, model, optim,
                 grad_accum_count=1, n_gpu=1, gpu_rank=1,
                 report_manager=None):
        """
        Args:
            args: parsed options (save_checkpoint_steps, model_path, ...).
            model: extractive model producing per-sentence scores in [0, 1].
            optim: optimizer wrapper responsible for parameter updates.
            grad_accum_count (int): number of batches to accumulate
                gradients over before each optimizer step (must be > 0).
            n_gpu (int): number of GPUs participating in training.
            gpu_rank (int): this process's rank among the GPUs.
            report_manager: object that emits training reports, or None.
        """
        # Basic attributes.
        self.args = args
        self.save_checkpoint_steps = args.save_checkpoint_steps
        self.model = model
        self.optim = optim
        self.grad_accum_count = grad_accum_count
        self.n_gpu = n_gpu
        self.gpu_rank = gpu_rank
        self.report_manager = report_manager

        # Per-sentence binary cross-entropy with reduction='none' so that
        # padded sentence positions can be masked out before summing.
        self.loss = torch.nn.BCELoss(reduction='none')
        assert grad_accum_count > 0
        # Set model in training mode (enables dropout etc.).
        if (model):
            self.model.train()
    def train(self, train_iter_fct, train_steps, valid_iter_fct=None, valid_steps=-1):
        """
        Main training loop: iterate over training data until `train_steps`
        optimizer steps have been taken, restarting the data iterator
        whenever it is exhausted.

        Args:
            train_iter_fct (callable): zero-arg factory returning a fresh
                training-batch iterator (called once per epoch).
            train_steps (int): total number of optimizer steps to run.
            valid_iter_fct (callable): unused here; kept for interface
                compatibility with the abstractive trainer.
            valid_steps (int): unused here (see above).

        Returns:
            Statistics: accumulated loss statistics over the whole run.
        """
        logger.info('Start training...')
        # Resume the step counter from the (possibly restored) optimizer.
        step = self.optim._step + 1
        true_batchs = []
        accum = 0
        normalization = 0
        train_iter = train_iter_fct()

        total_stats = Statistics()
        report_stats = Statistics()
        self._start_report_manager(start_time=total_stats.start_time)

        while step <= train_steps:

            reduce_counter = 0
            for i, batch in enumerate(train_iter):
                # Round-robin sharding: each GPU rank takes every n_gpu-th
                # batch (or all batches when running on CPU, n_gpu == 0).
                if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank):

                    true_batchs.append(batch)
                    normalization += batch.batch_size
                    accum += 1
                    # One optimizer step per grad_accum_count batches.
                    if accum == self.grad_accum_count:
                        reduce_counter += 1
                        if self.n_gpu > 1:
                            # Sum the normalization constant across workers
                            # so the loss scale matches the global batch.
                            normalization = sum(distributed
                                                .all_gather_list
                                                (normalization))

                        self._gradient_accumulation(
                            true_batchs, normalization, total_stats,
                            report_stats)

                        report_stats = self._maybe_report_training(
                            step, train_steps,
                            self.optim.learning_rate,
                            report_stats)

                        true_batchs = []
                        accum = 0
                        normalization = 0
                        # Only rank 0 writes checkpoints, to avoid clashes.
                        if (step % self.save_checkpoint_steps == 0 and self.gpu_rank == 0):
                            self._save(step)

                        step += 1
                        if step > train_steps:
                            break
            # Iterator exhausted: start a new pass over the training data.
            train_iter = train_iter_fct()

        return total_stats
def validate(self, valid_iter, step=0):
""" Validate model.
valid_iter: validate data iterator
Returns:
:obj:`nmt.Statistics`: validation loss statistics
"""
# Set model in validating mode.
self.model.eval()
stats = Statistics()
with torch.no_grad():
for batch in valid_iter:
src = batch.src
labels = batch.src_sent_labels
segs = batch.segs
clss = batch.clss
mask = batch.mask_src
mask_cls = batch.mask_cls
sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)
loss = self.loss(sent_scores, labels.float())
loss = (loss * mask.float()).sum()
batch_stats = Statistics(float(loss.cpu().data.numpy()), len(labels))
stats.update(batch_stats)
self._report_step(0, step, valid_stats=stats)
return stats
    def test(self, test_iter, step, cal_lead=False, cal_oracle=False):
        """
        Run extractive inference over `test_iter`, writing candidate,
        gold and source files, and optionally computing ROUGE.

        Args:
            test_iter: iterator over test batches.
            step (int): checkpoint step, used in output file names;
                -1 suppresses ROUGE evaluation.
            cal_lead (bool): if True, select the leading sentences
                instead of model scores (LEAD baseline).
            cal_oracle (bool): if True, select the labelled oracle
                sentences instead of model scores.

        Returns:
            Statistics: accumulated loss statistics (empty for the
            lead/oracle baselines).
        """
        def _get_ngrams(n, text):
            # All n-grams (as tuples) of a token list.
            ngram_set = set()
            text_length = len(text)
            max_index_ngram_start = text_length - n
            for i in range(max_index_ngram_start + 1):
                ngram_set.add(tuple(text[i:i + n]))
            return ngram_set

        def _block_tri(c, p):
            # True if candidate sentence c shares any trigram with an
            # already-selected sentence in p (redundancy filter).
            tri_c = _get_ngrams(3, c.split())
            for s in p:
                tri_s = _get_ngrams(3, s.split())
                if len(tri_c.intersection(tri_s)) > 0:
                    return True
            return False

        # The model is only consulted when not running a baseline.
        if (not cal_lead and not cal_oracle):
            self.model.eval()
        stats = Statistics()

        can_path = '%s_step%d.candidate' % (self.args.result_path, step)
        gold_path = '%s_step%d.gold' % (self.args.result_path, step)
        # Source sentences are also dumped, for later inspection.
        # NOTE(review): `f` is closed manually below, so it leaks if an
        # exception escapes this method — consider a `with` block.
        src_path = '%s_step%d.src' % (self.args.result_path, step)
        f = open(src_path, 'w')
        sent_no = 0
        with open(can_path, 'w') as save_pred:
            with open(gold_path, 'w') as save_gold:
                with torch.no_grad():
                    for batch in test_iter:
                        src = batch.src
                        labels = batch.src_sent_labels
                        segs = batch.segs
                        clss = batch.clss
                        mask = batch.mask_src
                        mask_cls = batch.mask_cls

                        gold = []
                        pred = []
                        src_fix = []
                        if (cal_lead):
                            # LEAD baseline: take sentences in order.
                            selected_ids = [list(range(batch.clss.size(1)))] * batch.batch_size
                        elif (cal_oracle):
                            # Oracle: take exactly the labelled sentences.
                            selected_ids = [[j for j in range(batch.clss.size(1)) if labels[i][j] == 1] for i in
                                            range(batch.batch_size)]
                        else:
                            sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)

                            # Skip documents with no labelled sentences.
                            if labels.float().size()[1] != 0:
                                loss = self.loss(sent_scores, labels.float())
                            else:
                                continue
                            loss = (loss * mask.float()).sum()
                            batch_stats = Statistics(float(loss.cpu().data.numpy()), len(labels))
                            stats.update(batch_stats)

                            # Adding the mask pushes padded positions below
                            # real scores before ranking.
                            sent_scores = sent_scores + mask.float()
                            sent_scores = sent_scores.cpu().data.numpy()
                            # Rank sentence indices by descending score.
                            selected_ids = np.argsort(-sent_scores, 1)
                            # Skip short documents (< 7 sentences) —
                            # presumably to guarantee enough candidates;
                            # TODO confirm intent (fork-specific filter).
                            if len(selected_ids[0]) < 7:
                                continue
                        for i, idx in enumerate(selected_ids):
                            _pred = []
                            if (len(batch.src_str[i]) == 0):
                                continue
                            for j in selected_ids[i][:len(batch.src_str[i])]:
                                if (j >= len(batch.src_str[i])):
                                    continue
                                candidate = batch.src_str[i][j].strip()
                                if (self.args.block_trigram):
                                    # Trigram blocking: drop redundant picks.
                                    if (not _block_tri(candidate, _pred)):
                                        _pred.append(candidate)
                                else:
                                    _pred.append(candidate)

                                # Standard setting: stop at 3 sentences.
                                if ((not cal_oracle) and (not self.args.recall_eval) and len(_pred) == 3):
                                    break

                            _pred = '<q>'.join(_pred)
                            if (self.args.recall_eval):
                                # Truncate prediction to gold length for
                                # recall-style evaluation.
                                _pred = ' '.join(_pred.split()[:len(batch.tgt_str[i].split())])

                            pred.append(_pred)
                            gold.append(batch.tgt_str[i])
                            src_fix.append(batch.src_str[i])
                        sent_no += 1
                        # One line per document, keyed "<batch>_<doc>: ".
                        for i in range(len(gold)):
                            save_gold.write(str(sent_no) + "_" + str(i) + ': ' + gold[i].strip() + '\n')
                        for i in range(len(pred)):
                            save_pred.write(str(sent_no) + "_" + str(i) + ': ' + pred[i].strip() + '\n')
                        for i in range(len(pred)):
                            f.write(str(sent_no) + "_" + str(i) + ': ' + '###'.join(src_fix[i]).strip()+'\n')
        f.close()
        if (step != -1 and self.args.report_rouge):
            rouges = test_rouge(self.args.temp_dir, can_path, gold_path)
            logger.info('Rouges at step %d \n%s' % (step, rouge_results_to_str(rouges)))
        self._report_step(0, step, valid_stats=stats)

        return stats
    def _gradient_accumulation(self, true_batchs, normalization, total_stats,
                               report_stats):
        """Backpropagate over a window of batches and step the optimizer.

        When ``grad_accum_count == 1`` gradients are zeroed and the optimizer
        stepped once per batch; when ``> 1`` gradients are zeroed once up
        front, accumulated across all of ``true_batchs``, and a single
        optimizer step is taken at the end.

        Args:
            true_batchs: batches to process in this accumulation window.
            normalization: total batch-size count over the window; recorded
                in the per-batch `Statistics` (see NOTE below on scaling).
            total_stats: running `Statistics` for the whole training run.
            report_stats: running `Statistics` for the current report period.
        """
        # Multi-step accumulation: clear gradients once for the whole window.
        if self.grad_accum_count > 1:
            self.model.zero_grad()
        for batch in true_batchs:
            # Single-step mode: clear gradients before every batch.
            if self.grad_accum_count == 1:
                self.model.zero_grad()
            src = batch.src
            labels = batch.src_sent_labels
            segs = batch.segs
            clss = batch.clss
            mask = batch.mask_src
            mask_cls = batch.mask_cls
            # The model returns (sentence scores, mask); rebinding `mask` to
            # the model's returned mask is intentional.
            sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)
            # Per-sentence BCE, masked to real sentences, summed to a scalar.
            loss = self.loss(sent_scores, labels.float())
            loss = (loss * mask.float()).sum()
            # NOTE(review): after .sum() `loss` is 0-d, so loss.numel() == 1
            # and this division is a no-op; the commented alternative below
            # divides by `normalization` instead — confirm intended scaling.
            (loss / loss.numel()).backward()
            # loss.div(float(normalization)).backward()
            batch_stats = Statistics(float(loss.cpu().data.numpy()), normalization)
            total_stats.update(batch_stats)
            report_stats.update(batch_stats)
            # 4. Update the parameters and statistics.
            if self.grad_accum_count == 1:
                # Multi GPU gradient gather
                if self.n_gpu > 1:
                    grads = [p.grad.data for p in self.model.parameters()
                             if p.requires_grad
                             and p.grad is not None]
                    distributed.all_reduce_and_rescale_tensors(
                        grads, float(1))
                self.optim.step()
        # in case of multi step gradient accumulation,
        # update only after accum batches
        if self.grad_accum_count > 1:
            if self.n_gpu > 1:
                grads = [p.grad.data for p in self.model.parameters()
                         if p.requires_grad
                         and p.grad is not None]
                distributed.all_reduce_and_rescale_tensors(
                    grads, float(1))
            self.optim.step()
def _save(self, step):
real_model = self.model
# real_generator = (self.generator.module
# if isinstance(self.generator, torch.nn.DataParallel)
# else self.generator)
model_state_dict = real_model.state_dict()
# generator_state_dict = real_generator.state_dict()
checkpoint = {
'model': model_state_dict,
# 'generator': generator_state_dict,
'opt': self.args,
'optims': self.optim,
}
checkpoint_path = os.path.join(self.args.model_path, 'model_step_%d.pt' % step)
logger.info("Saving checkpoint %s" % checkpoint_path)
# checkpoint_path = '%s_step_%d.pt' % (FLAGS.model_path, step)
if (not os.path.exists(checkpoint_path)):
torch.save(checkpoint, checkpoint_path)
return checkpoint, checkpoint_path
def _start_report_manager(self, start_time=None):
"""
Simple function to start report manager (if any)
"""
if self.report_manager is not None:
if start_time is None:
self.report_manager.start()
else:
self.report_manager.start_time = start_time
def _maybe_gather_stats(self, stat):
"""
Gather statistics in multi-processes cases
Args:
stat(:obj:onmt.utils.Statistics): a Statistics object to gather
or None (it returns None in this case)
Returns:
stat: the updated (or unchanged) stat object
"""
if stat is not None and self.n_gpu > 1:
return Statistics.all_gather_stats(stat)
return stat
def _maybe_report_training(self, step, num_steps, learning_rate,
report_stats):
"""
Simple function to report training stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_training` for doc
"""
if self.report_manager is not None:
return self.report_manager.report_training(
step, num_steps, learning_rate, report_stats,
multigpu=self.n_gpu > 1)
def _report_step(self, learning_rate, step, train_stats=None,
valid_stats=None):
"""
Simple function to report stats (if report_manager is set)
see `onmt.utils.ReportManagerBase.report_step` for doc
"""
if self.report_manager is not None:
return self.report_manager.report_step(
learning_rate, step, train_stats=train_stats,
valid_stats=valid_stats)
def _maybe_save(self, step):
"""
Save the model if a model saver is set
"""
if self.model_saver is not None:
self.model_saver.maybe_save(step)
| 38.895455 | 112 | 0.521678 | import os
import numpy as np
import torch
from tensorboardX import SummaryWriter
import distributed
from models.reporter_ext import ReportMgr, Statistics
from others.logging import logger
from others.utils import test_rouge, rouge_results_to_str
def _tally_parameters(model):
n_params = sum([p.nelement() for p in model.parameters()])
return n_params
def build_trainer(args, device_id, model, optim):
grad_accum_count = args.accum_count
n_gpu = args.world_size
if device_id >= 0:
gpu_rank = int(args.gpu_ranks[device_id])
else:
gpu_rank = 0
n_gpu = 0
print('gpu_rank %d' % gpu_rank)
tensorboard_log_dir = args.model_path
writer = SummaryWriter(tensorboard_log_dir, comment="Unmt")
report_manager = ReportMgr(args.report_every, start_time=-1, tensorboard_writer=writer)
trainer = Trainer(args, model, optim, grad_accum_count, n_gpu, gpu_rank, report_manager)
if (model):
n_params = _tally_parameters(model)
logger.info('* number of parameters: %d' % n_params)
return trainer
class Trainer(object):
def __init__(self, args, model, optim,
grad_accum_count=1, n_gpu=1, gpu_rank=1,
report_manager=None):
self.args = args
self.save_checkpoint_steps = args.save_checkpoint_steps
self.model = model
self.optim = optim
self.grad_accum_count = grad_accum_count
self.n_gpu = n_gpu
self.gpu_rank = gpu_rank
self.report_manager = report_manager
self.loss = torch.nn.BCELoss(reduction='none')
assert grad_accum_count > 0
if (model):
self.model.train()
def train(self, train_iter_fct, train_steps, valid_iter_fct=None, valid_steps=-1):
logger.info('Start training...')
step = self.optim._step + 1
true_batchs = []
accum = 0
normalization = 0
train_iter = train_iter_fct()
total_stats = Statistics()
report_stats = Statistics()
self._start_report_manager(start_time=total_stats.start_time)
while step <= train_steps:
reduce_counter = 0
for i, batch in enumerate(train_iter):
if self.n_gpu == 0 or (i % self.n_gpu == self.gpu_rank):
true_batchs.append(batch)
normalization += batch.batch_size
accum += 1
if accum == self.grad_accum_count:
reduce_counter += 1
if self.n_gpu > 1:
normalization = sum(distributed
.all_gather_list
(normalization))
self._gradient_accumulation(
true_batchs, normalization, total_stats,
report_stats)
report_stats = self._maybe_report_training(
step, train_steps,
self.optim.learning_rate,
report_stats)
true_batchs = []
accum = 0
normalization = 0
if (step % self.save_checkpoint_steps == 0 and self.gpu_rank == 0):
self._save(step)
step += 1
if step > train_steps:
break
train_iter = train_iter_fct()
return total_stats
def validate(self, valid_iter, step=0):
self.model.eval()
stats = Statistics()
with torch.no_grad():
for batch in valid_iter:
src = batch.src
labels = batch.src_sent_labels
segs = batch.segs
clss = batch.clss
mask = batch.mask_src
mask_cls = batch.mask_cls
sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)
loss = self.loss(sent_scores, labels.float())
loss = (loss * mask.float()).sum()
batch_stats = Statistics(float(loss.cpu().data.numpy()), len(labels))
stats.update(batch_stats)
self._report_step(0, step, valid_stats=stats)
return stats
def test(self, test_iter, step, cal_lead=False, cal_oracle=False):
def _get_ngrams(n, text):
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def _block_tri(c, p):
tri_c = _get_ngrams(3, c.split())
for s in p:
tri_s = _get_ngrams(3, s.split())
if len(tri_c.intersection(tri_s)) > 0:
return True
return False
if (not cal_lead and not cal_oracle):
self.model.eval()
stats = Statistics()
can_path = '%s_step%d.candidate' % (self.args.result_path, step)
gold_path = '%s_step%d.gold' % (self.args.result_path, step)
src_path = '%s_step%d.src' % (self.args.result_path, step)
f = open(src_path, 'w')
sent_no = 0
with open(can_path, 'w') as save_pred:
with open(gold_path, 'w') as save_gold:
with torch.no_grad():
for batch in test_iter:
src = batch.src
labels = batch.src_sent_labels
segs = batch.segs
clss = batch.clss
mask = batch.mask_src
mask_cls = batch.mask_cls
gold = []
pred = []
src_fix = []
if (cal_lead):
selected_ids = [list(range(batch.clss.size(1)))] * batch.batch_size
elif (cal_oracle):
selected_ids = [[j for j in range(batch.clss.size(1)) if labels[i][j] == 1] for i in
range(batch.batch_size)]
else:
sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)
if labels.float().size()[1] != 0:
loss = self.loss(sent_scores, labels.float())
else:
continue
loss = (loss * mask.float()).sum()
batch_stats = Statistics(float(loss.cpu().data.numpy()), len(labels))
stats.update(batch_stats)
sent_scores = sent_scores + mask.float()
sent_scores = sent_scores.cpu().data.numpy()
selected_ids = np.argsort(-sent_scores, 1)
if len(selected_ids[0]) < 7:
continue
for i, idx in enumerate(selected_ids):
_pred = []
if (len(batch.src_str[i]) == 0):
continue
for j in selected_ids[i][:len(batch.src_str[i])]:
if (j >= len(batch.src_str[i])):
continue
candidate = batch.src_str[i][j].strip()
if (self.args.block_trigram):
if (not _block_tri(candidate, _pred)):
_pred.append(candidate)
else:
_pred.append(candidate)
if ((not cal_oracle) and (not self.args.recall_eval) and len(_pred) == 3):
break
_pred = '<q>'.join(_pred)
if (self.args.recall_eval):
_pred = ' '.join(_pred.split()[:len(batch.tgt_str[i].split())])
pred.append(_pred)
gold.append(batch.tgt_str[i])
src_fix.append(batch.src_str[i])
sent_no += 1
for i in range(len(gold)):
save_gold.write(str(sent_no) + "_" + str(i) + ': ' + gold[i].strip() + '\n')
for i in range(len(pred)):
save_pred.write(str(sent_no) + "_" + str(i) + ': ' + pred[i].strip() + '\n')
for i in range(len(pred)):
f.write(str(sent_no) + "_" + str(i) + ': ' + '###'.join(src_fix[i]).strip()+'\n')
f.close()
if (step != -1 and self.args.report_rouge):
rouges = test_rouge(self.args.temp_dir, can_path, gold_path)
logger.info('Rouges at step %d \n%s' % (step, rouge_results_to_str(rouges)))
self._report_step(0, step, valid_stats=stats)
return stats
def _gradient_accumulation(self, true_batchs, normalization, total_stats,
report_stats):
if self.grad_accum_count > 1:
self.model.zero_grad()
for batch in true_batchs:
if self.grad_accum_count == 1:
self.model.zero_grad()
src = batch.src
labels = batch.src_sent_labels
segs = batch.segs
clss = batch.clss
mask = batch.mask_src
mask_cls = batch.mask_cls
sent_scores, mask = self.model(src, segs, clss, mask, mask_cls)
loss = self.loss(sent_scores, labels.float())
loss = (loss * mask.float()).sum()
(loss / loss.numel()).backward()
batch_stats = Statistics(float(loss.cpu().data.numpy()), normalization)
total_stats.update(batch_stats)
report_stats.update(batch_stats)
if self.grad_accum_count == 1:
if self.n_gpu > 1:
grads = [p.grad.data for p in self.model.parameters()
if p.requires_grad
and p.grad is not None]
distributed.all_reduce_and_rescale_tensors(
grads, float(1))
self.optim.step()
if self.grad_accum_count > 1:
if self.n_gpu > 1:
grads = [p.grad.data for p in self.model.parameters()
if p.requires_grad
and p.grad is not None]
distributed.all_reduce_and_rescale_tensors(
grads, float(1))
self.optim.step()
def _save(self, step):
real_model = self.model
model_state_dict = real_model.state_dict()
checkpoint = {
'model': model_state_dict,
'opt': self.args,
'optims': self.optim,
}
checkpoint_path = os.path.join(self.args.model_path, 'model_step_%d.pt' % step)
logger.info("Saving checkpoint %s" % checkpoint_path)
if (not os.path.exists(checkpoint_path)):
torch.save(checkpoint, checkpoint_path)
return checkpoint, checkpoint_path
def _start_report_manager(self, start_time=None):
if self.report_manager is not None:
if start_time is None:
self.report_manager.start()
else:
self.report_manager.start_time = start_time
def _maybe_gather_stats(self, stat):
if stat is not None and self.n_gpu > 1:
return Statistics.all_gather_stats(stat)
return stat
def _maybe_report_training(self, step, num_steps, learning_rate,
report_stats):
if self.report_manager is not None:
return self.report_manager.report_training(
step, num_steps, learning_rate, report_stats,
multigpu=self.n_gpu > 1)
def _report_step(self, learning_rate, step, train_stats=None,
valid_stats=None):
if self.report_manager is not None:
return self.report_manager.report_step(
learning_rate, step, train_stats=train_stats,
valid_stats=valid_stats)
def _maybe_save(self, step):
if self.model_saver is not None:
self.model_saver.maybe_save(step)
| true | true |
f724910c59315867a42a56fab3deb36f5d3adb7a | 46,062 | py | Python | tensorflow/contrib/bayesflow/python/ops/hmc_impl.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | [
"Apache-2.0"
] | 5 | 2019-05-23T02:59:21.000Z | 2020-02-05T08:20:23.000Z | tensorflow/contrib/bayesflow/python/ops/hmc_impl.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | [
"Apache-2.0"
] | 2 | 2017-08-01T21:11:06.000Z | 2017-08-01T23:07:02.000Z | tensorflow/contrib/bayesflow/python/ops/hmc_impl.py | M155K4R4/Tensorflow | e5e03ef3148303b3dfed89a1492dedf92b45be25 | [
"Apache-2.0"
] | 2 | 2019-09-05T06:43:24.000Z | 2019-09-07T07:58:34.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Hamiltonian Monte Carlo, a gradient-based MCMC algorithm.
@@sample_chain
@@sample_annealed_importance_chain
@@kernel
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl as gradients_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import util as distributions_util
__all__ = [
"sample_chain",
"sample_annealed_importance_chain",
"kernel",
]
KernelResults = collections.namedtuple(
"KernelResults",
[
"acceptance_probs",
"current_grads_target_log_prob", # "Current result" means "accepted".
"current_target_log_prob", # "Current result" means "accepted".
"energy_change",
"is_accepted",
"proposed_grads_target_log_prob",
"proposed_state",
"proposed_target_log_prob",
"random_positive",
])
def _make_dummy_kernel_results(
dummy_state,
dummy_target_log_prob,
dummy_grads_target_log_prob):
return KernelResults(
acceptance_probs=dummy_target_log_prob,
current_grads_target_log_prob=dummy_grads_target_log_prob,
current_target_log_prob=dummy_target_log_prob,
energy_change=dummy_target_log_prob,
is_accepted=array_ops.ones_like(dummy_target_log_prob, dtypes.bool),
proposed_grads_target_log_prob=dummy_grads_target_log_prob,
proposed_state=dummy_state,
proposed_target_log_prob=dummy_target_log_prob,
random_positive=dummy_target_log_prob,
)
def sample_chain(
num_results,
target_log_prob_fn,
current_state,
step_size,
num_leapfrog_steps,
num_burnin_steps=0,
num_steps_between_results=0,
seed=None,
current_target_log_prob=None,
current_grads_target_log_prob=None,
name=None):
"""Runs multiple iterations of one or more Hamiltonian Monte Carlo chains.
Hamiltonian Monte Carlo (HMC) is a Markov chain Monte Carlo (MCMC) algorithm
that takes a series of gradient-informed steps to produce a Metropolis
proposal. This function samples from an HMC Markov chain at `current_state`
and whose stationary distribution has log-unnormalized-density
`target_log_prob_fn()`.
This function samples from multiple chains in parallel. It assumes that the
the leftmost dimensions of (each) `current_state` (part) index an independent
chain. The function `target_log_prob_fn()` sums log-probabilities across
event dimensions (i.e., current state (part) rightmost dimensions). Each
element of the output of `target_log_prob_fn()` represents the (possibly
unnormalized) log-probability of the joint distribution over (all) the current
state (parts).
The `current_state` can be represented as a single `Tensor` or a `list` of
`Tensors` which collectively represent the current state. When specifying a
`list`, one must also specify a list of `step_size`s.
Note: `target_log_prob_fn` is called exactly twice.
Only one out of every `num_steps_between_samples + 1` steps is included in the
returned results. This "thinning" comes at a cost of reduced statistical
power, while reducing memory requirements and autocorrelation. For more
discussion see [1].
[1]: "Statistically efficient thinning of a Markov chain sampler."
Art B. Owen. April 2017.
http://statweb.stanford.edu/~owen/reports/bestthinning.pdf
#### Examples:
##### Sample from a diagonal-variance Gaussian.
```python
tfd = tf.contrib.distributions
def make_likelihood(true_variances):
return tfd.MultivariateNormalDiag(
scale_diag=tf.sqrt(true_variances))
dims = 10
dtype = np.float32
true_variances = tf.linspace(dtype(1), dtype(3), dims)
likelihood = make_likelihood(true_variances)
states, kernel_results = hmc.sample_chain(
num_results=1000,
target_log_prob_fn=likelihood.log_prob,
current_state=tf.zeros(dims),
step_size=0.5,
num_leapfrog_steps=2,
num_burnin_steps=500)
# Compute sample stats.
sample_mean = tf.reduce_mean(states, axis=0)
sample_var = tf.reduce_mean(
tf.squared_difference(states, sample_mean),
axis=0)
```
##### Sampling from factor-analysis posteriors with known factors.
I.e.,
```none
for i=1..n:
w[i] ~ Normal(0, eye(d)) # prior
x[i] ~ Normal(loc=matmul(w[i], F)) # likelihood
```
where `F` denotes factors.
```python
tfd = tf.contrib.distributions
def make_prior(dims, dtype):
return tfd.MultivariateNormalDiag(
loc=tf.zeros(dims, dtype))
def make_likelihood(weights, factors):
return tfd.MultivariateNormalDiag(
loc=tf.tensordot(weights, factors, axes=[[0], [-1]]))
# Setup data.
num_weights = 10
num_factors = 4
num_chains = 100
dtype = np.float32
prior = make_prior(num_weights, dtype)
weights = prior.sample(num_chains)
factors = np.random.randn(num_factors, num_weights).astype(dtype)
x = make_likelihood(weights, factors).sample(num_chains)
def target_log_prob(w):
# Target joint is: `f(w) = p(w, x | factors)`.
return prior.log_prob(w) + make_likelihood(w, factors).log_prob(x)
# Get `num_results` samples from `num_chains` independent chains.
chains_states, kernels_results = hmc.sample_chain(
num_results=1000,
target_log_prob_fn=target_log_prob,
current_state=tf.zeros([num_chains, dims], dtype),
step_size=0.1,
num_leapfrog_steps=2,
num_burnin_steps=500)
# Compute sample stats.
sample_mean = tf.reduce_mean(chains_states, axis=[0, 1])
sample_var = tf.reduce_mean(
tf.squared_difference(chains_states, sample_mean),
axis=[0, 1])
```
Args:
num_results: Integer number of Markov chain draws.
target_log_prob_fn: Python callable which takes an argument like
`current_state` (or `*current_state` if it's a list) and returns its
(possibly unnormalized) log-density under the target distribution.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
step_size: `Tensor` or Python `list` of `Tensor`s representing the step size
for the leapfrog integrator. Must broadcast with the shape of
`current_state`. Larger step sizes lead to faster progress, but too-large
step sizes make rejection exponentially more likely. When possible, it's
often helpful to match per-variable step sizes to the standard deviations
of the target distribution in each variable.
num_leapfrog_steps: Integer number of steps to run the leapfrog integrator
for. Total progress per HMC step is roughly proportional to `step_size *
num_leapfrog_steps`.
num_burnin_steps: Integer number of chain steps to take before starting to
collect results.
Default value: 0 (i.e., no burn-in).
num_steps_between_results: Integer number of chain steps between collecting
a result. Only one out of every `num_steps_between_samples + 1` steps is
included in the returned results. This "thinning" comes at a cost of
reduced statistical power, while reducing memory requirements and
autocorrelation. For more discussion see [1].
Default value: 0 (i.e., no subsampling).
seed: Python integer to seed the random number generator.
current_target_log_prob: (Optional) `Tensor` representing the value of
`target_log_prob_fn` at the `current_state`. The only reason to specify
this argument is to reduce TF graph size.
Default value: `None` (i.e., compute as needed).
current_grads_target_log_prob: (Optional) Python list of `Tensor`s
representing gradient of `target_log_prob` at the `current_state` and wrt
the `current_state`. Must have same shape as `current_state`. The only
reason to specify this argument is to reduce TF graph size.
Default value: `None` (i.e., compute as needed).
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "hmc_sample_chain").
Returns:
accepted_states: Tensor or Python list of `Tensor`s representing the
state(s) of the Markov chain(s) at each result step. Has same shape as
input `current_state` but with a prepended `num_results`-size dimension.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
"""
with ops.name_scope(
name, "hmc_sample_chain",
[num_results, current_state, step_size, num_leapfrog_steps,
num_burnin_steps, num_steps_between_results, seed,
current_target_log_prob, current_grads_target_log_prob]):
with ops.name_scope("initialize"):
[
current_state,
step_size,
current_target_log_prob,
current_grads_target_log_prob,
] = _prepare_args(
target_log_prob_fn,
current_state,
step_size,
current_target_log_prob,
current_grads_target_log_prob)
num_results = ops.convert_to_tensor(
num_results,
dtype=dtypes.int32,
name="num_results")
num_leapfrog_steps = ops.convert_to_tensor(
num_leapfrog_steps,
dtype=dtypes.int32,
name="num_leapfrog_steps")
num_burnin_steps = ops.convert_to_tensor(
num_burnin_steps,
dtype=dtypes.int32,
name="num_burnin_steps")
num_steps_between_results = ops.convert_to_tensor(
num_steps_between_results,
dtype=dtypes.int32,
name="num_steps_between_results")
def _run_chain(num_steps, current_state, kernel_results):
"""Runs the chain(s) for `num_steps`."""
def _loop_body(iter_, current_state, kernel_results):
return [iter_ + 1] + list(kernel(
target_log_prob_fn,
current_state,
step_size,
num_leapfrog_steps,
seed,
kernel_results.current_target_log_prob,
kernel_results.current_grads_target_log_prob))
while_loop_kwargs = dict(
cond=lambda iter_, *args: iter_ < num_steps,
body=_loop_body,
loop_vars=[
np.int32(0),
current_state,
kernel_results,
],
)
if seed is not None:
while_loop_kwargs["parallel_iterations"] = 1
return control_flow_ops.while_loop(
**while_loop_kwargs)[1:] # Lop-off "iter_".
def _scan_body(args_list, iter_):
"""Closure which implements `tf.scan` body."""
current_state, kernel_results = args_list
return _run_chain(
1 + array_ops.where(math_ops.equal(iter_, 0),
num_burnin_steps,
num_steps_between_results),
current_state,
kernel_results)
scan_kwargs = dict(
fn=_scan_body,
elems=math_ops.range(num_results), # iter_: used to choose burnin.
initializer=[
current_state,
_make_dummy_kernel_results(
current_state,
current_target_log_prob,
current_grads_target_log_prob),
])
if seed is not None:
scan_kwargs["parallel_iterations"] = 1
return functional_ops.scan(**scan_kwargs)
def sample_annealed_importance_chain(
proposal_log_prob_fn,
num_steps,
target_log_prob_fn,
current_state,
step_size,
num_leapfrog_steps,
seed=None,
name=None):
"""Runs annealed importance sampling (AIS) to estimate normalizing constants.
This function uses Hamiltonian Monte Carlo to sample from a series of
distributions that slowly interpolates between an initial "proposal"
distribution:
`exp(proposal_log_prob_fn(x) - proposal_log_normalizer)`
and the target distribution:
`exp(target_log_prob_fn(x) - target_log_normalizer)`,
accumulating importance weights along the way. The product of these
importance weights gives an unbiased estimate of the ratio of the
normalizing constants of the initial distribution and the target
distribution:
`E[exp(ais_weights)] = exp(target_log_normalizer - proposal_log_normalizer)`.
Note: `proposal_log_prob_fn` and `target_log_prob_fn` are called exactly three
times (although this may be reduced to two times, in the future).
#### Examples:
##### Estimate the normalizing constant of a log-gamma distribution.
```python
tfd = tf.contrib.distributions
# Run 100 AIS chains in parallel
num_chains = 100
dims = 20
dtype = np.float32
proposal = tfd.MultivatiateNormalDiag(
loc=tf.zeros([dims], dtype=dtype))
target = tfd.TransformedDistribution(
distribution=tfd.Gamma(concentration=dtype(2),
rate=dtype(3)),
bijector=tfd.bijectors.Invert(tfd.bijectors.Exp()),
event_shape=[dims])
chains_state, ais_weights, kernels_results = (
hmc.sample_annealed_importance_chain(
proposal_log_prob_fn=proposal.log_prob,
num_steps=1000,
target_log_prob_fn=target.log_prob,
step_size=0.2,
current_state=proposal.sample(num_chains),
num_leapfrog_steps=2))
log_estimated_normalizer = (tf.reduce_logsumexp(ais_weights)
- np.log(num_chains))
log_true_normalizer = tf.lgamma(2.) - 2. * tf.log(3.)
```
##### Estimate marginal likelihood of a Bayesian regression model.
```python
tfd = tf.contrib.distributions
def make_prior(dims, dtype):
return tfd.MultivariateNormalDiag(
loc=tf.zeros(dims, dtype))
def make_likelihood(weights, x):
return tfd.MultivariateNormalDiag(
loc=tf.tensordot(weights, x, axes=[[0], [-1]]))
# Run 100 AIS chains in parallel
num_chains = 100
dims = 10
dtype = np.float32
# Make training data.
x = np.random.randn(num_chains, dims).astype(dtype)
true_weights = np.random.randn(dims).astype(dtype)
y = np.dot(x, true_weights) + np.random.randn(num_chains)
# Setup model.
prior = make_prior(dims, dtype)
def target_log_prob_fn(weights):
return prior.log_prob(weights) + make_likelihood(weights, x).log_prob(y)
proposal = tfd.MultivariateNormalDiag(
loc=tf.zeros(dims, dtype))
weight_samples, ais_weights, kernel_results = (
hmc.sample_annealed_importance_chain(
num_steps=1000,
proposal_log_prob_fn=proposal.log_prob,
target_log_prob_fn=target_log_prob_fn
current_state=tf.zeros([num_chains, dims], dtype),
step_size=0.1,
num_leapfrog_steps=2))
log_normalizer_estimate = (tf.reduce_logsumexp(ais_weights)
- np.log(num_chains))
```
Args:
proposal_log_prob_fn: Python callable that returns the log density of the
initial distribution.
num_steps: Integer number of Markov chain updates to run. More
iterations means more expense, but smoother annealing between q
and p, which in turn means exponentially lower variance for the
normalizing constant estimator.
target_log_prob_fn: Python callable which takes an argument like
`current_state` (or `*current_state` if it's a list) and returns its
(possibly unnormalized) log-density under the target distribution.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
step_size: `Tensor` or Python `list` of `Tensor`s representing the step size
for the leapfrog integrator. Must broadcast with the shape of
`current_state`. Larger step sizes lead to faster progress, but too-large
step sizes make rejection exponentially more likely. When possible, it's
often helpful to match per-variable step sizes to the standard deviations
of the target distribution in each variable.
num_leapfrog_steps: Integer number of steps to run the leapfrog integrator
for. Total progress per HMC step is roughly proportional to `step_size *
num_leapfrog_steps`.
seed: Python integer to seed the random number generator.
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "hmc_sample_annealed_importance_chain").
Returns:
accepted_state: `Tensor` or Python list of `Tensor`s representing the
state(s) of the Markov chain(s) at the final iteration. Has same shape as
input `current_state`.
ais_weights: Tensor with the estimated weight(s). Has shape matching
`target_log_prob_fn(current_state)`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
"""
def make_convex_combined_log_prob_fn(iter_):
def _fn(*args):
p = proposal_log_prob_fn(*args)
t = target_log_prob_fn(*args)
dtype = p.dtype.base_dtype
beta = (math_ops.cast(iter_ + 1, dtype)
/ math_ops.cast(num_steps, dtype))
return (1. - beta) * p + beta * t
return _fn
with ops.name_scope(
name, "hmc_sample_annealed_importance_chain",
[num_steps, current_state, step_size, num_leapfrog_steps, seed]):
with ops.name_scope("initialize"):
[
current_state,
step_size,
current_log_prob,
current_grads_log_prob,
] = _prepare_args(
make_convex_combined_log_prob_fn(iter_=0),
current_state,
step_size,
description="convex_combined_log_prob")
num_steps = ops.convert_to_tensor(
num_steps,
dtype=dtypes.int32,
name="num_steps")
num_leapfrog_steps = ops.convert_to_tensor(
num_leapfrog_steps,
dtype=dtypes.int32,
name="num_leapfrog_steps")
def _loop_body(iter_, ais_weights, current_state, kernel_results):
"""Closure which implements `tf.while_loop` body."""
current_state_parts = (list(current_state)
if _is_list_like(current_state)
else [current_state])
# TODO(b/72994218): Consider refactoring things to avoid this unecessary
# call.
ais_weights += ((target_log_prob_fn(*current_state_parts)
- proposal_log_prob_fn(*current_state_parts))
/ math_ops.cast(num_steps, ais_weights.dtype))
return [iter_ + 1, ais_weights] + list(kernel(
make_convex_combined_log_prob_fn(iter_),
current_state,
step_size,
num_leapfrog_steps,
seed,
kernel_results.current_target_log_prob,
kernel_results.current_grads_target_log_prob))
while_loop_kwargs = dict(
cond=lambda iter_, *args: iter_ < num_steps,
body=_loop_body,
loop_vars=[
np.int32(0), # iter_
array_ops.zeros_like(current_log_prob), # ais_weights
current_state,
_make_dummy_kernel_results(current_state,
current_log_prob,
current_grads_log_prob),
])
if seed is not None:
while_loop_kwargs["parallel_iterations"] = 1
[ais_weights, current_state, kernel_results] = control_flow_ops.while_loop(
**while_loop_kwargs)[1:] # Lop-off "iter_".
return [current_state, ais_weights, kernel_results]
def kernel(target_log_prob_fn,
current_state,
step_size,
num_leapfrog_steps,
seed=None,
current_target_log_prob=None,
current_grads_target_log_prob=None,
name=None):
"""Runs one iteration of Hamiltonian Monte Carlo.
Hamiltonian Monte Carlo (HMC) is a Markov chain Monte Carlo (MCMC)
algorithm that takes a series of gradient-informed steps to produce
a Metropolis proposal. This function applies one step of HMC to
randomly update the variable `x`.
This function can update multiple chains in parallel. It assumes that all
leftmost dimensions of `current_state` index independent chain states (and are
therefore updated independently). The output of `target_log_prob_fn()` should
sum log-probabilities across all event dimensions. Slices along the rightmost
dimensions may have different target distributions; for example,
`current_state[0, :]` could have a different target distribution from
`current_state[1, :]`. This is up to `target_log_prob_fn()`. (The number of
independent chains is `tf.size(target_log_prob_fn(*current_state))`.)
#### Examples:
##### Simple chain with warm-up.
```python
tfd = tf.contrib.distributions
# Tuning acceptance rates:
dtype = np.float32
target_accept_rate = 0.631
num_warmup_iter = 500
num_chain_iter = 500
x = tf.get_variable(name="x", initializer=dtype(1))
step_size = tf.get_variable(name="step_size", initializer=dtype(1))
target = tfd.Normal(loc=dtype(0), scale=dtype(1))
new_x, other_results = hmc.kernel(
target_log_prob_fn=target.log_prob,
current_state=x,
step_size=step_size,
num_leapfrog_steps=3)[:4]
x_update = x.assign(new_x)
step_size_update = step_size.assign_add(
step_size * tf.where(
other_results.acceptance_probs > target_accept_rate,
0.01, -0.01))
warmup = tf.group([x_update, step_size_update])
tf.global_variables_initializer().run()
sess.graph.finalize() # No more graph building.
# Warm up the sampler and adapt the step size
for _ in xrange(num_warmup_iter):
sess.run(warmup)
# Collect samples without adapting step size
samples = np.zeros([num_chain_iter])
for i in xrange(num_chain_iter):
_, x_, target_log_prob_, grad_ = sess.run([
x_update,
x,
other_results.target_log_prob,
other_results.grads_target_log_prob])
samples[i] = x_
print(samples.mean(), samples.std())
```
##### Sample from more complicated posterior.
I.e.,
```none
W ~ MVN(loc=0, scale=sigma * eye(dims))
for i=1...num_samples:
X[i] ~ MVN(loc=0, scale=eye(dims))
eps[i] ~ Normal(loc=0, scale=1)
Y[i] = X[i].T * W + eps[i]
```
```python
tfd = tf.contrib.distributions
def make_training_data(num_samples, dims, sigma):
dt = np.asarray(sigma).dtype
zeros = tf.zeros(dims, dtype=dt)
x = tfd.MultivariateNormalDiag(
loc=zeros).sample(num_samples, seed=1)
w = tfd.MultivariateNormalDiag(
loc=zeros,
scale_identity_multiplier=sigma).sample(seed=2)
noise = tfd.Normal(
loc=dt(0),
scale=dt(1)).sample(num_samples, seed=3)
y = tf.tensordot(x, w, axes=[[1], [0]]) + noise
return y, x, w
def make_prior(sigma, dims):
# p(w | sigma)
return tfd.MultivariateNormalDiag(
loc=tf.zeros([dims], dtype=sigma.dtype),
scale_identity_multiplier=sigma)
def make_likelihood(x, w):
# p(y | x, w)
return tfd.MultivariateNormalDiag(
loc=tf.tensordot(x, w, axes=[[1], [0]]))
# Setup assumptions.
dtype = np.float32
num_samples = 150
dims = 10
num_iters = int(5e3)
true_sigma = dtype(0.5)
y, x, true_weights = make_training_data(num_samples, dims, true_sigma)
# Estimate of `log(true_sigma)`.
log_sigma = tf.get_variable(name="log_sigma", initializer=dtype(0))
sigma = tf.exp(log_sigma)
# State of the Markov chain.
weights = tf.get_variable(
name="weights",
initializer=np.random.randn(dims).astype(dtype))
prior = make_prior(sigma, dims)
def joint_log_prob_fn(w):
# f(w) = log p(w, y | x)
return prior.log_prob(w) + make_likelihood(x, w).log_prob(y)
weights_update = weights.assign(
hmc.kernel(target_log_prob_fn=joint_log_prob,
current_state=weights,
step_size=0.1,
num_leapfrog_steps=5)[0])
with tf.control_dependencies([weights_update]):
loss = -prior.log_prob(weights)
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
log_sigma_update = optimizer.minimize(loss, var_list=[log_sigma])
sess.graph.finalize() # No more graph building.
tf.global_variables_initializer().run()
sigma_history = np.zeros(num_iters, dtype)
weights_history = np.zeros([num_iters, dims], dtype)
for i in xrange(num_iters):
_, sigma_, weights_, _ = sess.run([log_sigma_update, sigma, weights])
weights_history[i, :] = weights_
sigma_history[i] = sigma_
true_weights_ = sess.run(true_weights)
# Should converge to something close to true_sigma.
plt.plot(sigma_history);
plt.ylabel("sigma");
plt.xlabel("iteration");
```
Args:
target_log_prob_fn: Python callable which takes an argument like
`current_state` (or `*current_state` if it's a list) and returns its
(possibly unnormalized) log-density under the target distribution.
current_state: `Tensor` or Python `list` of `Tensor`s representing the
current state(s) of the Markov chain(s). The first `r` dimensions index
independent chains, `r = tf.rank(target_log_prob_fn(*current_state))`.
step_size: `Tensor` or Python `list` of `Tensor`s representing the step size
for the leapfrog integrator. Must broadcast with the shape of
`current_state`. Larger step sizes lead to faster progress, but too-large
step sizes make rejection exponentially more likely. When possible, it's
often helpful to match per-variable step sizes to the standard deviations
of the target distribution in each variable.
num_leapfrog_steps: Integer number of steps to run the leapfrog integrator
for. Total progress per HMC step is roughly proportional to `step_size *
num_leapfrog_steps`.
seed: Python integer to seed the random number generator.
current_target_log_prob: (Optional) `Tensor` representing the value of
`target_log_prob_fn` at the `current_state`. The only reason to
specify this argument is to reduce TF graph size.
Default value: `None` (i.e., compute as needed).
current_grads_target_log_prob: (Optional) Python list of `Tensor`s
representing gradient of `current_target_log_prob` at the `current_state`
and wrt the `current_state`. Must have same shape as `current_state`. The
only reason to specify this argument is to reduce TF graph size.
Default value: `None` (i.e., compute as needed).
name: Python `str` name prefixed to Ops created by this function.
Default value: `None` (i.e., "hmc_kernel").
Returns:
accepted_state: Tensor or Python list of `Tensor`s representing the state(s)
of the Markov chain(s) at each result step. Has same shape as
`current_state`.
kernel_results: `collections.namedtuple` of internal calculations used to
advance the chain.
Raises:
ValueError: if there isn't one `step_size` or a list with same length as
`current_state`.
"""
with ops.name_scope(
name, "hmc_kernel",
[current_state, step_size, num_leapfrog_steps, seed,
current_target_log_prob, current_grads_target_log_prob]):
with ops.name_scope("initialize"):
[current_state_parts, step_sizes, current_target_log_prob,
current_grads_target_log_prob] = _prepare_args(
target_log_prob_fn, current_state, step_size,
current_target_log_prob, current_grads_target_log_prob,
maybe_expand=True)
independent_chain_ndims = distributions_util.prefer_static_rank(
current_target_log_prob)
current_momentums = []
for s in current_state_parts:
current_momentums.append(random_ops.random_normal(
shape=array_ops.shape(s),
dtype=s.dtype.base_dtype,
seed=seed))
seed = distributions_util.gen_new_seed(
seed, salt="hmc_kernel_momentums")
num_leapfrog_steps = ops.convert_to_tensor(
num_leapfrog_steps,
dtype=dtypes.int32,
name="num_leapfrog_steps")
[
proposed_momentums,
proposed_state_parts,
proposed_target_log_prob,
proposed_grads_target_log_prob,
] = _leapfrog_integrator(current_momentums,
target_log_prob_fn,
current_state_parts,
step_sizes,
num_leapfrog_steps,
current_target_log_prob,
current_grads_target_log_prob)
energy_change = _compute_energy_change(current_target_log_prob,
current_momentums,
proposed_target_log_prob,
proposed_momentums,
independent_chain_ndims)
# u < exp(min(-energy, 0)), where u~Uniform[0,1)
# ==> -log(u) >= max(e, 0)
# ==> -log(u) >= e
# (Perhaps surprisingly, we don't have a better way to obtain a random
# uniform from positive reals, i.e., `tf.random_uniform(minval=0,
# maxval=np.inf)` won't work.)
random_uniform = random_ops.random_uniform(
shape=array_ops.shape(energy_change),
dtype=energy_change.dtype,
seed=seed)
random_positive = -math_ops.log(random_uniform)
is_accepted = random_positive >= energy_change
accepted_target_log_prob = array_ops.where(is_accepted,
proposed_target_log_prob,
current_target_log_prob)
accepted_state_parts = [_choose(is_accepted,
proposed_state_part,
current_state_part,
independent_chain_ndims)
for current_state_part, proposed_state_part
in zip(current_state_parts, proposed_state_parts)]
accepted_grads_target_log_prob = [
_choose(is_accepted,
proposed_grad,
grad,
independent_chain_ndims)
for proposed_grad, grad
in zip(proposed_grads_target_log_prob, current_grads_target_log_prob)]
maybe_flatten = lambda x: x if _is_list_like(current_state) else x[0]
return [
maybe_flatten(accepted_state_parts),
KernelResults(
acceptance_probs=math_ops.exp(math_ops.minimum(-energy_change, 0.)),
current_grads_target_log_prob=accepted_grads_target_log_prob,
current_target_log_prob=accepted_target_log_prob,
energy_change=energy_change,
is_accepted=is_accepted,
proposed_grads_target_log_prob=proposed_grads_target_log_prob,
proposed_state=maybe_flatten(proposed_state_parts),
proposed_target_log_prob=proposed_target_log_prob,
random_positive=random_positive,
),
]
def _leapfrog_integrator(current_momentums,
                         target_log_prob_fn,
                         current_state_parts,
                         step_sizes,
                         num_leapfrog_steps,
                         current_target_log_prob=None,
                         current_grads_target_log_prob=None,
                         name=None):
  """Applies `num_leapfrog_steps` of the leapfrog integrator.

  Assumes a simple quadratic kinetic energy function: `0.5 ||momentum||**2`.

  #### Examples:

  ##### Simple quadratic potential.

  ```python
  tfd = tf.contrib.distributions

  dims = 10
  num_iter = int(1e3)
  dtype = np.float32

  position = tf.placeholder(np.float32)
  momentum = tf.placeholder(np.float32)

  [
    new_momentums,
    new_positions,
  ] = hmc._leapfrog_integrator(
      current_momentums=[momentum],
      target_log_prob_fn=tfd.MultivariateNormalDiag(
          loc=tf.zeros(dims, dtype)).log_prob,
      current_state_parts=[position],
      step_sizes=0.1,
      num_leapfrog_steps=3)[:2]

  sess.graph.finalize()  # No more graph building.

  momentum_ = np.random.randn(dims).astype(dtype)
  position_ = np.random.randn(dims).astype(dtype)

  positions = np.zeros([num_iter, dims], dtype)
  for i in xrange(num_iter):
    position_, momentum_ = sess.run(
        [new_momentums[0], new_positions[0]],
        feed_dict={position: position_, momentum: momentum_})
    positions[i] = position_

  plt.plot(positions[:, 0]);  # Sinusoidal.
  ```

  Args:
    current_momentums: Tensor containing the value(s) of the momentum
      variable(s) to update.
    target_log_prob_fn: Python callable which takes an argument like
      `*current_state_parts` and returns its (possibly unnormalized) log-density
      under the target distribution.
    current_state_parts: Python `list` of `Tensor`s representing the current
      state(s) of the Markov chain(s). The first `independent_chain_ndims` of
      the `Tensor`(s) index different chains.
    step_sizes: Python `list` of `Tensor`s representing the step size for the
      leapfrog integrator. Must broadcast with the shape of
      `current_state_parts`. Larger step sizes lead to faster progress, but
      too-large step sizes make rejection exponentially more likely. When
      possible, it's often helpful to match per-variable step sizes to the
      standard deviations of the target distribution in each variable.
    num_leapfrog_steps: Integer number of steps to run the leapfrog integrator
      for. Total progress per HMC step is roughly proportional to `step_size *
      num_leapfrog_steps`.
    current_target_log_prob: (Optional) `Tensor` representing the value of
      `target_log_prob_fn(*current_state_parts)`. The only reason to specify
      this argument is to reduce TF graph size.
      Default value: `None` (i.e., compute as needed).
    current_grads_target_log_prob: (Optional) Python list of `Tensor`s
      representing gradient of `target_log_prob_fn(*current_state_parts`) wrt
      `current_state_parts`. Must have same shape as `current_state_parts`. The
      only reason to specify this argument is to reduce TF graph size.
      Default value: `None` (i.e., compute as needed).
    name: Python `str` name prefixed to Ops created by this function.
      Default value: `None` (i.e., "hmc_leapfrog_integrator").

  Returns:
    proposed_momentums: Updated value of the momentum.
    proposed_state_parts: Tensor or Python list of `Tensor`s representing the
      state(s) of the Markov chain(s) at each result step. Has same shape as
      input `current_state_parts`.
    proposed_target_log_prob: `Tensor` representing the value of
      `target_log_prob_fn` at `accepted_state`.
    proposed_grads_target_log_prob: Gradient of `proposed_target_log_prob` wrt
      `accepted_state`.

  Raises:
    ValueError: if `len(momentums) != len(state_parts)`.
    ValueError: if `len(state_parts) != len(step_sizes)`.
    ValueError: if `len(state_parts) != len(grads_target_log_prob)`.
    TypeError: if `not target_log_prob.dtype.is_floating`.
  """
  # One while_loop iteration == one full leapfrog step. The returned list
  # must exactly mirror `loop_vars` below: the target-log-prob slot is
  # recomputed inside `_leapfrog_step`, so the incoming value is ignored.
  def _loop_body(step,
                 current_momentums,
                 current_state_parts,
                 ignore_current_target_log_prob,  # pylint: disable=unused-argument
                 current_grads_target_log_prob):
    return [step + 1] + list(_leapfrog_step(current_momentums,
                                            target_log_prob_fn,
                                            current_state_parts,
                                            step_sizes,
                                            current_grads_target_log_prob))

  with ops.name_scope(
      name, "hmc_leapfrog_integrator",
      [current_momentums, current_state_parts, step_sizes, num_leapfrog_steps,
       current_target_log_prob, current_grads_target_log_prob]):
    # NOTE(review): the docstring's `Raises` section also promises length
    # checks for `step_sizes` and `grads_target_log_prob`, but only the
    # momentums/state_parts check is enforced here (zips below would silently
    # truncate); the dtype TypeError is raised inside `_leapfrog_step`.
    if len(current_momentums) != len(current_state_parts):
      raise ValueError("`momentums` must be in one-to-one correspondence "
                       "with `state_parts`")
    num_leapfrog_steps = ops.convert_to_tensor(num_leapfrog_steps,
                                               name="num_leapfrog_steps")
    # Fill in the log-prob and its gradients at the initial state if the
    # caller did not supply them (graph-size optimization).
    current_target_log_prob, current_grads_target_log_prob = (
        _maybe_call_fn_and_grads(
            target_log_prob_fn,
            current_state_parts,
            current_target_log_prob,
            current_grads_target_log_prob))
    return control_flow_ops.while_loop(
        cond=lambda iter_, *args: iter_ < num_leapfrog_steps,
        body=_loop_body,
        loop_vars=[
            np.int32(0),  # iter_
            current_momentums,
            current_state_parts,
            current_target_log_prob,
            current_grads_target_log_prob,
        ],
        back_prop=False)[1:]  # Lop-off "iter_".
def _leapfrog_step(current_momentums,
                   target_log_prob_fn,
                   current_state_parts,
                   step_sizes,
                   current_grads_target_log_prob,
                   name=None):
  """Runs a single leapfrog update: half momentum, full position, half momentum.

  Returns `[proposed_momentums, proposed_state_parts,
  proposed_target_log_prob, proposed_grads_target_log_prob]`.

  Raises:
    TypeError: if `target_log_prob_fn` yields a non-float `Tensor`.
    ValueError: if any gradient of the proposed log-prob is `None`.
  """
  def _half_kick(momentums, grads):
    # Momentum half-step: m <- m + (eps / 2) * grad(target_log_prob).
    return [m + 0.5 * eps * g
            for m, eps, g in zip(momentums, step_sizes, grads)]

  with ops.name_scope(
      name, "_leapfrog_step",
      [current_momentums, current_state_parts, step_sizes,
       current_grads_target_log_prob]):
    # First half-kick uses the gradient at the *current* state.
    half_kicked_momentums = _half_kick(current_momentums,
                                       current_grads_target_log_prob)
    # Full position step driven by the half-updated momentum.
    proposed_state_parts = []
    for state_part, eps, momentum in zip(current_state_parts,
                                         step_sizes,
                                         half_kicked_momentums):
      proposed_state_parts.append(state_part + eps * momentum)
    proposed_target_log_prob = target_log_prob_fn(*proposed_state_parts)
    if not proposed_target_log_prob.dtype.is_floating:
      raise TypeError("`target_log_prob_fn` must produce a `Tensor` "
                      "with `float` `dtype`.")
    proposed_grads_target_log_prob = gradients_ops.gradients(
        proposed_target_log_prob, proposed_state_parts)
    if any(g is None for g in proposed_grads_target_log_prob):
      raise ValueError(
          "Encountered `None` gradient. Does your target `target_log_prob_fn` "
          "access all `tf.Variable`s via `tf.get_variable`?\n"
          "  current_state_parts: {}\n"
          "  proposed_state_parts: {}\n"
          "  proposed_grads_target_log_prob: {}".format(
              current_state_parts,
              proposed_state_parts,
              proposed_grads_target_log_prob))
    # Second half-kick uses the gradient at the *proposed* state.
    proposed_momentums = _half_kick(half_kicked_momentums,
                                    proposed_grads_target_log_prob)
    return [
        proposed_momentums,
        proposed_state_parts,
        proposed_target_log_prob,
        proposed_grads_target_log_prob,
    ]
def _compute_energy_change(current_target_log_prob,
                           current_momentums,
                           proposed_target_log_prob,
                           proposed_momentums,
                           independent_chain_ndims,
                           name=None):
  """Helper to `kernel` which computes the energy change.

  Energy = potential + kinetic, where potential = -target_log_prob and
  kinetic = 0.5 * sum(momentum**2) over all event dims of all state parts.
  The kinetic terms are accumulated in log-space (via `_log_sum_sq` and
  `reduce_logsumexp`) before exponentiating once, and any chain whose terms
  would produce a NaN sum is forced to `+inf` energy change (i.e. rejection).
  """
  with ops.name_scope(
      name, "compute_energy_change",
      ([current_target_log_prob, proposed_target_log_prob,
        independent_chain_ndims] +
       current_momentums + proposed_momentums)):
    # Abbreviate lk0=log_kinetic_energy and lk1=proposed_log_kinetic_energy
    # since they're a mouthful and lets us inline more.
    lk0, lk1 = [], []
    for current_momentum, proposed_momentum in zip(current_momentums,
                                                   proposed_momentums):
      # Reduce over event dims only; the first `independent_chain_ndims`
      # dims index independent chains and are preserved.
      axis = math_ops.range(independent_chain_ndims,
                            array_ops.rank(current_momentum))
      lk0.append(_log_sum_sq(current_momentum, axis))
      lk1.append(_log_sum_sq(proposed_momentum, axis))

    # Combine parts: lk = log(0.5 * sum_parts sum(m**2)) per chain.
    lk0 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk0, axis=-1),
                                                  axis=-1)
    lk1 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk1, axis=-1),
                                                  axis=-1)
    lp0 = -current_target_log_prob   # log_potential
    lp1 = -proposed_target_log_prob  # proposed_log_potential
    # Summing the last axis of `x` yields:
    #   (lp1 + exp(lk1)) - (lp0 + exp(lk0)) = proposed_energy - current_energy.
    x = array_ops.stack([lp1, math_ops.exp(lk1), -lp0, -math_ops.exp(lk0)],
                        axis=-1)

    # The sum is NaN if any element is NaN or we see both +Inf and -Inf.
    # Thus we will replace such rows with infinite energy change which implies
    # rejection. Recall that float-comparisons with NaN are always False.
    is_sum_determinate = (
        math_ops.reduce_all(math_ops.is_finite(x) | (x >= 0.), axis=-1) &
        math_ops.reduce_all(math_ops.is_finite(x) | (x <= 0.), axis=-1))
    # Broadcast the per-chain flag across the 4 stacked summands.
    is_sum_determinate = array_ops.tile(
        is_sum_determinate[..., array_ops.newaxis],
        multiples=array_ops.concat([
            array_ops.ones(array_ops.rank(is_sum_determinate),
                           dtype=dtypes.int32),
            [4],
        ], axis=0))
    x = array_ops.where(is_sum_determinate,
                        x,
                        array_ops.fill(array_ops.shape(x),
                                       value=x.dtype.as_numpy_dtype(np.inf)))

    return math_ops.reduce_sum(x, axis=-1)
def _choose(is_accepted,
            accepted,
            rejected,
            independent_chain_ndims,
            name=None):
  """Per-chain select between `accepted` and `rejected` via `tf.where`.

  `is_accepted` carries only the leading (chain) dimensions, so it is first
  reshaped with trailing singleton dims and tiled out to the full value shape
  before `where` is applied elementwise.
  """
  def _broadcast_mask_to(x):
    with ops.name_scope("_choose"):
      chain_rank = array_ops.rank(is_accepted)
      # Append singleton dims so the mask has the same rank as `x`.
      padded_shape = array_ops.concat([
          array_ops.shape(is_accepted),
          array_ops.ones([array_ops.rank(x) - chain_rank],
                         dtype=dtypes.int32),
      ], axis=0)
      # Tile those singleton dims out to the event shape of `x`.
      tiling = array_ops.concat([
          array_ops.ones([chain_rank], dtype=dtypes.int32),
          array_ops.shape(x)[independent_chain_ndims:],
      ], axis=0)
      mask = array_ops.tile(array_ops.reshape(is_accepted, padded_shape),
                            tiling)
      mask.set_shape(x.shape)
      return mask

  with ops.name_scope(name, "_choose", values=[
      is_accepted, accepted, rejected, independent_chain_ndims]):
    return array_ops.where(_broadcast_mask_to(accepted),
                           accepted,
                           rejected)
def _maybe_call_fn_and_grads(fn,
                             fn_arg_list,
                             fn_result=None,
                             grads_fn_result=None,
                             description="target_log_prob"):
  """Returns `fn(*args)` and its gradients, computing whichever is missing.

  Raises:
    TypeError: if the (possibly computed) `fn_result` is not a float `Tensor`.
    ValueError: if the gradient list length mismatches the args, or any
      gradient is `None`.
  """
  if _is_list_like(fn_arg_list):
    args = list(fn_arg_list)
  else:
    args = [fn_arg_list]
  if fn_result is None:
    fn_result = fn(*args)
  if not fn_result.dtype.is_floating:
    raise TypeError("`{}` must be a `Tensor` with `float` `dtype`.".format(
        description))
  if grads_fn_result is None:
    grads_fn_result = gradients_ops.gradients(
        fn_result, args)
  if len(args) != len(grads_fn_result):
    raise ValueError("`{}` must be in one-to-one correspondence with "
                     "`grads_{}`".format(*[description]*2))
  for g in grads_fn_result:
    if g is None:
      raise ValueError("Encountered `None` gradient.")
  return fn_result, grads_fn_result
def _prepare_args(target_log_prob_fn, state, step_size,
                  target_log_prob=None, grads_target_log_prob=None,
                  maybe_expand=False, description="target_log_prob"):
  """Normalizes user-facing args into the list-of-parts form used internally.

  Converts `state`/`step_size` to lists of `Tensor`s, computes the target
  log-prob and its gradients if not supplied, and broadcasts a single step
  size across all state parts.  With `maybe_expand=False` and a non-list
  `state`, the state/step-size lists are unwrapped back to bare tensors.

  Raises:
    ValueError: if there isn't one `step_size` or one per state part.
  """
  if _is_list_like(state):
    raw_state_parts = list(state)
  else:
    raw_state_parts = [state]
  state_parts = []
  for part in raw_state_parts:
    state_parts.append(ops.convert_to_tensor(part, name="state"))
  target_log_prob, grads_target_log_prob = _maybe_call_fn_and_grads(
      target_log_prob_fn,
      state_parts,
      target_log_prob,
      grads_target_log_prob,
      description)
  raw_step_sizes = list(step_size) if _is_list_like(step_size) else [step_size]
  step_sizes = [
      ops.convert_to_tensor(s, name="step_size", dtype=target_log_prob.dtype)
      for s in raw_step_sizes]
  if len(step_sizes) == 1:
    # Broadcast a single step size across every state part.
    step_sizes = step_sizes * len(state_parts)
  if len(state_parts) != len(step_sizes):
    raise ValueError("There should be exactly one `step_size` or it should "
                     "have same length as `current_state`.")

  def _maybe_flatten(x):
    # Unwrap the singleton list when the caller passed bare (non-list) args.
    if maybe_expand or _is_list_like(state):
      return x
    return x[0]

  return [
      _maybe_flatten(state_parts),
      _maybe_flatten(step_sizes),
      target_log_prob,
      grads_target_log_prob,
  ]
def _is_list_like(x):
"""Helper which returns `True` if input is `list`-like."""
return isinstance(x, (tuple, list))
def _log_sum_sq(x, axis=None):
  """Computes `log(sum(x**2))` along `axis`, staying in log-space for stability."""
  log_sq = 2. * math_ops.log(math_ops.abs(x))  # == log(x**2)
  return math_ops.reduce_logsumexp(log_sq, axis)
| 38.838111 | 83 | 0.666319 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gradients_impl as gradients_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops.distributions import util as distributions_util
# Public API of this module.
__all__ = [
    "sample_chain",
    "sample_annealed_importance_chain",
    "kernel",
]

# Diagnostics returned by `kernel` alongside the new chain state; the
# `current_*` fields are fed back into the next `kernel` call by the
# sampling loops below.
KernelResults = collections.namedtuple(
    "KernelResults",
    [
        "acceptance_probs",
        "current_grads_target_log_prob",
        "current_target_log_prob",
        "energy_change",
        "is_accepted",
        "proposed_grads_target_log_prob",
        "proposed_state",
        "proposed_target_log_prob",
        "random_positive",
    ])
def _make_dummy_kernel_results(
    dummy_state,
    dummy_target_log_prob,
    dummy_grads_target_log_prob):
  """Builds a placeholder `KernelResults` to seed the sampling loops.

  Fields reuse the supplied tensors so dtypes and shapes line up with real
  results produced by `kernel`; the loops read only the `current_*` fields
  before the first genuine results replace this placeholder.
  """
  return KernelResults(
      acceptance_probs=dummy_target_log_prob,
      current_grads_target_log_prob=dummy_grads_target_log_prob,
      current_target_log_prob=dummy_target_log_prob,
      energy_change=dummy_target_log_prob,
      # All-True so the dummy is shaped like a real acceptance mask.
      is_accepted=array_ops.ones_like(dummy_target_log_prob, dtypes.bool),
      proposed_grads_target_log_prob=dummy_grads_target_log_prob,
      proposed_state=dummy_state,
      proposed_target_log_prob=dummy_target_log_prob,
      random_positive=dummy_target_log_prob,
  )
def sample_chain(
    num_results,
    target_log_prob_fn,
    current_state,
    step_size,
    num_leapfrog_steps,
    num_burnin_steps=0,
    num_steps_between_results=0,
    seed=None,
    current_target_log_prob=None,
    current_grads_target_log_prob=None,
    name=None):
  """Collects `num_results` HMC samples via `tf.scan` over `kernel` calls.

  Before the first collected result the chain is advanced an extra
  `num_burnin_steps` iterations; between consecutive results it is advanced an
  extra `num_steps_between_results` iterations (thinning), with the
  intermediate states discarded.  When `seed` is not `None` the internal
  loops run with `parallel_iterations=1` to keep sampling deterministic.

  Returns the result of `tf.scan`: stacked chain states plus stacked
  `KernelResults` diagnostics, one slice per collected result.
  """
  with ops.name_scope(
      name, "hmc_sample_chain",
      [num_results, current_state, step_size, num_leapfrog_steps,
       num_burnin_steps, num_steps_between_results, seed,
       current_target_log_prob, current_grads_target_log_prob]):
    with ops.name_scope("initialize"):
      [
          current_state,
          step_size,
          current_target_log_prob,
          current_grads_target_log_prob,
      ] = _prepare_args(
          target_log_prob_fn,
          current_state,
          step_size,
          current_target_log_prob,
          current_grads_target_log_prob)
      num_results = ops.convert_to_tensor(
          num_results,
          dtype=dtypes.int32,
          name="num_results")
      num_leapfrog_steps = ops.convert_to_tensor(
          num_leapfrog_steps,
          dtype=dtypes.int32,
          name="num_leapfrog_steps")
      num_burnin_steps = ops.convert_to_tensor(
          num_burnin_steps,
          dtype=dtypes.int32,
          name="num_burnin_steps")
      num_steps_between_results = ops.convert_to_tensor(
          num_steps_between_results,
          dtype=dtypes.int32,
          name="num_steps_between_results")

    def _run_chain(num_steps, current_state, kernel_results):
      # Advances the chain `num_steps` iterations, keeping only the final
      # state and results; the previous results feed the next `kernel` call.
      def _loop_body(iter_, current_state, kernel_results):
        return [iter_ + 1] + list(kernel(
            target_log_prob_fn,
            current_state,
            step_size,
            num_leapfrog_steps,
            seed,
            kernel_results.current_target_log_prob,
            kernel_results.current_grads_target_log_prob))
      while_loop_kwargs = dict(
          cond=lambda iter_, *args: iter_ < num_steps,
          body=_loop_body,
          loop_vars=[
              np.int32(0),  # iter_
              current_state,
              kernel_results,
          ],
      )
      if seed is not None:
        # Serialize iterations so stateful random ops fire deterministically.
        while_loop_kwargs["parallel_iterations"] = 1
      return control_flow_ops.while_loop(
          **while_loop_kwargs)[1:]  # Drop "iter_".

    def _scan_body(args_list, iter_):
      current_state, kernel_results = args_list
      # 1 step per result, plus burn-in before the first / thinning between.
      return _run_chain(
          1 + array_ops.where(math_ops.equal(iter_, 0),
                              num_burnin_steps,
                              num_steps_between_results),
          current_state,
          kernel_results)

    scan_kwargs = dict(
        fn=_scan_body,
        elems=math_ops.range(num_results),
        initializer=[
            current_state,
            _make_dummy_kernel_results(
                current_state,
                current_target_log_prob,
                current_grads_target_log_prob),
        ])
    if seed is not None:
      scan_kwargs["parallel_iterations"] = 1
    return functional_ops.scan(**scan_kwargs)
def sample_annealed_importance_chain(
    proposal_log_prob_fn,
    num_steps,
    target_log_prob_fn,
    current_state,
    step_size,
    num_leapfrog_steps,
    seed=None,
    name=None):
  """Runs annealed importance sampling (AIS) using HMC transitions.

  At iteration `iter_` the HMC kernel targets the convex combination
  `(1 - beta) * proposal + beta * target` with
  `beta = (iter_ + 1) / num_steps`, while accumulating
  `ais_weights += (target - proposal) / num_steps` evaluated at the state
  entering each step.

  Returns:
    `[current_state, ais_weights, kernel_results]` after `num_steps` steps.
  """
  def make_convex_combined_log_prob_fn(iter_):
    # Returns f(x) = (1 - beta) * log p_proposal(x) + beta * log p_target(x).
    def _fn(*args):
      p = proposal_log_prob_fn(*args)
      t = target_log_prob_fn(*args)
      dtype = p.dtype.base_dtype
      beta = (math_ops.cast(iter_ + 1, dtype)
              / math_ops.cast(num_steps, dtype))
      return (1. - beta) * p + beta * t
    return _fn

  with ops.name_scope(
      name, "hmc_sample_annealed_importance_chain",
      [num_steps, current_state, step_size, num_leapfrog_steps, seed]):
    with ops.name_scope("initialize"):
      [
          current_state,
          step_size,
          current_log_prob,
          current_grads_log_prob,
      ] = _prepare_args(
          make_convex_combined_log_prob_fn(iter_=0),
          current_state,
          step_size,
          description="convex_combined_log_prob")
      num_steps = ops.convert_to_tensor(
          num_steps,
          dtype=dtypes.int32,
          name="num_steps")
      num_leapfrog_steps = ops.convert_to_tensor(
          num_leapfrog_steps,
          dtype=dtypes.int32,
          name="num_leapfrog_steps")
    def _loop_body(iter_, ais_weights, current_state, kernel_results):
      # Accumulate the importance weight at the incoming state, then take
      # one HMC step against the annealed target for this iteration.
      current_state_parts = (list(current_state)
                             if _is_list_like(current_state)
                             else [current_state])
      ais_weights += ((target_log_prob_fn(*current_state_parts)
                       - proposal_log_prob_fn(*current_state_parts))
                      / math_ops.cast(num_steps, ais_weights.dtype))
      return [iter_ + 1, ais_weights] + list(kernel(
          make_convex_combined_log_prob_fn(iter_),
          current_state,
          step_size,
          num_leapfrog_steps,
          seed,
          kernel_results.current_target_log_prob,
          kernel_results.current_grads_target_log_prob))

    while_loop_kwargs = dict(
        cond=lambda iter_, *args: iter_ < num_steps,
        body=_loop_body,
        loop_vars=[
            np.int32(0),  # iter_
            array_ops.zeros_like(current_log_prob),  # ais_weights
            current_state,
            _make_dummy_kernel_results(current_state,
                                       current_log_prob,
                                       current_grads_log_prob),
        ])
    if seed is not None:
      # Serialize iterations so stateful random ops fire deterministically.
      while_loop_kwargs["parallel_iterations"] = 1

    [ais_weights, current_state, kernel_results] = control_flow_ops.while_loop(
        **while_loop_kwargs)[1:]  # Drop "iter_".
    return [current_state, ais_weights, kernel_results]
def kernel(target_log_prob_fn,
           current_state,
           step_size,
           num_leapfrog_steps,
           seed=None,
           current_target_log_prob=None,
           current_grads_target_log_prob=None,
           name=None):
  """Runs one iteration of Hamiltonian Monte Carlo.

  Draws Gaussian momentums, simulates Hamiltonian dynamics with
  `_leapfrog_integrator`, and accepts/rejects the proposal with a Metropolis
  test on the total-energy change.  Leftmost dimensions of `current_state`
  index independent chains that are updated in parallel.

  Returns:
    A two-element list: the accepted state (same structure as
    `current_state`) and a `KernelResults` namedtuple of diagnostics.
  """
  with ops.name_scope(
      name, "hmc_kernel",
      [current_state, step_size, num_leapfrog_steps, seed,
       current_target_log_prob, current_grads_target_log_prob]):
    with ops.name_scope("initialize"):
      [current_state_parts, step_sizes, current_target_log_prob,
       current_grads_target_log_prob] = _prepare_args(
           target_log_prob_fn, current_state, step_size,
           current_target_log_prob, current_grads_target_log_prob,
           maybe_expand=True)
      independent_chain_ndims = distributions_util.prefer_static_rank(
          current_target_log_prob)
      # Sample one standard-normal momentum tensor per state part.
      current_momentums = []
      for s in current_state_parts:
        current_momentums.append(random_ops.random_normal(
            shape=array_ops.shape(s),
            dtype=s.dtype.base_dtype,
            seed=seed))
        seed = distributions_util.gen_new_seed(
            seed, salt="hmc_kernel_momentums")
      num_leapfrog_steps = ops.convert_to_tensor(
          num_leapfrog_steps,
          dtype=dtypes.int32,
          name="num_leapfrog_steps")
    [
        proposed_momentums,
        proposed_state_parts,
        proposed_target_log_prob,
        proposed_grads_target_log_prob,
    ] = _leapfrog_integrator(current_momentums,
                             target_log_prob_fn,
                             current_state_parts,
                             step_sizes,
                             num_leapfrog_steps,
                             current_target_log_prob,
                             current_grads_target_log_prob)

    energy_change = _compute_energy_change(current_target_log_prob,
                                           current_momentums,
                                           proposed_target_log_prob,
                                           proposed_momentums,
                                           independent_chain_ndims)

    # Metropolis test: accept iff u < exp(min(-energy, 0)), u~Uniform[0,1)
    # ==> -log(u) >= max(e, 0)
    # ==> -log(u) >= e
    # (Perhaps surprisingly, we don't have a better way to obtain a random
    # uniform from positive reals, i.e., `tf.random_uniform(minval=0,
    # maxval=np.inf)` won't work.)
    random_uniform = random_ops.random_uniform(
        shape=array_ops.shape(energy_change),
        dtype=energy_change.dtype,
        seed=seed)
    random_positive = -math_ops.log(random_uniform)
    is_accepted = random_positive >= energy_change

    accepted_target_log_prob = array_ops.where(is_accepted,
                                               proposed_target_log_prob,
                                               current_target_log_prob)

    # `_choose` broadcasts the per-chain mask over each part's event dims.
    accepted_state_parts = [_choose(is_accepted,
                                    proposed_state_part,
                                    current_state_part,
                                    independent_chain_ndims)
                            for current_state_part, proposed_state_part
                            in zip(current_state_parts, proposed_state_parts)]

    accepted_grads_target_log_prob = [
        _choose(is_accepted,
                proposed_grad,
                grad,
                independent_chain_ndims)
        for proposed_grad, grad
        in zip(proposed_grads_target_log_prob, current_grads_target_log_prob)]

    # Return a bare tensor (not a 1-list) when the caller passed one.
    maybe_flatten = lambda x: x if _is_list_like(current_state) else x[0]
    return [
        maybe_flatten(accepted_state_parts),
        KernelResults(
            acceptance_probs=math_ops.exp(math_ops.minimum(-energy_change, 0.)),
            current_grads_target_log_prob=accepted_grads_target_log_prob,
            current_target_log_prob=accepted_target_log_prob,
            energy_change=energy_change,
            is_accepted=is_accepted,
            proposed_grads_target_log_prob=proposed_grads_target_log_prob,
            proposed_state=maybe_flatten(proposed_state_parts),
            proposed_target_log_prob=proposed_target_log_prob,
            random_positive=random_positive,
        ),
    ]
def _leapfrog_integrator(current_momentums,
                         target_log_prob_fn,
                         current_state_parts,
                         step_sizes,
                         num_leapfrog_steps,
                         current_target_log_prob=None,
                         current_grads_target_log_prob=None,
                         name=None):
  """Applies `num_leapfrog_steps` leapfrog updates via `tf.while_loop`.

  Assumes the quadratic kinetic energy `0.5 * ||momentum||**2` (see
  `_leapfrog_step`).  The log-prob and gradients at the initial state are
  computed on demand when not supplied.

  Returns:
    `[proposed_momentums, proposed_state_parts, proposed_target_log_prob,
      proposed_grads_target_log_prob]` after the final step.

  Raises:
    ValueError: if `len(current_momentums) != len(current_state_parts)`.
  """
  # One while_loop iteration == one full leapfrog step; the returned list
  # must mirror `loop_vars` below.  The target-log-prob slot is recomputed
  # inside `_leapfrog_step`, so the incoming value is ignored.
  def _loop_body(step,
                 current_momentums,
                 current_state_parts,
                 ignore_current_target_log_prob,  # unused: recomputed each step
                 current_grads_target_log_prob):
    return [step + 1] + list(_leapfrog_step(current_momentums,
                                            target_log_prob_fn,
                                            current_state_parts,
                                            step_sizes,
                                            current_grads_target_log_prob))
  with ops.name_scope(
      name, "hmc_leapfrog_integrator",
      [current_momentums, current_state_parts, step_sizes, num_leapfrog_steps,
       current_target_log_prob, current_grads_target_log_prob]):
    if len(current_momentums) != len(current_state_parts):
      raise ValueError("`momentums` must be in one-to-one correspondence "
                       "with `state_parts`")
    num_leapfrog_steps = ops.convert_to_tensor(num_leapfrog_steps,
                                               name="num_leapfrog_steps")
    current_target_log_prob, current_grads_target_log_prob = (
        _maybe_call_fn_and_grads(
            target_log_prob_fn,
            current_state_parts,
            current_target_log_prob,
            current_grads_target_log_prob))
    return control_flow_ops.while_loop(
        cond=lambda iter_, *args: iter_ < num_leapfrog_steps,
        body=_loop_body,
        loop_vars=[
            np.int32(0),  # iter_
            current_momentums,
            current_state_parts,
            current_target_log_prob,
            current_grads_target_log_prob,
        ],
        back_prop=False)[1:]  # Drop the "iter_" counter from the result.
def _leapfrog_step(current_momentums,
                   target_log_prob_fn,
                   current_state_parts,
                   step_sizes,
                   current_grads_target_log_prob,
                   name=None):
  """One leapfrog step: half momentum kick, full position step, half kick.

  Returns `[proposed_momentums, proposed_state_parts,
  proposed_target_log_prob, proposed_grads_target_log_prob]`.

  Raises:
    TypeError: if `target_log_prob_fn` yields a non-float `Tensor`.
    ValueError: if any gradient of the proposed log-prob is `None`.
  """
  with ops.name_scope(
      name, "_leapfrog_step",
      [current_momentums, current_state_parts, step_sizes,
       current_grads_target_log_prob]):
    # Momentum half-step using the gradient at the current state.
    proposed_momentums = [m + 0.5 * ss * g for m, ss, g
                          in zip(current_momentums,
                                 step_sizes,
                                 current_grads_target_log_prob)]
    # Full position step driven by the half-updated momentum.
    proposed_state_parts = [x + ss * m for x, ss, m
                            in zip(current_state_parts,
                                   step_sizes,
                                   proposed_momentums)]
    proposed_target_log_prob = target_log_prob_fn(*proposed_state_parts)
    if not proposed_target_log_prob.dtype.is_floating:
      raise TypeError("`target_log_prob_fn` must produce a `Tensor` "
                      "with `float` `dtype`.")
    proposed_grads_target_log_prob = gradients_ops.gradients(
        proposed_target_log_prob, proposed_state_parts)
    if any(g is None for g in proposed_grads_target_log_prob):
      raise ValueError(
          "Encountered `None` gradient. Does your target `target_log_prob_fn` "
          "access all `tf.Variable`s via `tf.get_variable`?\n"
          "  current_state_parts: {}\n"
          "  proposed_state_parts: {}\n"
          "  proposed_grads_target_log_prob: {}".format(
              current_state_parts,
              proposed_state_parts,
              proposed_grads_target_log_prob))
    # Second momentum half-step with the gradient at the proposed state.
    proposed_momentums = [m + 0.5 * ss * g for m, ss, g
                          in zip(proposed_momentums,
                                 step_sizes,
                                 proposed_grads_target_log_prob)]
    return [
        proposed_momentums,
        proposed_state_parts,
        proposed_target_log_prob,
        proposed_grads_target_log_prob,
    ]
def _compute_energy_change(current_target_log_prob,
                           current_momentums,
                           proposed_target_log_prob,
                           proposed_momentums,
                           independent_chain_ndims,
                           name=None):
  """Computes proposed_energy - current_energy for the Metropolis test.

  Energy = potential + kinetic, where potential = -target_log_prob and
  kinetic = 0.5 * sum(momentum**2) over event dims of all parts; kinetic
  terms are accumulated in log-space for numerical stability, and chains
  whose sum would be NaN are forced to `+inf` change (rejection).
  """
  with ops.name_scope(
      name, "compute_energy_change",
      ([current_target_log_prob, proposed_target_log_prob,
        independent_chain_ndims] +
       current_momentums + proposed_momentums)):
    # lk0/lk1: per-part log(sum(m**2)) for current/proposed momentums,
    # reduced over event dims only (chain dims are preserved).
    lk0, lk1 = [], []
    for current_momentum, proposed_momentum in zip(current_momentums,
                                                   proposed_momentums):
      axis = math_ops.range(independent_chain_ndims,
                            array_ops.rank(current_momentum))
      lk0.append(_log_sum_sq(current_momentum, axis))
      lk1.append(_log_sum_sq(proposed_momentum, axis))
    # Combine parts: lk = log(0.5 * sum_parts sum(m**2)) per chain.
    lk0 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk0, axis=-1),
                                                  axis=-1)
    lk1 = -np.log(2.) + math_ops.reduce_logsumexp(array_ops.stack(lk1, axis=-1),
                                                  axis=-1)
    lp0 = -current_target_log_prob   # log_potential
    lp1 = -proposed_target_log_prob  # proposed_log_potential
    # Summing the last axis yields (lp1 + ke1) - (lp0 + ke0).
    x = array_ops.stack([lp1, math_ops.exp(lk1), -lp0, -math_ops.exp(lk0)],
                        axis=-1)
    # The sum is NaN if any element is NaN or we see both +Inf and -Inf.
    # Thus we will replace such rows with infinite energy change which implies
    # rejection. Recall that float-comparisons with NaN are always False.
    is_sum_determinate = (
        math_ops.reduce_all(math_ops.is_finite(x) | (x >= 0.), axis=-1) &
        math_ops.reduce_all(math_ops.is_finite(x) | (x <= 0.), axis=-1))
    # Broadcast the per-chain flag across the 4 stacked summands.
    is_sum_determinate = array_ops.tile(
        is_sum_determinate[..., array_ops.newaxis],
        multiples=array_ops.concat([
            array_ops.ones(array_ops.rank(is_sum_determinate),
                           dtype=dtypes.int32),
            [4],
        ], axis=0))
    x = array_ops.where(is_sum_determinate,
                        x,
                        array_ops.fill(array_ops.shape(x),
                                       value=x.dtype.as_numpy_dtype(np.inf)))
    return math_ops.reduce_sum(x, axis=-1)
def _choose(is_accepted,
            accepted,
            rejected,
            independent_chain_ndims,
            name=None):
  """Per-chain select between `accepted` and `rejected` via `tf.where`.

  `is_accepted` carries only the leading (chain) dims, so it is reshaped and
  tiled up to the full value shape before `where` is applied elementwise.
  """
  def _expand_is_accepted_like(x):
    with ops.name_scope("_choose"):
      # Append singleton dims so the mask has the same rank as `x`.
      expand_shape = array_ops.concat([
          array_ops.shape(is_accepted),
          array_ops.ones([array_ops.rank(x) - array_ops.rank(is_accepted)],
                         dtype=dtypes.int32),
      ], axis=0)
      # Tile those singleton dims out to the event shape of `x`.
      multiples = array_ops.concat([
          array_ops.ones([array_ops.rank(is_accepted)], dtype=dtypes.int32),
          array_ops.shape(x)[independent_chain_ndims:],
      ], axis=0)
      m = array_ops.tile(array_ops.reshape(is_accepted, expand_shape),
                         multiples)
      m.set_shape(x.shape)
      return m
  with ops.name_scope(name, "_choose", values=[
      is_accepted, accepted, rejected, independent_chain_ndims]):
    return array_ops.where(_expand_is_accepted_like(accepted),
                           accepted,
                           rejected)
def _maybe_call_fn_and_grads(fn,
                             fn_arg_list,
                             fn_result=None,
                             grads_fn_result=None,
                             description="target_log_prob"):
  """Computes `fn(*fn_arg_list)` and its gradients, reusing cached values.

  Args:
    fn: Python callable returning a float-dtype `Tensor`.
    fn_arg_list: `Tensor` or list of `Tensor`s to pass to `fn`.
    fn_result: Optional previously computed `fn(*fn_arg_list)`.
    grads_fn_result: Optional previously computed gradients of `fn_result`
      with respect to each element of `fn_arg_list`.
    description: Python `str` used to build error messages.

  Returns:
    Tuple `(fn_result, grads_fn_result)`.

  Raises:
    TypeError: If `fn_result` is not a floating-point `Tensor`.
    ValueError: If gradients and arguments are not in one-to-one
      correspondence, or any gradient is `None`.
  """
  if _is_list_like(fn_arg_list):
    fn_arg_list = list(fn_arg_list)
  else:
    fn_arg_list = [fn_arg_list]
  if fn_result is None:
    fn_result = fn(*fn_arg_list)
  if not fn_result.dtype.is_floating:
    raise TypeError("`{}` must be a `Tensor` with `float` `dtype`.".format(
        description))
  if grads_fn_result is None:
    grads_fn_result = gradients_ops.gradients(fn_result, fn_arg_list)
  if len(grads_fn_result) != len(fn_arg_list):
    raise ValueError("`{}` must be in one-to-one correspondence with "
                     "`grads_{}`".format(description, description))
  if any(grad is None for grad in grads_fn_result):
    raise ValueError("Encountered `None` gradient.")
  return fn_result, grads_fn_result
def _prepare_args(target_log_prob_fn, state, step_size,
                  target_log_prob=None, grads_target_log_prob=None,
                  maybe_expand=False, description="target_log_prob"):
  """Normalizes inputs into the list-of-tensors form used internally.

  Converts `state` and `step_size` to lists of tensors, evaluates (or
  reuses) the target log-prob and its gradients, and broadcasts a single
  step size across every state part.
  """
  if _is_list_like(state):
    state_parts = list(state)
  else:
    state_parts = [state]
  state_parts = [ops.convert_to_tensor(part, name="state")
                 for part in state_parts]
  target_log_prob, grads_target_log_prob = _maybe_call_fn_and_grads(
      target_log_prob_fn,
      state_parts,
      target_log_prob,
      grads_target_log_prob,
      description)
  if _is_list_like(step_size):
    step_sizes = list(step_size)
  else:
    step_sizes = [step_size]
  step_sizes = [
      ops.convert_to_tensor(s, name="step_size", dtype=target_log_prob.dtype)
      for s in step_sizes]
  # A lone step size applies uniformly to all state parts.
  if len(step_sizes) == 1:
    step_sizes *= len(state_parts)
  if len(state_parts) != len(step_sizes):
    raise ValueError("There should be exactly one `step_size` or it should "
                     "have same length as `current_state`.")
  def maybe_flatten(x):
    # Unwrap singleton lists unless the caller supplied (or requested) lists.
    if maybe_expand or _is_list_like(state):
      return x
    return x[0]
  return [
      maybe_flatten(state_parts),
      maybe_flatten(step_sizes),
      target_log_prob,
      grads_target_log_prob,
  ]
def _is_list_like(x):
return isinstance(x, (tuple, list))
def _log_sum_sq(x, axis=None):
  """Computes log(sum(x**2)) along `axis` in a numerically stable way.

  Uses the identity log(x**2) == 2*log(|x|) so the square is never
  materialized (avoiding overflow/underflow for extreme `x`), then reduces
  with `reduce_logsumexp`.
  """
  return math_ops.reduce_logsumexp(2. * math_ops.log(math_ops.abs(x)), axis)
| true | true |
f724923d68de6140d8f4a588c2dc56847fda4dbf | 1,235 | py | Python | k8s/redis_component.py | wesfloyd/aave-ui-caching-server | 714149d66b8718d0610dc84faff2854930f05f3f | [
"BSD-3-Clause"
] | null | null | null | k8s/redis_component.py | wesfloyd/aave-ui-caching-server | 714149d66b8718d0610dc84faff2854930f05f3f | [
"BSD-3-Clause"
] | null | null | null | k8s/redis_component.py | wesfloyd/aave-ui-caching-server | 714149d66b8718d0610dc84faff2854930f05f3f | [
"BSD-3-Clause"
] | null | null | null | from kdsl.apps.v1 import Deployment, DeploymentSpec
from kdsl.core.v1 import Service, ServiceSpec, PodSpec, ObjectMeta, ContainerItem
import values
# Component identity; reused as the selector label and metrics tag.
name = "redis"
labels = dict(component=name)
annotations = values.shared_annotations
# Shared metadata for both the Service and the Deployment.
metadata = ObjectMeta(
    name=name,
    namespace=values.NAMESPACE,
    labels=dict(**labels, **values.shared_labels, **values.datadog_labels(name)),
    annotations=values.shared_annotations
)
# ClusterIP service exposing the standard redis port.
service = Service(
    metadata=metadata,
    spec=ServiceSpec(
        selector=labels,
        ports={
            6379: dict(name="redis"),
        },
    ),
)
# Single-container pod running the upstream alpine redis image.
pod_spec = PodSpec(
    containers=dict(
        redis=ContainerItem(
            image="redis:6-alpine",
            imagePullPolicy="Always",
            ports={
                6379: dict(name="redis", protocol="TCP"),
            },
        ),
    ),
)
# One replica is sufficient: this redis is a cache, not a durable store
# (no persistence volume is configured) -- TODO confirm with owners.
deployment = Deployment(
    metadata=metadata,
    spec=DeploymentSpec(
        replicas=1,
        selector=dict(matchLabels=labels),
        template=dict(
            metadata=ObjectMeta(
                labels=dict(**metadata.labels),
                annotations=annotations
            ),
            spec=pod_spec,
        ),
    ),
)
# Objects consumed by the kdsl entrypoint for rendering/applying.
entries = [service, deployment]
| 20.583333 | 81 | 0.597571 | from kdsl.apps.v1 import Deployment, DeploymentSpec
from kdsl.core.v1 import Service, ServiceSpec, PodSpec, ObjectMeta, ContainerItem
import values
name = "redis"
labels = dict(component=name)
annotations = values.shared_annotations
metadata = ObjectMeta(
name=name,
namespace=values.NAMESPACE,
labels=dict(**labels, **values.shared_labels, **values.datadog_labels(name)),
annotations=values.shared_annotations
)
service = Service(
metadata=metadata,
spec=ServiceSpec(
selector=labels,
ports={
6379: dict(name="redis"),
},
),
)
pod_spec = PodSpec(
containers=dict(
redis=ContainerItem(
image="redis:6-alpine",
imagePullPolicy="Always",
ports={
6379: dict(name="redis", protocol="TCP"),
},
),
),
)
deployment = Deployment(
metadata=metadata,
spec=DeploymentSpec(
replicas=1,
selector=dict(matchLabels=labels),
template=dict(
metadata=ObjectMeta(
labels=dict(**metadata.labels),
annotations=annotations
),
spec=pod_spec,
),
),
)
entries = [service, deployment]
| true | true |
f72492ac2f5be9bca11a91d75997f67a02504d8f | 56,520 | py | Python | letsencrypt-apache/letsencrypt_apache/configurator.py | sinesiobittencourt/letsencrypt | 0c704fa7f27277c838e13deed85e5c1ecbe46e90 | [
"Apache-2.0"
] | 1 | 2015-12-26T22:50:32.000Z | 2015-12-26T22:50:32.000Z | letsencrypt-apache/letsencrypt_apache/configurator.py | sinesiobittencourt/letsencrypt | 0c704fa7f27277c838e13deed85e5c1ecbe46e90 | [
"Apache-2.0"
] | null | null | null | letsencrypt-apache/letsencrypt_apache/configurator.py | sinesiobittencourt/letsencrypt | 0c704fa7f27277c838e13deed85e5c1ecbe46e90 | [
"Apache-2.0"
] | null | null | null | """Apache Configuration based off of Augeas Configurator."""
# pylint: disable=too-many-lines
import filecmp
import logging
import os
import re
import shutil
import socket
import time
from collections import defaultdict

import zope.component
import zope.interface

from acme import challenges

from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt.plugins import common

from letsencrypt_apache import augeas_configurator
from letsencrypt_apache import constants
from letsencrypt_apache import display_ops
from letsencrypt_apache import tls_sni_01
from letsencrypt_apache import obj
from letsencrypt_apache import parser
logger = logging.getLogger(__name__)
# TODO: Augeas sections ie. <VirtualHost>, <IfModule> beginning and closing
# tags need to be the same case, otherwise Augeas doesn't recognize them.
# This is not able to be completely remedied by regular expressions because
# Augeas views <VirtualHost> </Virtualhost> as an error. This will just
# require another check_parsing_errors() after all files are included...
# (after a find_directive search is executed currently). It can be a one
# time check however because all of LE's transactions will ensure
# only properly formed sections are added.
# Note: This protocol works for filenames with spaces in it, the sites are
# properly set up and directives are changed appropriately, but Apache won't
# recognize names in sites-enabled that have spaces. These are not added to the
# Apache configuration. It may be wise to warn the user if they are trying
# to use vhost filenames that contain spaces and offer to change ' ' to '_'
# Note: FILEPATHS and changes to files are transactional. They are copied
# over before the updates are made to the existing files. NEW_FILES is
# transactional due to the use of register_file_creation()
# TODO: Verify permissions on configuration root... it is easier than
# checking permissions on each of the relative directories and less error
# prone.
# TODO: Write a server protocol finder. Listen <port> <protocol> or
# Protocol <protocol>. This can verify partial setups are correct
# TODO: Add directives to sites-enabled... not sites-available.
# sites-available doesn't allow immediate find_dir search even with save()
# and load()
class ApacheConfigurator(augeas_configurator.AugeasConfigurator):
    # pylint: disable=too-many-instance-attributes,too-many-public-methods
    """Apache configurator.

    State of Configurator: This code has been tested and built for Ubuntu
    14.04 Apache 2.4 and it works for Ubuntu 12.04 Apache 2.2

    :ivar config: Configuration.
    :type config: :class:`~letsencrypt.interfaces.IConfig`

    :ivar parser: Handles low level parsing
    :type parser: :class:`~letsencrypt_apache.parser`

    :ivar tup version: version of Apache

    :ivar list vhosts: All vhosts found in the configuration
        (:class:`list` of :class:`~letsencrypt_apache.obj.VirtualHost`)

    :ivar dict assoc: Mapping between domains and vhosts

    """
    zope.interface.implements(interfaces.IAuthenticator, interfaces.IInstaller)
    zope.interface.classProvides(interfaces.IPluginFactory)
    description = "Apache Web Server - Alpha"
@classmethod
def add_parser_arguments(cls, add):
add("ctl", default=constants.CLI_DEFAULTS["ctl"],
help="Path to the 'apache2ctl' binary, used for 'configtest', "
"retrieving the Apache2 version number, and initialization "
"parameters.")
add("enmod", default=constants.CLI_DEFAULTS["enmod"],
help="Path to the Apache 'a2enmod' binary.")
add("dismod", default=constants.CLI_DEFAULTS["dismod"],
help="Path to the Apache 'a2dismod' binary.")
add("le-vhost-ext", default=constants.CLI_DEFAULTS["le_vhost_ext"],
help="SSL vhost configuration extension.")
add("server-root", default=constants.CLI_DEFAULTS["server_root"],
help="Apache server root directory.")
le_util.add_deprecated_argument(add, "init-script", 1)
    def __init__(self, *args, **kwargs):
        """Initialize an Apache Configurator.

        :param tup version: version of Apache as a tuple (2, 4, 7)
            (used mostly for unittesting)

        """
        # Pop "version" before delegating; the parent __init__ does not
        # accept it.
        version = kwargs.pop("version", None)
        super(ApacheConfigurator, self).__init__(*args, **kwargs)
        # Add name_server association dict
        self.assoc = dict()
        # Outstanding challenges
        self._chall_out = set()
        # These will be set in the prepare function
        self.parser = None
        self.version = version
        self.vhosts = None
        # Dispatch table mapping enhancement names (see
        # supported_enhancements()) to their implementations.
        self._enhance_func = {"redirect": self._enable_redirect,
                              "ensure-http-header": self._set_http_header}
    @property
    def mod_ssl_conf(self):
        """Full absolute path to SSL configuration file.

        Located under the letsencrypt config directory; installed by
        install_ssl_options_conf() during prepare().
        """
        return os.path.join(self.config.config_dir, constants.MOD_SSL_CONF_DEST)
    def prepare(self):
        """Prepare the authenticator/installer.

        :raises .errors.NoInstallationError: If Apache configs cannot be found
        :raises .errors.MisconfigurationError: If Apache is misconfigured
        :raises .errors.NotSupportedError: If Apache version is not supported
        :raises .errors.PluginError: If there is any other error

        """
        # Verify Apache is installed
        for exe in (self.conf("ctl"), self.conf("enmod"), self.conf("dismod")):
            if not le_util.exe_exists(exe):
                raise errors.NoInstallationError
        # Make sure configuration is valid
        self.config_test()
        self.parser = parser.ApacheParser(
            self.aug, self.conf("server-root"), self.conf("ctl"))
        # Check for errors in parsing files with Augeas
        self.check_parsing_errors("httpd.aug")
        # Set Version
        if self.version is None:
            self.version = self.get_version()
        if self.version < (2, 2):
            # NOTE(review): the "%s" here is never interpolated -- the
            # version is passed as a second constructor argument.
            raise errors.NotSupportedError(
                "Apache Version %s not supported.", str(self.version))
        # Get all of the available vhosts
        self.vhosts = self.get_virtual_hosts()
        # Copy our SSL options snippet into the config dir so vhosts can
        # Include it.
        install_ssl_options_conf(self.mod_ssl_conf)
def deploy_cert(self, domain, cert_path, key_path,
chain_path=None, fullchain_path=None): # pylint: disable=unused-argument
"""Deploys certificate to specified virtual host.
Currently tries to find the last directives to deploy the cert in
the VHost associated with the given domain. If it can't find the
directives, it searches the "included" confs. The function verifies that
it has located the three directives and finally modifies them to point
to the correct destination. After the certificate is installed, the
VirtualHost is enabled if it isn't already.
.. todo:: Might be nice to remove chain directive if none exists
This shouldn't happen within letsencrypt though
:raises errors.PluginError: When unable to deploy certificate due to
a lack of directives
"""
vhost = self.choose_vhost(domain)
self._clean_vhost(vhost)
# This is done first so that ssl module is enabled and cert_path,
# cert_key... can all be parsed appropriately
self.prepare_server_https("443")
path = {"cert_path": self.parser.find_dir("SSLCertificateFile", None, vhost.path),
"cert_key": self.parser.find_dir("SSLCertificateKeyFile", None, vhost.path)}
# Only include if a certificate chain is specified
if chain_path is not None:
path["chain_path"] = self.parser.find_dir(
"SSLCertificateChainFile", None, vhost.path)
if not path["cert_path"] or not path["cert_key"]:
# Throw some can't find all of the directives error"
logger.warn(
"Cannot find a cert or key directive in %s. "
"VirtualHost was not modified", vhost.path)
# Presumably break here so that the virtualhost is not modified
raise errors.PluginError(
"Unable to find cert and/or key directives")
logger.info("Deploying Certificate to VirtualHost %s", vhost.filep)
logger.debug("Apache version is %s",
".".join(str(i) for i in self.version))
if self.version < (2, 4, 8) or (chain_path and not fullchain_path):
# install SSLCertificateFile, SSLCertificateKeyFile,
# and SSLCertificateChainFile directives
set_cert_path = cert_path
self.aug.set(path["cert_path"][-1], cert_path)
self.aug.set(path["cert_key"][-1], key_path)
if chain_path is not None:
self.parser.add_dir(vhost.path,
"SSLCertificateChainFile", chain_path)
else:
raise errors.PluginError("--chain-path is required for your version of Apache")
else:
if not fullchain_path:
raise errors.PluginError("Please provide the --fullchain-path\
option pointing to your full chain file")
set_cert_path = fullchain_path
self.aug.set(path["cert_path"][-1], fullchain_path)
self.aug.set(path["cert_key"][-1], key_path)
# Save notes about the transaction that took place
self.save_notes += ("Changed vhost at %s with addresses of %s\n"
"\tSSLCertificateFile %s\n"
"\tSSLCertificateKeyFile %s\n" %
(vhost.filep,
", ".join(str(addr) for addr in vhost.addrs),
set_cert_path, key_path))
if chain_path is not None:
self.save_notes += "\tSSLCertificateChainFile %s\n" % chain_path
# Make sure vhost is enabled
if not vhost.enabled:
self.enable_site(vhost)
    def choose_vhost(self, target_name, temp=False):
        """Chooses a virtual host based on the given domain name.

        If there is no clear virtual host to be selected, the user is prompted
        with all available choices.

        The returned vhost is guaranteed to have TLS enabled unless temp is
        True. If temp is True, there is no such guarantee and the result is
        not cached.

        :param str target_name: domain name
        :param bool temp: whether the vhost is only used temporarily

        :returns: ssl vhost associated with name
        :rtype: :class:`~letsencrypt_apache.obj.VirtualHost`

        :raises .errors.PluginError: If no vhost is available or chosen

        """
        # Allows for domain names to be associated with a virtual host
        if target_name in self.assoc:
            return self.assoc[target_name]
        # Try to find a reasonable vhost
        vhost = self._find_best_vhost(target_name)
        if vhost is not None:
            if temp:
                # Temporary use: skip both the SSL upgrade and the cache.
                return vhost
            if not vhost.ssl:
                vhost = self.make_vhost_ssl(vhost)
            self.assoc[target_name] = vhost
            return vhost
        # No automatic match; fall back to interactive selection.
        return self._choose_vhost_from_list(target_name, temp)
    def _choose_vhost_from_list(self, target_name, temp=False):
        """Prompt the user to pick a vhost for target_name; upgrade to SSL.

        :raises .errors.PluginError: If no vhost is selected or the SSL
            upgrade would conflict with an existing HTTPS VirtualHost.
        """
        # Select a vhost from a list
        vhost = display_ops.select_vhost(target_name, self.vhosts)
        if vhost is None:
            logger.error(
                "No vhost exists with servername or alias of: %s. "
                "No vhost was selected. Please specify servernames "
                "in the Apache config", target_name)
            raise errors.PluginError("No vhost selected")
        elif temp:
            return vhost
        elif not vhost.ssl:
            addrs = self._get_proposed_addrs(vhost, "443")
            # TODO: Conflicts is too conservative
            # Note: the generator-expression `vhost` has its own scope and
            # does not clobber the selected vhost above.
            if not any(vhost.enabled and vhost.conflicts(addrs) for vhost in self.vhosts):
                vhost = self.make_vhost_ssl(vhost)
            else:
                logger.error(
                    "The selected vhost would conflict with other HTTPS "
                    "VirtualHosts within Apache. Please select another "
                    "vhost or add ServerNames to your configuration.")
                raise errors.PluginError(
                    "VirtualHost not able to be selected.")
        # Cache the association for subsequent choose_vhost() calls.
        self.assoc[target_name] = vhost
        return vhost
def _find_best_vhost(self, target_name):
"""Finds the best vhost for a target_name.
This does not upgrade a vhost to HTTPS... it only finds the most
appropriate vhost for the given target_name.
:returns: VHost or None
"""
# Points 4 - Servername SSL
# Points 3 - Address name with SSL
# Points 2 - Servername no SSL
# Points 1 - Address name with no SSL
best_candidate = None
best_points = 0
for vhost in self.vhosts:
if vhost.modmacro is True:
continue
if target_name in vhost.get_names():
points = 2
elif any(addr.get_addr() == target_name for addr in vhost.addrs):
points = 1
else:
# No points given if names can't be found.
# This gets hit but doesn't register
continue # pragma: no cover
if vhost.ssl:
points += 2
if points > best_points:
best_points = points
best_candidate = vhost
# No winners here... is there only one reasonable vhost?
if best_candidate is None:
# reasonable == Not all _default_ addrs
vhosts = self._non_default_vhosts()
# remove mod_macro hosts from reasonable vhosts
reasonable_vhosts = [vh for vh
in vhosts if vh.modmacro is False]
if len(reasonable_vhosts) == 1:
best_candidate = reasonable_vhosts[0]
return best_candidate
def _non_default_vhosts(self):
"""Return all non _default_ only vhosts."""
return [vh for vh in self.vhosts if not all(
addr.get_addr() == "_default_" for addr in vh.addrs
)]
def get_all_names(self):
"""Returns all names found in the Apache Configuration.
:returns: All ServerNames, ServerAliases, and reverse DNS entries for
virtual host addresses
:rtype: set
"""
all_names = set()
vhost_macro = []
for vhost in self.vhosts:
all_names.update(vhost.get_names())
if vhost.modmacro:
vhost_macro.append(vhost.filep)
for addr in vhost.addrs:
if common.hostname_regex.match(addr.get_addr()):
all_names.add(addr.get_addr())
else:
name = self.get_name_from_ip(addr)
if name:
all_names.add(name)
if len(vhost_macro) > 0:
zope.component.getUtility(interfaces.IDisplay).notification(
"Apache mod_macro seems to be in use in file(s):\n{0}"
"\n\nUnfortunately mod_macro is not yet supported".format(
"\n ".join(vhost_macro)))
return all_names
def get_name_from_ip(self, addr): # pylint: disable=no-self-use
"""Returns a reverse dns name if available.
:param addr: IP Address
:type addr: ~.common.Addr
:returns: name or empty string if name cannot be determined
:rtype: str
"""
# If it isn't a private IP, do a reverse DNS lookup
if not common.private_ips_regex.match(addr.get_addr()):
try:
socket.inet_aton(addr.get_addr())
return socket.gethostbyaddr(addr.get_addr())[0]
except (socket.error, socket.herror, socket.timeout):
pass
return ""
    def _add_servernames(self, host):
        """Helper function for get_virtual_hosts().

        Populates host.name and host.aliases from the ServerName and
        ServerAlias directives in the vhost's Augeas subtree. mod_macro
        hosts are skipped because their names are macro parameters, not
        literal values.

        :param host: In progress vhost whose names will be added
        :type host: :class:`~letsencrypt_apache.obj.VirtualHost`

        """
        # Take the final ServerName as each overrides the previous
        servername_match = self.parser.find_dir(
            "ServerName", None, start=host.path, exclude=False)
        serveralias_match = self.parser.find_dir(
            "ServerAlias", None, start=host.path, exclude=False)
        for alias in serveralias_match:
            serveralias = self.parser.get_arg(alias)
            if not host.modmacro:
                host.aliases.add(serveralias)
        if servername_match:
            # Get last ServerName as each overwrites the previous
            servername = self.parser.get_arg(servername_match[-1])
            if not host.modmacro:
                host.name = servername
    def _create_vhost(self, path):
        """Used by get_virtual_hosts to create vhost objects

        :param str path: Augeas path to virtual host

        :returns: newly created vhost
        :rtype: :class:`~letsencrypt_apache.obj.VirtualHost`

        """
        # Collect the <VirtualHost> address arguments.
        addrs = set()
        args = self.aug.match(path + "/arg")
        for arg in args:
            addrs.add(obj.Addr.fromstring(self.parser.get_arg(arg)))
        is_ssl = False
        if self.parser.find_dir("SSLEngine", "on", start=path, exclude=False):
            is_ssl = True
        # "SSLEngine on" might be set outside of <VirtualHost>
        # Treat vhosts with port 443 as ssl vhosts
        for addr in addrs:
            if addr.get_port() == "443":
                is_ssl = True
        filename = get_file_path(path)
        is_enabled = self.is_site_enabled(filename)
        # A "/macro/" segment in the Augeas path marks a mod_macro vhost,
        # which this plugin does not support modifying.
        macro = False
        if "/macro/" in path.lower():
            macro = True
        vhost = obj.VirtualHost(filename, path, addrs, is_ssl,
                                is_enabled, modmacro=macro)
        self._add_servernames(vhost)
        return vhost
# TODO: make "sites-available" a configurable directory
def get_virtual_hosts(self):
"""Returns list of virtual hosts found in the Apache configuration.
:returns: List of :class:`~letsencrypt_apache.obj.VirtualHost`
objects found in configuration
:rtype: list
"""
# Search sites-available, httpd.conf for possible virtual hosts
paths = self.aug.match(
("/files%s/sites-available//*[label()=~regexp('%s')]" %
(self.parser.root, parser.case_i("VirtualHost"))))
vhs = []
for path in paths:
vhs.append(self._create_vhost(path))
return vhs
def is_name_vhost(self, target_addr):
"""Returns if vhost is a name based vhost
NameVirtualHost was deprecated in Apache 2.4 as all VirtualHosts are
now NameVirtualHosts. If version is earlier than 2.4, check if addr
has a NameVirtualHost directive in the Apache config
:param letsencrypt_apache.obj.Addr target_addr: vhost address
:returns: Success
:rtype: bool
"""
# Mixed and matched wildcard NameVirtualHost with VirtualHost
# behavior is undefined. Make sure that an exact match exists
# search for NameVirtualHost directive for ip_addr
# note ip_addr can be FQDN although Apache does not recommend it
return (self.version >= (2, 4) or
self.parser.find_dir("NameVirtualHost", str(target_addr)))
def add_name_vhost(self, addr):
"""Adds NameVirtualHost directive for given address.
:param addr: Address that will be added as NameVirtualHost directive
:type addr: :class:`~letsencrypt_apache.obj.Addr`
"""
loc = parser.get_aug_path(self.parser.loc["name"])
if addr.get_port() == "443":
path = self.parser.add_dir_to_ifmodssl(
loc, "NameVirtualHost", [str(addr)])
else:
path = self.parser.add_dir(loc, "NameVirtualHost", [str(addr)])
msg = ("Setting %s to be NameBasedVirtualHost\n"
"\tDirective added to %s\n" % (addr, path))
logger.debug(msg)
self.save_notes += msg
    def prepare_server_https(self, port, temp=False):
        """Prepare the server for HTTPS.

        Make sure that the ssl_module is loaded and that the server
        is appropriately listening on port.

        :param str port: Port to listen on
        :param bool temp: whether the ssl module enablement is temporary

        """
        if "ssl_module" not in self.parser.modules:
            self.enable_mod("ssl", temp=temp)
        # Check for Listen <port>
        # Note: This could be made to also look for ip:443 combo
        listens = [self.parser.get_arg(x).split()[0] for x in self.parser.find_dir("Listen")]
        # In case no Listens are set (which really is a broken apache config)
        if not listens:
            listens = ["80"]
        # NOTE(review): entries are appended to `listens` while it is being
        # iterated; appended entries are re-visited but the membership checks
        # below make that a no-op -- confirm before restructuring.
        for listen in listens:
            # For any listen statement, check if the machine also listens on Port 443.
            # If not, add such a listen statement.
            if len(listen.split(":")) == 1:
                # Its listening to all interfaces
                if port not in listens:
                    if port == "443":
                        args = [port]
                    else:
                        # Non-standard ports should specify https protocol
                        args = [port, "https"]
                    # Place the new Listen inside <IfModule mod_ssl.c>.
                    self.parser.add_dir_to_ifmodssl(
                        parser.get_aug_path(
                            self.parser.loc["listen"]), "Listen", args)
                    self.save_notes += "Added Listen %s directive to %s\n" % (
                        port, self.parser.loc["listen"])
                    listens.append(port)
            else:
                # The Listen statement specifies an ip
                # Split on the LAST colon (by reversing) so the port is
                # stripped and the ip part is kept intact.
                _, ip = listen[::-1].split(":", 1)
                ip = ip[::-1]
                if "%s:%s" % (ip, port) not in listens:
                    if port == "443":
                        args = ["%s:%s" % (ip, port)]
                    else:
                        # Non-standard ports should specify https protocol
                        args = ["%s:%s" % (ip, port), "https"]
                    self.parser.add_dir_to_ifmodssl(
                        parser.get_aug_path(
                            self.parser.loc["listen"]), "Listen", args)
                    self.save_notes += "Added Listen %s:%s directive to %s\n" % (
                        ip, port, self.parser.loc["listen"])
                    listens.append("%s:%s" % (ip, port))
def make_addrs_sni_ready(self, addrs):
"""Checks to see if the server is ready for SNI challenges.
:param addrs: Addresses to check SNI compatibility
:type addrs: :class:`~letsencrypt_apache.obj.Addr`
"""
# Version 2.4 and later are automatically SNI ready.
if self.version >= (2, 4):
return
for addr in addrs:
if not self.is_name_vhost(addr):
logger.debug("Setting VirtualHost at %s to be a name "
"based virtual host", addr)
self.add_name_vhost(addr)
    def make_vhost_ssl(self, nonssl_vhost):  # pylint: disable=too-many-locals
        """Makes an ssl_vhost version of a nonssl_vhost.

        Duplicates vhost and adds default ssl options
        New vhost will reside as (nonssl_vhost.path) +
        ``letsencrypt_apache.constants.CLI_DEFAULTS["le_vhost_ext"]``

        .. note:: This function saves the configuration

        :param nonssl_vhost: Valid VH that doesn't have SSLEngine on
        :type nonssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

        :returns: SSL vhost
        :rtype: :class:`~letsencrypt_apache.obj.VirtualHost`

        :raises .errors.PluginError: If more than one virtual host is in
            the file or if plugin is unable to write/read vhost files.

        """
        avail_fp = nonssl_vhost.filep
        ssl_fp = self._get_ssl_vhost_path(avail_fp)
        # Copy the file wrapped in <IfModule mod_ssl.c> before parsing it.
        self._copy_create_ssl_vhost_skeleton(avail_fp, ssl_fp)
        # Reload augeas to take into account the new vhost
        self.aug.load()
        # Get Vhost augeas path for new vhost
        vh_p = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
                              (ssl_fp, parser.case_i("VirtualHost")))
        if len(vh_p) != 1:
            logger.error("Error: should only be one vhost in %s", avail_fp)
            raise errors.PluginError("Currently, we only support "
                                     "configurations with one vhost per file")
        else:
            # This simplifies the process
            vh_p = vh_p[0]
        # Update Addresses
        self._update_ssl_vhosts_addrs(vh_p)
        # Add directives
        self._add_dummy_ssl_directives(vh_p)
        # Log actions and create save notes
        logger.info("Created an SSL vhost at %s", ssl_fp)
        self.save_notes += "Created ssl vhost at %s\n" % ssl_fp
        self.save()
        # We know the length is one because of the assertion above
        # Create the Vhost object
        ssl_vhost = self._create_vhost(vh_p)
        self.vhosts.append(ssl_vhost)
        # NOTE: Searches through Augeas seem to ruin changes to directives
        #       The configuration must also be saved before being searched
        #       for the new directives; For these reasons... this is tacked
        #       on after fully creating the new vhost
        # Now check if addresses need to be added as NameBasedVhost addrs
        # This is for compliance with versions of Apache < 2.4
        self._add_name_vhost_if_necessary(ssl_vhost)
        return ssl_vhost
def _get_ssl_vhost_path(self, non_ssl_vh_fp):
# Get filepath of new ssl_vhost
if non_ssl_vh_fp.endswith(".conf"):
return non_ssl_vh_fp[:-(len(".conf"))] + self.conf("le_vhost_ext")
else:
return non_ssl_vh_fp + self.conf("le_vhost_ext")
    def _copy_create_ssl_vhost_skeleton(self, avail_fp, ssl_fp):
        """Copies over existing Vhost with IfModule mod_ssl.c> skeleton.

        :param str avail_fp: Pointer to the original available non-ssl vhost
        :param str ssl_fp: Full path where the new ssl_vhost will reside.

        A new file is created on the filesystem.

        """
        # First register the creation so that it is properly removed if
        # configuration is rolled back
        self.reverter.register_file_creation(False, ssl_fp)
        try:
            # Wrap the original vhost verbatim inside <IfModule mod_ssl.c>
            # so the copy is inert when mod_ssl is not loaded.
            with open(avail_fp, "r") as orig_file:
                with open(ssl_fp, "w") as new_file:
                    new_file.write("<IfModule mod_ssl.c>\n")
                    for line in orig_file:
                        new_file.write(line)
                    new_file.write("</IfModule>\n")
        except IOError:
            logger.fatal("Error writing/reading to file in make_vhost_ssl")
            raise errors.PluginError("Unable to write/read in make_vhost_ssl")
def _update_ssl_vhosts_addrs(self, vh_path):
ssl_addrs = set()
ssl_addr_p = self.aug.match(vh_path + "/arg")
for addr in ssl_addr_p:
old_addr = obj.Addr.fromstring(
str(self.parser.get_arg(addr)))
ssl_addr = old_addr.get_addr_obj("443")
self.aug.set(addr, str(ssl_addr))
ssl_addrs.add(ssl_addr)
return ssl_addrs
    def _clean_vhost(self, vhost):
        """Strip directives that would conflict with the cert deployment."""
        # remove duplicated or conflicting ssl directives
        self._deduplicate_directives(vhost.path,
                                     ["SSLCertificateFile", "SSLCertificateKeyFile"])
        # remove all problematic directives
        self._remove_directives(vhost.path, ["SSLCertificateChainFile"])
def _deduplicate_directives(self, vh_path, directives):
for directive in directives:
while len(self.parser.find_dir(directive, None, vh_path, False)) > 1:
directive_path = self.parser.find_dir(directive, None, vh_path, False)
self.aug.remove(re.sub(r"/\w*$", "", directive_path[0]))
def _remove_directives(self, vh_path, directives):
for directive in directives:
while len(self.parser.find_dir(directive, None, vh_path, False)) > 0:
directive_path = self.parser.find_dir(directive, None, vh_path, False)
self.aug.remove(re.sub(r"/\w*$", "", directive_path[0]))
def _add_dummy_ssl_directives(self, vh_path):
self.parser.add_dir(vh_path, "SSLCertificateFile",
"insert_cert_file_path")
self.parser.add_dir(vh_path, "SSLCertificateKeyFile",
"insert_key_file_path")
self.parser.add_dir(vh_path, "Include", self.mod_ssl_conf)
    def _add_name_vhost_if_necessary(self, vhost):
        """Add NameVirtualHost Directives if necessary for new vhost.

        NameVirtualHosts was a directive in Apache < 2.4
        https://httpd.apache.org/docs/2.2/mod/core.html#namevirtualhost

        :param vhost: New virtual host that was recently created.
        :type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

        """
        need_to_save = False
        # See if the exact address appears in any other vhost
        # Remember 1.1.1.1:* == 1.1.1.1 -> hence any()
        for addr in vhost.addrs:
            for test_vh in self.vhosts:
                # Only a *different* file sharing the same address forces
                # name-based virtual hosting.
                if (vhost.filep != test_vh.filep and
                        any(test_addr == addr for test_addr in test_vh.addrs) and
                        not self.is_name_vhost(addr)):
                    self.add_name_vhost(addr)
                    logger.info("Enabling NameVirtualHosts on %s", addr)
                    need_to_save = True
        if need_to_save:
            self.save()
############################################################################
# Enhancements
############################################################################
    def supported_enhancements(self):  # pylint: disable=no-self-use
        """Returns currently supported enhancements."""
        # Keep in sync with the keys of self._enhance_func (set in __init__).
        return ["redirect", "ensure-http-header"]
def enhance(self, domain, enhancement, options=None):
"""Enhance configuration.
:param str domain: domain to enhance
:param str enhancement: enhancement type defined in
:const:`~letsencrypt.constants.ENHANCEMENTS`
:param options: options for the enhancement
See :const:`~letsencrypt.constants.ENHANCEMENTS`
documentation for appropriate parameter.
:raises .errors.PluginError: If Enhancement is not supported, or if
there is any other problem with the enhancement.
"""
try:
func = self._enhance_func[enhancement]
except KeyError:
raise errors.PluginError(
"Unsupported enhancement: {0}".format(enhancement))
try:
func(self.choose_vhost(domain), options)
except errors.PluginError:
logger.warn("Failed %s for %s", enhancement, domain)
raise
    def _set_http_header(self, ssl_vhost, header_substring):
        """Enables header that is identified by header_substring on ssl_vhost.

        If the header identified by header_substring is not already set,
        a new Header directive is placed in ssl_vhost's configuration with
        arguments from: constants.HTTP_HEADER[header_substring]

        .. note:: This function saves the configuration

        :param ssl_vhost: Destination of traffic, an ssl enabled vhost
        :type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

        :param header_substring: string that uniquely identifies a header.
            e.g: Strict-Transport-Security, Upgrade-Insecure-Requests.
        :type str

        :raises .errors.PluginError: If no viable HTTP host can be created or
            set with header header_substring.
        :raises .errors.PluginEnhancementAlreadyPresent: If the header is
            already configured on the vhost.

        """
        # The Header directive requires mod_headers.
        if "headers_module" not in self.parser.modules:
            self.enable_mod("headers")
        # Check if selected header is already set
        self._verify_no_matching_http_header(ssl_vhost, header_substring)
        # Add directives to server
        self.parser.add_dir(ssl_vhost.path, "Header",
                            constants.HEADER_ARGS[header_substring])
        self.save_notes += ("Adding %s header to ssl vhost in %s\n" %
                            (header_substring, ssl_vhost.filep))
        self.save()
        logger.info("Adding %s header to ssl vhost in %s", header_substring,
                    ssl_vhost.filep)
    def _verify_no_matching_http_header(self, ssl_vhost, header_substring):
        """Checks to see if an there is an existing Header directive that
        contains the string header_substring.

        :param ssl_vhost: vhost to check
        :type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

        :param header_substring: string that uniquely identifies a header.
            e.g: Strict-Transport-Security, Upgrade-Insecure-Requests.
        :type str

        :raises errors.PluginEnhancementAlreadyPresent: When header
            header_substring exists

        """
        header_path = self.parser.find_dir("Header", None, start=ssl_vhost.path)
        if header_path:
            # "Existing Header directive for virtualhost"
            # Match the substring as a whole word, case-insensitively:
            # it must be delimited by a space, a quote, or line start/end.
            pat = '(?:[ "]|^)(%s)(?:[ "]|$)' % (header_substring.lower())
            for match in header_path:
                if re.search(pat, self.aug.get(match).lower()):
                    raise errors.PluginEnhancementAlreadyPresent(
                        "Existing %s header" % (header_substring))
def _enable_redirect(self, ssl_vhost, unused_options):
    """Redirect all equivalent HTTP traffic to ssl_vhost.

    .. todo:: This enhancement should be rewritten and will
        unfortunately require lots of debugging by hand.

    Adds a RewriteRule redirect to the port 80 equivalent of ssl_vhost.
    First the function attempts to find the vhost with equivalent
    ip addresses that serves on non-ssl ports; if none exists, a new
    redirect-only vhost is created.

    .. note:: This function saves the configuration

    :param ssl_vhost: Destination of traffic, an ssl enabled vhost
    :type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

    :param unused_options: Not currently used
    :type unused_options: Not Available

    :raises .errors.PluginError: If no viable HTTP host can be created or
        used for the redirect.

    """
    if "rewrite_module" not in self.parser.modules:
        self.enable_mod("rewrite")
    general_vh = self._get_http_vhost(ssl_vhost)

    if general_vh is None:
        # Add virtual_server with redirect
        logger.debug("Did not find http version of ssl virtual host "
                     "attempting to create")
        redirect_addrs = self._get_proposed_addrs(ssl_vhost)
        # Refuse to create a new vhost if its addresses would collide
        # with an already-enabled vhost.
        for vhost in self.vhosts:
            if vhost.enabled and vhost.conflicts(redirect_addrs):
                raise errors.PluginError(
                    "Unable to find corresponding HTTP vhost; "
                    "Unable to create one as intended addresses conflict; "
                    "Current configuration does not support automated "
                    "redirection")
        self._create_redirect_vhost(ssl_vhost)
    else:
        # Check if LetsEncrypt redirection already exists
        self._verify_no_letsencrypt_redirect(general_vh)

        # Note: if code flow gets here it means we didn't find the exact
        # letsencrypt RewriteRule config for redirection. Finding
        # another RewriteRule is likely to be fine in most or all cases,
        # but redirect loops are possible in very obscure cases; see #1620
        # for reasoning.
        if self._is_rewrite_exists(general_vh):
            logger.warn("Added an HTTP->HTTPS rewrite in addition to "
                        "other RewriteRules; you may wish to check for "
                        "overall consistency.")

        # Add directives to server
        # Note: These are not immediately searchable in sites-enabled
        #     even with save() and load()
        if not self._is_rewrite_engine_on(general_vh):
            self.parser.add_dir(general_vh.path, "RewriteEngine", "on")

        # Apache >= 2.3.9 supports the [END] flag on RewriteRule.
        if self.get_version() >= (2, 3, 9):
            self.parser.add_dir(general_vh.path, "RewriteRule",
                                constants.REWRITE_HTTPS_ARGS_WITH_END)
        else:
            self.parser.add_dir(general_vh.path, "RewriteRule",
                                constants.REWRITE_HTTPS_ARGS)
        self.save_notes += ("Redirecting host in %s to ssl vhost in %s\n" %
                            (general_vh.filep, ssl_vhost.filep))
        self.save()

        logger.info("Redirecting vhost in %s to ssl vhost in %s",
                    general_vh.filep, ssl_vhost.filep)
def _verify_no_letsencrypt_redirect(self, vhost):
    """Checks to see if a redirect was already installed by letsencrypt.

    Checks to see if the virtualhost already contains a RewriteRule that
    is identical to Let's Encrypt's redirection rewrite rule.

    :param vhost: vhost to check
    :type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

    :raises errors.PluginEnhancementAlreadyPresent: When the exact
        letsencrypt redirection RewriteRule exists in the virtual host.

    """
    rewrite_path = self.parser.find_dir(
        "RewriteRule", None, start=vhost.path)

    # There can be other RewriteRule directive lines in vhost config.
    # rewrite_args_dict keys are directive ids and the corresponding value
    # for each is a list of arguments to that directive.
    rewrite_args_dict = defaultdict(list)
    # Group Augeas argument matches by their parent directive id.
    pat = r'.*(directive\[\d+\]).*'
    for match in rewrite_path:
        m = re.match(pat, match)
        if m:
            dir_id = m.group(1)
            rewrite_args_dict[dir_id].append(match)

    if rewrite_args_dict:
        redirect_args = [constants.REWRITE_HTTPS_ARGS,
                         constants.REWRITE_HTTPS_ARGS_WITH_END]
        # A directive whose full argument list equals either known
        # Let's Encrypt rule was installed by us.
        for matches in rewrite_args_dict.values():
            if [self.aug.get(x) for x in matches] in redirect_args:
                raise errors.PluginEnhancementAlreadyPresent(
                    "Let's Encrypt has already enabled redirection")
def _is_rewrite_exists(self, vhost):
"""Checks if there exists a RewriteRule directive in vhost
:param vhost: vhost to check
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:returns: True if a RewriteRule directive exists.
:rtype: bool
"""
rewrite_path = self.parser.find_dir(
"RewriteRule", None, start=vhost.path)
return bool(rewrite_path)
def _is_rewrite_engine_on(self, vhost):
"""Checks if a RewriteEngine directive is on
:param vhost: vhost to check
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
rewrite_engine_path = self.parser.find_dir("RewriteEngine", "on",
start=vhost.path)
if rewrite_engine_path:
return self.parser.get_arg(rewrite_engine_path[0])
return False
def _create_redirect_vhost(self, ssl_vhost):
    """Creates an http_vhost specifically to redirect for the ssl_vhost.

    The new vhost is written to sites-available, loaded into Augeas and
    appended to ``self.vhosts``; a save note documents the change.

    :param ssl_vhost: ssl vhost
    :type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

    """
    text = self._get_redirect_config_str(ssl_vhost)

    redirect_filepath = self._write_out_redirect(ssl_vhost, text)

    # Reload Augeas so the new file is visible to the parser
    self.aug.load()
    # Make a new vhost data structure and add it to the lists
    new_vhost = self._create_vhost(parser.get_aug_path(redirect_filepath))
    self.vhosts.append(new_vhost)

    # Finally create documentation for the change
    self.save_notes += ("Created a port 80 vhost, %s, for redirection to "
                        "ssl vhost %s\n" %
                        (new_vhost.filep, ssl_vhost.filep))
def _get_redirect_config_str(self, ssl_vhost):
    """Build the text of a port-80 redirect vhost for ssl_vhost.

    Mirrors the ssl vhost's ServerName/ServerAlias and addresses
    (rewritten to port 80) and installs Let's Encrypt's HTTPS rewrite.

    :param ssl_vhost: vhost whose names and addresses are mirrored
    :type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

    :returns: Apache configuration text for the redirecting VirtualHost
    :rtype: str

    """
    # get servernames and serveraliases
    serveralias = ""
    servername = ""

    if ssl_vhost.name is not None:
        servername = "ServerName " + ssl_vhost.name
    if ssl_vhost.aliases:
        serveralias = "ServerAlias " + " ".join(ssl_vhost.aliases)

    rewrite_rule_args = []
    # Apache >= 2.3.9 supports the [END] flag on RewriteRule
    if self.get_version() >= (2, 3, 9):
        rewrite_rule_args = constants.REWRITE_HTTPS_ARGS_WITH_END
    else:
        rewrite_rule_args = constants.REWRITE_HTTPS_ARGS

    return ("<VirtualHost %s>\n"
            "%s \n"
            "%s \n"
            "ServerSignature Off\n"
            "\n"
            "RewriteEngine On\n"
            "RewriteRule %s\n"
            "\n"
            "ErrorLog /var/log/apache2/redirect.error.log\n"
            "LogLevel warn\n"
            "</VirtualHost>\n"
            % (" ".join(str(addr) for addr in self._get_proposed_addrs(ssl_vhost)),
               servername, serveralias,
               " ".join(rewrite_rule_args)))
def _write_out_redirect(self, ssl_vhost, text):
    """Write the redirect vhost configuration into sites-available.

    :param ssl_vhost: vhost the redirect is created for; its name is used
        to derive the new file's name when available
    :type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
    :param str text: full configuration text for the redirect vhost

    :returns: path of the file that was written
    :rtype: str

    """
    # This is the default name
    redirect_filename = "le-redirect.conf"

    # See if a more appropriate name can be applied
    if ssl_vhost.name is not None:
        # make sure servername doesn't exceed filename length restriction
        if len(ssl_vhost.name) < (255 - (len(redirect_filename) + 1)):
            redirect_filename = "le-redirect-%s.conf" % ssl_vhost.name

    redirect_filepath = os.path.join(
        self.parser.root, "sites-available", redirect_filename)

    # Register the new file that will be created
    # Note: always register the creation before writing to ensure file will
    # be removed in case of unexpected program exit
    self.reverter.register_file_creation(False, redirect_filepath)

    # Write out file
    with open(redirect_filepath, "w") as redirect_file:
        redirect_file.write(text)
    logger.info("Created redirect file: %s", redirect_filename)

    return redirect_filepath
def _get_http_vhost(self, ssl_vhost):
"""Find appropriate HTTP vhost for ssl_vhost."""
# First candidate vhosts filter
candidate_http_vhs = [
vhost for vhost in self.vhosts if not vhost.ssl
]
# Second filter - check addresses
for http_vh in candidate_http_vhs:
if http_vh.same_server(ssl_vhost):
return http_vh
return None
def _get_proposed_addrs(self, vhost, port="80"): # pylint: disable=no-self-use
"""Return all addrs of vhost with the port replaced with the specified.
:param obj.VirtualHost ssl_vhost: Original Vhost
:param str port: Desired port for new addresses
:returns: `set` of :class:`~obj.Addr`
"""
redirects = set()
for addr in vhost.addrs:
redirects.add(addr.get_addr_obj(port))
return redirects
def get_all_certs_keys(self):
    """Find all existing keys, certs from configuration.

    Retrieve all certs and keys set in VirtualHosts on the Apache server.

    :returns: set of tuples with form (cert, key, path), where
        cert - str path to certificate file
        key - str path to associated key file
        path - File path to configuration file
    :rtype: set

    """
    c_k = set()

    for vhost in self.vhosts:
        if vhost.ssl:
            cert_path = self.parser.find_dir(
                "SSLCertificateFile", None,
                start=vhost.path, exclude=False)
            key_path = self.parser.find_dir(
                "SSLCertificateKeyFile", None,
                start=vhost.path, exclude=False)

            if cert_path and key_path:
                # The last directive wins when a directive is repeated.
                cert = os.path.abspath(self.parser.get_arg(cert_path[-1]))
                key = os.path.abspath(self.parser.get_arg(key_path[-1]))
                c_k.add((cert, key, get_file_path(cert_path[-1])))
            else:
                # SSL vhost without both cert and key directives
                logger.warning(
                    "Invalid VirtualHost configuration - %s", vhost.filep)

    return c_k
def is_site_enabled(self, avail_fp):
    """Checks to see if the given site is enabled.

    A site counts as enabled when any file in sites-enabled has the same
    contents as the available file (typically via symlink).

    .. todo:: fix hardcoded sites-enabled, check os.path.samefile

    :param str avail_fp: Complete file path of available site

    :returns: Success
    :rtype: bool

    """
    enabled_dir = os.path.join(self.parser.root, "sites-enabled")
    for entry in os.listdir(enabled_dir):
        candidate = os.path.join(enabled_dir, entry)
        try:
            is_same = filecmp.cmp(avail_fp, candidate)
        except OSError:
            # Broken symlinks and unreadable entries are skipped
            continue
        if is_same:
            return True
    return False
def enable_site(self, vhost):
    """Enables an available site, Apache reload required.

    .. note:: Does not make sure that the site correctly works or that all
        modules are enabled appropriately.

    .. todo:: This function should number subdomains before the domain
        vhost

    .. todo:: Make sure link is not broken...

    :param vhost: vhost to enable
    :type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

    :raises .errors.NotSupportedError: If filesystem layout is not
        supported.

    """
    if self.is_site_enabled(vhost.filep):
        return

    if "/sites-available/" in vhost.filep:
        enabled_path = ("%s/sites-enabled/%s" %
                        (self.parser.root, os.path.basename(vhost.filep)))
        # Register before creating so rollback removes the symlink
        self.reverter.register_file_creation(False, enabled_path)
        os.symlink(vhost.filep, enabled_path)
        vhost.enabled = True
        logger.info("Enabling available site: %s", vhost.filep)
        self.save_notes += "Enabled site %s\n" % vhost.filep
    else:
        raise errors.NotSupportedError(
            "Unsupported filesystem layout. "
            "sites-available/enabled expected.")
def enable_mod(self, mod_name, temp=False):
    """Enables module in Apache.

    Both enables and reloads Apache so module is active. Known
    dependencies of the module are enabled first.

    :param str mod_name: Name of the module to enable. (e.g. 'ssl')
    :param bool temp: Whether or not this is a temporary action.

    :raises .errors.NotSupportedError: If the filesystem layout is not
        supported.
    :raises .errors.MisconfigurationError: If a2enmod or a2dismod cannot be
        run.

    """
    # Support Debian specific setup
    avail_path = os.path.join(self.parser.root, "mods-available")
    enabled_path = os.path.join(self.parser.root, "mods-enabled")
    if not os.path.isdir(avail_path) or not os.path.isdir(enabled_path):
        raise errors.NotSupportedError(
            "Unsupported directory layout. You may try to enable mod %s "
            "and try again." % mod_name)

    deps = _get_mod_deps(mod_name)

    # Enable all dependencies
    for dep in deps:
        if (dep + "_module") not in self.parser.modules:
            self._enable_mod_debian(dep, temp)
            self._add_parser_mod(dep)

            note = "Enabled dependency of %s module - %s" % (mod_name, dep)
            if not temp:
                self.save_notes += note + os.linesep
            logger.debug(note)

    # Enable actual module
    self._enable_mod_debian(mod_name, temp)
    self._add_parser_mod(mod_name)

    if not temp:
        self.save_notes += "Enabled %s module in Apache\n" % mod_name
    logger.info("Enabled Apache %s module", mod_name)

    # Modules can enable additional config files. Variables may be defined
    # within these new configuration sections.
    # Reload is not necessary as DUMP_RUN_CFG uses latest config.
    self.parser.update_runtime_variables(self.conf("ctl"))
def _add_parser_mod(self, mod_name):
"""Shortcut for updating parser modules."""
self.parser.modules.add(mod_name + "_module")
self.parser.modules.add("mod_" + mod_name + ".c")
def _enable_mod_debian(self, mod_name, temp):
    """Assumes mods-available, mods-enabled layout.

    Runs ``a2enmod`` for mod_name, registering an ``a2dismod`` undo
    command first so the change can be reverted.

    :param str mod_name: module to enable
    :param bool temp: register the undo as temporary when True

    :raises .errors.MisconfigurationError: If a2dismod cannot be found.

    """
    # Generate reversal command.
    # Try to be safe here... check that we can probably reverse before
    # applying enmod command
    if not le_util.exe_exists(self.conf("dismod")):
        raise errors.MisconfigurationError(
            "Unable to find a2dismod, please make sure a2enmod and "
            "a2dismod are configured correctly for letsencrypt.")

    self.reverter.register_undo_command(
        temp, [self.conf("dismod"), mod_name])
    le_util.run_script([self.conf("enmod"), mod_name])
def restart(self):
    """Runs a config test and reloads the Apache server.

    The config test runs first so a broken configuration is never
    loaded into the running server.

    :raises .errors.MisconfigurationError: If either the config test
        or reload fails.

    """
    self.config_test()
    self._reload()
def _reload(self):
    """Reloads the Apache server gracefully.

    :raises .errors.MisconfigurationError: If reload fails

    """
    try:
        # "graceful" finishes serving current requests before reloading
        le_util.run_script([self.conf("ctl"), "-k", "graceful"])
    except errors.SubprocessError as err:
        raise errors.MisconfigurationError(str(err))
def config_test(self):  # pylint: disable=no-self-use
    """Check the configuration of Apache for errors.

    :raises .errors.MisconfigurationError: If config_test fails

    """
    try:
        le_util.run_script([self.conf("ctl"), "configtest"])
    except errors.SubprocessError as err:
        raise errors.MisconfigurationError(str(err))
def get_version(self):
    """Return version of Apache Server.

    Version is returned as tuple. (ie. 2.4.7 = (2, 4, 7))

    :returns: version
    :rtype: tuple

    :raises .PluginError: if unable to find Apache version

    """
    try:
        stdout, _ = le_util.run_script([self.conf("ctl"), "-v"])
    except errors.SubprocessError:
        raise errors.PluginError(
            "Unable to run %s -v" % self.conf("ctl"))

    # The banner looks like "Server version: Apache/2.4.7 (Ubuntu)"
    matches = re.findall(r"Apache/([0-9\.]*)", stdout, re.IGNORECASE)

    if len(matches) != 1:
        raise errors.PluginError("Unable to find Apache version")

    return tuple(int(part) for part in matches[0].split("."))
def more_info(self):
    """Human-readable string to help understand the module.

    :returns: description including server root and detected version
    :rtype: str

    """
    return (
        "Configures Apache to authenticate and install HTTPS.{0}"
        "Server root: {root}{0}"
        "Version: {version}".format(
            os.linesep, root=self.parser.loc["root"],
            version=".".join(str(i) for i in self.version))
    )
###########################################################################
# Challenges Section
###########################################################################
def get_chall_pref(self, unused_domain):  # pylint: disable=no-self-use
    """Return list of challenge preferences.

    :param str unused_domain: domain the preferences apply to (ignored;
        the same list is returned for every domain)

    :returns: supported challenge types, most preferred first
    :rtype: list

    """
    return [challenges.TLSSNI01]
def perform(self, achalls):
    """Perform the configuration related challenge.

    This function currently assumes all challenges will be fulfilled.
    If this turns out not to be the case in the future. Cleanup and
    outstanding challenges will have to be designed better.

    :param list achalls: annotated challenges to fulfill

    :returns: challenge responses, in the same order as achalls
    :rtype: list

    """
    self._chall_out.update(achalls)
    responses = [None] * len(achalls)
    chall_doer = tls_sni_01.ApacheTlsSni01(self)

    for i, achall in enumerate(achalls):
        # Currently also have chall_doer hold associated index of the
        # challenge. This helps to put all of the responses back together
        # when they are all complete.
        chall_doer.add_chall(achall, i)

    sni_response = chall_doer.perform()
    if sni_response:
        # Must reload in order to activate the challenges.
        # Handled here because we may be able to load up other challenge
        # types
        self.restart()

        # TODO: Remove this dirty hack. We need to determine a reliable way
        # of identifying when the new configuration is being used.
        time.sleep(3)

        # Go through all of the challenges and assign them to the proper
        # place in the responses return value. All responses must be in the
        # same order as the original challenges.
        for i, resp in enumerate(sni_response):
            responses[chall_doer.indices[i]] = resp

    return responses
def cleanup(self, achalls):
    """Revert all challenges.

    :param list achalls: annotated challenges that are complete

    """
    self._chall_out.difference_update(achalls)

    # If all of the challenges have been finished, clean up everything
    if not self._chall_out:
        self.revert_challenge_config()
        self.restart()
        self.parser.init_modules()
def _get_mod_deps(mod_name):
"""Get known module dependencies.
.. note:: This does not need to be accurate in order for the client to
run. This simply keeps things clean if the user decides to revert
changes.
.. warning:: If all deps are not included, it may cause incorrect parsing
behavior, due to enable_mod's shortcut for updating the parser's
currently defined modules (`.ApacheConfigurator._add_parser_mod`)
This would only present a major problem in extremely atypical
configs that use ifmod for the missing deps.
"""
deps = {
"ssl": ["setenvif", "mime", "socache_shmcb"]
}
return deps.get(mod_name, [])
def get_file_path(vhost_path):
    """Get file path from augeas_vhost_path.

    Takes in Augeas path and returns the file name

    :param str vhost_path: Augeas virtual host path

    :returns: filename of vhost
    :rtype: str

    """
    # Strip off /files
    avail_fp = vhost_path[6:]

    # Repeatedly truncate at the first Augeas tree marker until none are
    # left; matching is case-insensitive.
    markers = ("/ifmodule", "/virtualhost", "/macro")
    while True:
        lowered = avail_fp.lower()
        for marker in markers:
            idx = lowered.find(marker)
            if idx != -1:
                avail_fp = avail_fp[:idx]
                break
        else:
            return avail_fp
def install_ssl_options_conf(options_ssl):
    """Copy Let's Encrypt's SSL options file into the system's config dir
    if required.

    :param str options_ssl: destination path for the options-ssl file;
        nothing happens when a file already exists there

    """
    # XXX if we ever try to enforce a local privilege boundary (eg, running
    # letsencrypt for unprivileged users via setuid), this function will
    # need to be modified.

    # XXX if the user is in security-autoupdate mode, we should be willing
    # to overwrite the options_ssl file at least if it's unmodified:
    # https://github.com/letsencrypt/letsencrypt/issues/1123

    # Check to make sure options-ssl.conf is installed
    if not os.path.isfile(options_ssl):
        shutil.copyfile(constants.MOD_SSL_CONF_SRC, options_ssl)
| 38.845361 | 95 | 0.608935 |
import filecmp
import logging
import os
import re
import shutil
import socket
import time

from collections import defaultdict

import zope.component
import zope.interface

from acme import challenges

from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt.plugins import common

from letsencrypt_apache import augeas_configurator
from letsencrypt_apache import constants
from letsencrypt_apache import display_ops
from letsencrypt_apache import obj
from letsencrypt_apache import parser
from letsencrypt_apache import tls_sni_01
logger = logging.getLogger(__name__)
# This is not able to be completely remedied by regular expressions because
# Augeas views <VirtualHost> </Virtualhost> as an error. This will just
# require another check_parsing_errors() after all files are included...
# (after a find_directive search is executed currently). It can be a one
# time check however because all of LE's transactions will ensure the
# configuration is left in a parseable state. Note: Apache does not
# recognize names in sites-enabled that have spaces. These are not added to the
# Apache configuration. It may be wise to warn the user if they are trying
# to use vhost filenames that contain spaces and offer to change ' ' to '_'
# Note: FILEPATHS and changes to files are transactional. They are copied
# over before the updates are made to the existing files. NEW_FILES is
# transactional due to the use of register_file_creation()
# TODO: Verify permissions on configuration root... it is easier than
# checking permissions on each of the relative directories and less error
# prone.
# TODO: Write a server protocol finder. Listen <port> <protocol> or
# Protocol <protocol>. This can verify partial setups are correct
# TODO: Add directives to sites-enabled... not sites-available.
# sites-available doesn't allow immediate find_dir search even with save()
class ApacheConfigurator(augeas_configurator.AugeasConfigurator):
zope.interface.implements(interfaces.IAuthenticator, interfaces.IInstaller)
zope.interface.classProvides(interfaces.IPluginFactory)
description = "Apache Web Server - Alpha"
@classmethod
def add_parser_arguments(cls, add):
    """Register CLI arguments for the Apache plugin.

    :param add: callable used to register one argument at a time
        (same signature as argparse's ``add_argument``)

    """
    add("ctl", default=constants.CLI_DEFAULTS["ctl"],
        help="Path to the 'apache2ctl' binary, used for 'configtest', "
             "retrieving the Apache2 version number, and initialization "
             "parameters.")
    add("enmod", default=constants.CLI_DEFAULTS["enmod"],
        help="Path to the Apache 'a2enmod' binary.")
    add("dismod", default=constants.CLI_DEFAULTS["dismod"],
        help="Path to the Apache 'a2dismod' binary.")
    add("le-vhost-ext", default=constants.CLI_DEFAULTS["le_vhost_ext"],
        help="SSL vhost configuration extension.")
    add("server-root", default=constants.CLI_DEFAULTS["server_root"],
        help="Apache server root directory.")
    # --init-script was removed; keep a stub so old CLIs fail gracefully
    le_util.add_deprecated_argument(add, "init-script", 1)
def __init__(self, *args, **kwargs):
    """Initialize an Apache Configurator.

    :param tuple args: positional arguments for the parent
        AugeasConfigurator
    :param dict kwargs: keyword arguments; ``version`` (tuple) may be
        supplied to skip runtime Apache version detection in prepare()

    """
    version = kwargs.pop("version", None)
    super(ApacheConfigurator, self).__init__(*args, **kwargs)

    # Maps a domain name to its chosen/created VirtualHost
    self.assoc = dict()
    # Challenges that have been performed but not yet cleaned up
    self._chall_out = set()

    # These will be set in the prepare function
    self.parser = None
    self.version = version
    self.vhosts = None
    # Dispatch table for supported_enhancements
    self._enhance_func = {"redirect": self._enable_redirect,
                          "ensure-http-header": self._set_http_header}
@property
def mod_ssl_conf(self):
    """Full absolute path to the installed SSL options configuration file."""
    return os.path.join(self.config.config_dir, constants.MOD_SSL_CONF_DEST)
def prepare(self):
    """Prepare the authenticator/installer.

    Verifies the Apache binaries exist, validates the configuration,
    builds the Augeas parser, detects the server version, and collects
    all vhosts.

    :raises .errors.NoInstallationError: If the required binaries are
        missing
    :raises .errors.MisconfigurationError: If the configuration is invalid
    :raises .errors.NotSupportedError: If Apache is older than 2.2

    """
    # Verify Apache is installed
    for exe in (self.conf("ctl"), self.conf("enmod"), self.conf("dismod")):
        if not le_util.exe_exists(exe):
            raise errors.NoInstallationError

    # Make sure the configuration is valid before parsing it
    self.config_test()

    self.parser = parser.ApacheParser(
        self.aug, self.conf("server-root"), self.conf("ctl"))
    # Check for errors in parsing files with Augeas
    self.check_parsing_errors("httpd.aug")

    # Set Version
    if self.version is None:
        self.version = self.get_version()
    if self.version < (2, 2):
        raise errors.NotSupportedError(
            "Apache Version %s not supported.", str(self.version))

    # Get all of the available vhosts
    self.vhosts = self.get_virtual_hosts()

    install_ssl_options_conf(self.mod_ssl_conf)
def deploy_cert(self, domain, cert_path, key_path,
                chain_path=None, fullchain_path=None):
    """Deploys certificate to the vhost serving domain.

    On Apache < 2.4.8 the cert and chain are installed separately
    (SSLCertificateFile + SSLCertificateChainFile); newer versions use
    the fullchain in SSLCertificateFile directly.

    :param str domain: domain to deploy for
    :param str cert_path: certificate file path
    :param str key_path: private key file path
    :param str chain_path: chain file path (required for old Apache when
        no fullchain is given)
    :param str fullchain_path: full chain file path

    :raises errors.PluginError: When required directives or paths are
        missing

    """
    vhost = self.choose_vhost(domain)
    self._clean_vhost(vhost)

    # This is done first so that ssl module is enabled and cert_path,
    # cert_key... can all be parsed appropriately
    self.prepare_server_https("443")

    path = {"cert_path": self.parser.find_dir("SSLCertificateFile", None, vhost.path),
            "cert_key": self.parser.find_dir("SSLCertificateKeyFile", None, vhost.path)}

    # Only include if a certificate chain is specified
    if chain_path is not None:
        path["chain_path"] = self.parser.find_dir(
            "SSLCertificateChainFile", None, vhost.path)

    if not path["cert_path"] or not path["cert_key"]:
        logger.warn(
            "Cannot find a cert or key directive in %s. "
            "VirtualHost was not modified", vhost.path)
        # Presumably break here so that the virtualhost is not modified
        raise errors.PluginError(
            "Unable to find cert and/or key directives")

    logger.info("Deploying Certificate to VirtualHost %s", vhost.filep)
    logger.debug("Apache version is %s",
                 ".".join(str(i) for i in self.version))

    if self.version < (2, 4, 8) or (chain_path and not fullchain_path):
        # install SSLCertificateFile, SSLCertificateKeyFile,
        # and SSLCertificateChainFile directives
        set_cert_path = cert_path
        self.aug.set(path["cert_path"][-1], cert_path)
        self.aug.set(path["cert_key"][-1], key_path)
        if chain_path is not None:
            self.parser.add_dir(vhost.path,
                                "SSLCertificateChainFile", chain_path)
        else:
            raise errors.PluginError("--chain-path is required for your version of Apache")
    else:
        if not fullchain_path:
            raise errors.PluginError("Please provide the --fullchain-path\
option pointing to your full chain file")
        set_cert_path = fullchain_path
        self.aug.set(path["cert_path"][-1], fullchain_path)
        self.aug.set(path["cert_key"][-1], key_path)

    # Save notes about the transaction that took place
    self.save_notes += ("Changed vhost at %s with addresses of %s\n"
                        "\tSSLCertificateFile %s\n"
                        "\tSSLCertificateKeyFile %s\n" %
                        (vhost.filep,
                         ", ".join(str(addr) for addr in vhost.addrs),
                         set_cert_path, key_path))
    if chain_path is not None:
        self.save_notes += "\tSSLCertificateChainFile %s\n" % chain_path

    # Make sure vhost is enabled
    if not vhost.enabled:
        self.enable_site(vhost)
def choose_vhost(self, target_name, temp=False):
    """Chooses a virtual host based on the given domain name.

    A cached association is used when available; otherwise the best
    matching vhost is found (and upgraded to SSL when needed), falling
    back to asking the user to pick one.

    :param str target_name: domain name
    :param bool temp: whether the vhost is only used temporarily
        (skips SSL conversion and caching)

    :returns: ssl vhost associated with name
    :rtype: :class:`~letsencrypt_apache.obj.VirtualHost`

    :raises .errors.PluginError: If no vhost is available or chosen

    """
    # Allows for domain names to be associated with a virtual host
    if target_name in self.assoc:
        return self.assoc[target_name]

    # Try to find a reasonable vhost
    vhost = self._find_best_vhost(target_name)
    if vhost is not None:
        if temp:
            return vhost
        if not vhost.ssl:
            vhost = self.make_vhost_ssl(vhost)

        self.assoc[target_name] = vhost
        return vhost

    return self._choose_vhost_from_list(target_name, temp)
def _choose_vhost_from_list(self, target_name, temp=False):
    """Ask the user to select a vhost for target_name.

    :param str target_name: domain name being configured
    :param bool temp: when True, return the selection without SSL
        conversion or caching

    :returns: chosen (possibly SSL-converted) vhost
    :rtype: :class:`~letsencrypt_apache.obj.VirtualHost`

    :raises .errors.PluginError: If no vhost is selected or the selection
        cannot be converted to SSL

    """
    # Select a vhost from a list
    vhost = display_ops.select_vhost(target_name, self.vhosts)
    if vhost is None:
        logger.error(
            "No vhost exists with servername or alias of: %s. "
            "No vhost was selected. Please specify servernames "
            "in the Apache config", target_name)
        raise errors.PluginError("No vhost selected")
    elif temp:
        return vhost
    elif not vhost.ssl:
        addrs = self._get_proposed_addrs(vhost, "443")
        # TODO: Conflicts is too conservative
        if not any(vhost.enabled and vhost.conflicts(addrs) for vhost in self.vhosts):
            vhost = self.make_vhost_ssl(vhost)
        else:
            logger.error(
                "The selected vhost would conflict with other HTTPS "
                "VirtualHosts within Apache. Please select another "
                "vhost or add ServerNames to your configuration.")
            raise errors.PluginError(
                "VirtualHost not able to be selected.")

    self.assoc[target_name] = vhost
    return vhost
def _find_best_vhost(self, target_name):
    """Finds the best vhost for a target_name.

    Candidates are scored and the highest score wins; when nothing
    matches, a lone "reasonable" (non-default, non-macro) vhost is used.

    :param str target_name: domain name to match

    :returns: VHost or None

    """
    # Points 4 - Servername SSL
    # Points 3 - Address name with SSL
    # Points 2 - Servername no SSL
    # Points 1 - Address name with no SSL
    best_candidate = None
    best_points = 0

    for vhost in self.vhosts:
        # mod_macro vhosts are not supported
        if vhost.modmacro is True:
            continue
        if target_name in vhost.get_names():
            points = 2
        elif any(addr.get_addr() == target_name for addr in vhost.addrs):
            points = 1
        else:
            # No points given if names can't be found.
            # This gets hit but doesn't register
            continue  # pragma: no cover

        if vhost.ssl:
            points += 2

        if points > best_points:
            best_points = points
            best_candidate = vhost

    # No winners here... is there only one reasonable vhost?
    if best_candidate is None:
        # reasonable == Not all _default_ addrs
        vhosts = self._non_default_vhosts()
        # remove mod_macro hosts from reasonable vhosts
        reasonable_vhosts = [vh for vh
                             in vhosts if vh.modmacro is False]
        if len(reasonable_vhosts) == 1:
            best_candidate = reasonable_vhosts[0]

    return best_candidate
def _non_default_vhosts(self):
return [vh for vh in self.vhosts if not all(
addr.get_addr() == "_default_" for addr in vh.addrs
)]
def get_all_names(self):
    """Returns all names found in the Apache Configuration.

    Collects every ServerName/ServerAlias from the parsed vhosts; for
    vhost addresses, hostname-shaped addrs are added directly and public
    IP addrs are added via reverse DNS when it resolves. A notification
    is displayed when mod_macro vhosts are found, since they are not
    supported.

    NOTE(review): relies on ``zope.component`` being importable; the file
    previously imported only ``zope.interface``, so the import is added
    at the top of the file.

    :returns: all names found in the Apache Configuration
    :rtype: set

    """
    all_names = set()
    vhost_macro = []

    for vhost in self.vhosts:
        all_names.update(vhost.get_names())
        if vhost.modmacro:
            vhost_macro.append(vhost.filep)

        for addr in vhost.addrs:
            if common.hostname_regex.match(addr.get_addr()):
                all_names.add(addr.get_addr())
            else:
                # Not hostname-shaped: try reverse DNS on the address
                name = self.get_name_from_ip(addr)
                if name:
                    all_names.add(name)

    # Idiomatic truthiness check instead of len(...) > 0
    if vhost_macro:
        zope.component.getUtility(interfaces.IDisplay).notification(
            "Apache mod_macro seems to be in use in file(s):\n{0}"
            "\n\nUnfortunately mod_macro is not yet supported".format(
                "\n ".join(vhost_macro)))

    return all_names
def get_name_from_ip(self, addr):  # pylint: disable=no-self-use
    """Resolve a vhost address to a hostname via reverse DNS.

    Private IP ranges are never looked up; lookup failures return "".

    :param addr: address to resolve
    :type addr: :class:`~letsencrypt_apache.obj.Addr`

    :returns: resolved hostname, or "" when unavailable
    :rtype: str

    """
    # Skip private IPs entirely — they will not have public reverse DNS
    if common.private_ips_regex.match(addr.get_addr()):
        return ""

    try:
        # inet_aton raises for non-IP strings, avoiding a DNS query
        socket.inet_aton(addr.get_addr())
        return socket.gethostbyaddr(addr.get_addr())[0]
    except (socket.error, socket.herror, socket.timeout):
        pass

    return ""
def _add_servernames(self, host):
    """Populate host.name and host.aliases from the configuration.

    Reads the vhost's ServerName and ServerAlias directives; mod_macro
    hosts are left untouched.

    :param host: In progress vhost whose names will be added
    :type host: :class:`~letsencrypt_apache.obj.VirtualHost`

    """
    # Take the final ServerName as each overrides the previous
    servername_match = self.parser.find_dir(
        "ServerName", None, start=host.path, exclude=False)
    serveralias_match = self.parser.find_dir(
        "ServerAlias", None, start=host.path, exclude=False)

    for alias in serveralias_match:
        serveralias = self.parser.get_arg(alias)
        if not host.modmacro:
            host.aliases.add(serveralias)

    if servername_match:
        # Get last ServerName as each overwrites the previous
        servername = self.parser.get_arg(servername_match[-1])
        if not host.modmacro:
            host.name = servername
def _create_vhost(self, path):
    """Used by get_virtual_hosts to build a VirtualHost object.

    :param str path: Augeas path of the <VirtualHost> node

    :returns: newly created vhost
    :rtype: :class:`~letsencrypt_apache.obj.VirtualHost`

    """
    addrs = set()
    args = self.aug.match(path + "/arg")
    for arg in args:
        addrs.add(obj.Addr.fromstring(self.parser.get_arg(arg)))
    is_ssl = False

    if self.parser.find_dir("SSLEngine", "on", start=path, exclude=False):
        is_ssl = True

    # "SSLEngine on" might be set outside of <VirtualHost>
    # Treat vhosts with port 443 as ssl vhosts
    for addr in addrs:
        if addr.get_port() == "443":
            is_ssl = True

    filename = get_file_path(path)
    is_enabled = self.is_site_enabled(filename)

    # mod_macro vhosts appear under a /macro/ Augeas node
    macro = False
    if "/macro/" in path.lower():
        macro = True

    vhost = obj.VirtualHost(filename, path, addrs, is_ssl,
                            is_enabled, modmacro=macro)
    self._add_servernames(vhost)
    return vhost
# TODO: make "sites-available" a configurable directory
def get_virtual_hosts(self):
    """Collect every virtual host defined in the Apache configuration.

    :returns: List of :class:`~letsencrypt_apache.obj.VirtualHost`
        objects found in the configuration
    :rtype: list

    """
    # Search sites-available, httpd.conf for possible virtual hosts
    paths = self.aug.match(
        ("/files%s/sites-available//*[label()=~regexp('%s')]" %
         (self.parser.root, parser.case_i("VirtualHost"))))
    return [self._create_vhost(path) for path in paths]
def is_name_vhost(self, target_addr):
    """Returns truthy when target_addr is a name-based vhost address.

    Mixed and matched wildcard NameVirtualHost with VirtualHost
    behavior is undefined, so an exact NameVirtualHost match is
    required on Apache < 2.4. Note: target_addr can be an FQDN
    although Apache does not recommend it.

    :param target_addr: address to check
    :type target_addr: :class:`~letsencrypt_apache.obj.Addr`

    """
    # Apache 2.4+ treats every vhost as name-based automatically
    if self.version >= (2, 4):
        return True
    return self.parser.find_dir("NameVirtualHost", str(target_addr))
def add_name_vhost(self, addr):
    """Adds NameVirtualHost directive for given address.

    :param addr: Address that will be added as NameVirtualHost directive
    :type addr: :class:`~letsencrypt_apache.obj.Addr`

    """
    loc = parser.get_aug_path(self.parser.loc["name"])
    if addr.get_port() == "443":
        # Wrap 443 directives in <IfModule mod_ssl.c> so the config stays
        # valid when mod_ssl is disabled
        path = self.parser.add_dir_to_ifmodssl(
            loc, "NameVirtualHost", [str(addr)])
    else:
        path = self.parser.add_dir(loc, "NameVirtualHost", [str(addr)])

    msg = ("Setting %s to be NameBasedVirtualHost\n"
           "\tDirective added to %s\n" % (addr, path))
    logger.debug(msg)
    self.save_notes += msg
def prepare_server_https(self, port, temp=False):
    """Prepare the server for HTTPS.

    Enables the ssl module and makes sure the server listens on the
    given port, mirroring the specificity (bare port vs ip:port) of the
    existing Listen directives.

    :param str port: Port to listen on
    :param bool temp: If this is just temporary

    """
    if "ssl_module" not in self.parser.modules:
        self.enable_mod("ssl", temp=temp)

    # Check for Listen <port>
    # Note: This could be made to also look for ip:443 combo
    listens = [self.parser.get_arg(x).split()[0] for x in self.parser.find_dir("Listen")]

    # In case no Listens are set (which really is a broken apache config)
    if not listens:
        listens = ["80"]

    for listen in listens:
        # For any listen statement, check if the machine also listens on
        # the target port. If not, add such a listen statement.
        if len(listen.split(":")) == 1:
            # Its listening to all interfaces
            if port not in listens:
                if port == "443":
                    args = [port]
                else:
                    # Non-standard ports should specify https protocol
                    args = [port, "https"]
                self.parser.add_dir_to_ifmodssl(
                    parser.get_aug_path(
                        self.parser.loc["listen"]), "Listen", args)
                self.save_notes += "Added Listen %s directive to %s\n" % (
                    port, self.parser.loc["listen"])
                listens.append(port)
        else:
            # The Listen statement specifies an ip
            # Reverse-split so IPv6 addresses containing ':' keep their
            # full ip portion; only the trailing port is stripped.
            _, ip = listen[::-1].split(":", 1)
            ip = ip[::-1]
            if "%s:%s" % (ip, port) not in listens:
                if port == "443":
                    args = ["%s:%s" % (ip, port)]
                else:
                    # Non-standard ports should specify https protocol
                    args = ["%s:%s" % (ip, port), "https"]
                self.parser.add_dir_to_ifmodssl(
                    parser.get_aug_path(
                        self.parser.loc["listen"]), "Listen", args)
                self.save_notes += "Added Listen %s:%s directive to %s\n" % (
                    ip, port, self.parser.loc["listen"])
                listens.append("%s:%s" % (ip, port))
def make_addrs_sni_ready(self, addrs):
    """Ensure the given addresses are configured as name-based vhosts.

    On Apache < 2.4, each address needs an explicit NameVirtualHost
    directive; 2.4 and later are automatically SNI ready.

    :param addrs: addresses to make SNI ready
    :type addrs: iterable of :class:`~letsencrypt_apache.obj.Addr`

    """
    # Version 2.4 and later are automatically SNI ready.
    if self.version >= (2, 4):
        return

    for addr in addrs:
        if self.is_name_vhost(addr):
            continue
        logger.debug("Setting VirtualHost at %s to be a name "
                     "based virtual host", addr)
        self.add_name_vhost(addr)
def make_vhost_ssl(self, nonssl_vhost):  # pylint: disable=too-many-locals
    """Makes an ssl_vhost version of a nonssl_vhost.

    Duplicates the vhost file inside an <IfModule mod_ssl.c> wrapper,
    rewrites its addresses to port 443 and adds placeholder SSL
    directives.

    :param nonssl_vhost: Vhost source of new vhost
    :type nonssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`

    :returns: SSL vhost
    :rtype: :class:`~letsencrypt_apache.obj.VirtualHost`

    :raises .errors.PluginError: If more than one vhost is in the source
        file

    """
    avail_fp = nonssl_vhost.filep
    ssl_fp = self._get_ssl_vhost_path(avail_fp)

    self._copy_create_ssl_vhost_skeleton(avail_fp, ssl_fp)

    # Reload augeas to take into account the new vhost
    self.aug.load()

    # Get Vhost augeas path for new vhost
    vh_p = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
                          (ssl_fp, parser.case_i("VirtualHost")))
    if len(vh_p) != 1:
        logger.error("Error: should only be one vhost in %s", avail_fp)
        raise errors.PluginError("Currently, we only support "
                                 "configurations with one vhost per file")
    else:
        # This simplifies the process
        vh_p = vh_p[0]

    # Update Addresses
    self._update_ssl_vhosts_addrs(vh_p)

    # Add directives
    self._add_dummy_ssl_directives(vh_p)

    # Log actions and create save notes
    logger.info("Created an SSL vhost at %s", ssl_fp)
    self.save_notes += "Created ssl vhost at %s\n" % ssl_fp
    self.save()

    # We know the length is one because of the assertion above
    # Create the Vhost object
    ssl_vhost = self._create_vhost(vh_p)
    self.vhosts.append(ssl_vhost)

    # NOTE: Searches through Augeas seem to ruin changes to directives
    #       The configuration must also be saved before being searched
    #       for the new directives; For these reasons... this is tacked
    #       on after fully creating the new vhost

    # Now check if addresses need to be added as NameBasedVhost addrs
    # This is for compliance with versions of Apache < 2.4
    self._add_name_vhost_if_necessary(ssl_vhost)

    return ssl_vhost
def _get_ssl_vhost_path(self, non_ssl_vh_fp):
    """Derive the file path for the SSL copy of a vhost config file.

    A trailing ".conf" is replaced by the configured le_vhost_ext suffix;
    any other name simply gets the suffix appended.
    """
    suffix = ".conf"
    base = non_ssl_vh_fp
    if base.endswith(suffix):
        base = base[:-len(suffix)]
    return base + self.conf("le_vhost_ext")
def _copy_create_ssl_vhost_skeleton(self, avail_fp, ssl_fp):
    """Copy a vhost config into a new file wrapped in <IfModule mod_ssl.c>.

    :param avail_fp: path of the existing (non-SSL) vhost file.
    :param ssl_fp: destination path for the SSL skeleton.
    :raises errors.PluginError: if the files cannot be read or written.
    """
    # First register the creation so that it is properly removed if
    # configuration is rolled back
    self.reverter.register_file_creation(False, ssl_fp)
    try:
        with open(avail_fp, "r") as orig_file:
            with open(ssl_fp, "w") as new_file:
                new_file.write("<IfModule mod_ssl.c>\n")
                for line in orig_file:
                    new_file.write(line)
                new_file.write("</IfModule>\n")
    except IOError:
        # logger.fatal is a deprecated alias; critical is the real method.
        logger.critical("Error writing/reading to file in make_vhost_ssl")
        raise errors.PluginError("Unable to write/read in make_vhost_ssl")
def _update_ssl_vhosts_addrs(self, vh_path):
    """Rewrite every address argument of the vhost at ``vh_path`` to port 443.

    :param vh_path: augeas path of the VirtualHost node.
    :returns: set of the new SSL ``Addr`` objects.
    """
    ssl_addrs = set()
    ssl_addr_p = self.aug.match(vh_path + "/arg")
    for addr in ssl_addr_p:
        old_addr = obj.Addr.fromstring(
            str(self.parser.get_arg(addr)))
        # Same host/interface, but targeting the HTTPS port.
        ssl_addr = old_addr.get_addr_obj("443")
        self.aug.set(addr, str(ssl_addr))
        ssl_addrs.add(ssl_addr)
    return ssl_addrs
def _clean_vhost(self, vhost):
    """Strip duplicated and known-problematic SSL directives from ``vhost``."""
    # remove duplicated or conflicting ssl directives
    self._deduplicate_directives(vhost.path,
                                 ["SSLCertificateFile", "SSLCertificateKeyFile"])
    # remove all problematic directives
    self._remove_directives(vhost.path, ["SSLCertificateChainFile"])
def _deduplicate_directives(self, vh_path, directives):
    """Reduce each directive in ``directives`` to at most one occurrence.

    Repeatedly removes the first matching directive node until a single
    instance remains.

    :param vh_path: augeas path of the VirtualHost to clean.
    :param directives: iterable of directive names to deduplicate.
    """
    for directive in directives:
        while True:
            # Query once per pass instead of twice (condition + body).
            matches = self.parser.find_dir(directive, None, vh_path, False)
            if len(matches) <= 1:
                break
            self.aug.remove(re.sub(r"/\w*$", "", matches[0]))
def _remove_directives(self, vh_path, directives):
    """Remove every occurrence of each directive in ``directives``.

    :param vh_path: augeas path of the VirtualHost to clean.
    :param directives: iterable of directive names to remove entirely.
    """
    for directive in directives:
        while True:
            # Query once per pass instead of twice (condition + body).
            matches = self.parser.find_dir(directive, None, vh_path, False)
            if not matches:
                break
            self.aug.remove(re.sub(r"/\w*$", "", matches[0]))
def _add_dummy_ssl_directives(self, vh_path):
    """Insert placeholder SSL cert/key directives plus the mod_ssl include.

    The "insert_..._path" values are placeholders, not real file paths.
    """
    self.parser.add_dir(vh_path, "SSLCertificateFile",
                        "insert_cert_file_path")
    self.parser.add_dir(vh_path, "SSLCertificateKeyFile",
                        "insert_key_file_path")
    self.parser.add_dir(vh_path, "Include", self.mod_ssl_conf)
def _add_name_vhost_if_necessary(self, vhost):
    """Enable NameVirtualHost for any of ``vhost``'s addresses that need it.

    An address needs it when the exact same address also appears in a vhost
    from a different file and is not yet a name-based virtual host.
    """
    need_to_save = False
    # See if the exact address appears in any other vhost
    # Remember 1.1.1.1:* == 1.1.1.1 -> hence any()
    for addr in vhost.addrs:
        for test_vh in self.vhosts:
            if (vhost.filep != test_vh.filep and
                    any(test_addr == addr for test_addr in test_vh.addrs) and
                    not self.is_name_vhost(addr)):
                self.add_name_vhost(addr)
                logger.info("Enabling NameVirtualHosts on %s", addr)
                need_to_save = True
    if need_to_save:
        self.save()
############################################################################
# Enhancements
############################################################################
def supported_enhancements(self):  # pylint: disable=no-self-use
    """Return the enhancement identifiers this configurator implements."""
    return ["redirect", "ensure-http-header"]
def enhance(self, domain, enhancement, options=None):
    """Apply the named enhancement to the vhost serving ``domain``.

    :param domain: domain whose vhost should be enhanced.
    :param enhancement: key into ``self._enhance_func`` (one of the values
        returned by ``supported_enhancements``).
    :param options: enhancement-specific options, passed through untouched.
    :raises errors.PluginError: if the enhancement is unknown or fails.
    """
    try:
        func = self._enhance_func[enhancement]
    except KeyError:
        raise errors.PluginError(
            "Unsupported enhancement: {0}".format(enhancement))
    try:
        func(self.choose_vhost(domain), options)
    except errors.PluginError:
        # logger.warn is a deprecated alias of logger.warning.
        logger.warning("Failed %s for %s", enhancement, domain)
        raise
def _set_http_header(self, ssl_vhost, header_substring):
    """Add the configured Header directive for ``header_substring`` to a vhost.

    Enables mod_headers when it is not loaded, and refuses to duplicate an
    already-present matching Header directive.

    :raises errors.PluginEnhancementAlreadyPresent: if an equivalent Header
        directive already exists (raised by the verification step).
    """
    if "headers_module" not in self.parser.modules:
        self.enable_mod("headers")
    # Check if selected header is already set
    self._verify_no_matching_http_header(ssl_vhost, header_substring)
    # Add directives to server
    self.parser.add_dir(ssl_vhost.path, "Header",
                        constants.HEADER_ARGS[header_substring])
    self.save_notes += ("Adding %s header to ssl vhost in %s\n" %
                        (header_substring, ssl_vhost.filep))
    self.save()
    logger.info("Adding %s header to ssl vhost in %s", header_substring,
                ssl_vhost.filep)
def _verify_no_matching_http_header(self, ssl_vhost, header_substring):
    """Raise if the vhost already has a Header directive matching the substring.

    The match is case-insensitive and requires the substring to appear as a
    whole (space- or quote-delimited) token.

    :raises errors.PluginEnhancementAlreadyPresent: on a match.
    """
    header_path = self.parser.find_dir("Header", None, start=ssl_vhost.path)
    if header_path:
        # "Existing Header directive for virtualhost"
        pat = '(?:[ "]|^)(%s)(?:[ "]|$)' % (header_substring.lower())
        for match in header_path:
            if re.search(pat, self.aug.get(match).lower()):
                raise errors.PluginEnhancementAlreadyPresent(
                    "Existing %s header" % (header_substring))
def _enable_redirect(self, ssl_vhost, unused_options):
    """Configure an HTTP->HTTPS RewriteRule redirect for ``ssl_vhost``.

    If no matching plain-HTTP vhost exists, a dedicated port-80 redirect
    vhost is created; otherwise rewrite directives are appended to the
    existing one.

    :raises errors.PluginError: if no HTTP vhost exists and one cannot be
        created because the intended addresses conflict.
    :raises errors.PluginEnhancementAlreadyPresent: if the Let's Encrypt
        redirect is already configured (from the verification step).
    """
    if "rewrite_module" not in self.parser.modules:
        self.enable_mod("rewrite")
    general_vh = self._get_http_vhost(ssl_vhost)
    if general_vh is None:
        # Add virtual_server with redirect
        logger.debug("Did not find http version of ssl virtual host "
                     "attempting to create")
        redirect_addrs = self._get_proposed_addrs(ssl_vhost)
        for vhost in self.vhosts:
            if vhost.enabled and vhost.conflicts(redirect_addrs):
                raise errors.PluginError(
                    "Unable to find corresponding HTTP vhost; "
                    "Unable to create one as intended addresses conflict; "
                    "Current configuration does not support automated "
                    "redirection")
        self._create_redirect_vhost(ssl_vhost)
    else:
        # Check if LetsEncrypt redirection already exists
        self._verify_no_letsencrypt_redirect(general_vh)
        # Note: if code flow gets here it means we didn't find the exact
        # letsencrypt RewriteRule config for redirection. Finding
        # another RewriteRule is likely to be fine in most or all cases,
        # but redirect loops are possible in very obscure cases; see #1620
        # for reasoning.
        if self._is_rewrite_exists(general_vh):
            # logger.warn is a deprecated alias of logger.warning.
            logger.warning("Added an HTTP->HTTPS rewrite in addition to "
                           "other RewriteRules; you may wish to check for "
                           "overall consistency.")
        # Add directives to server
        # Note: These are not immediately searchable in sites-enabled
        # even with save() and load()
        if not self._is_rewrite_engine_on(general_vh):
            self.parser.add_dir(general_vh.path, "RewriteEngine", "on")
        if self.get_version() >= (2, 3, 9):
            self.parser.add_dir(general_vh.path, "RewriteRule",
                                constants.REWRITE_HTTPS_ARGS_WITH_END)
        else:
            self.parser.add_dir(general_vh.path, "RewriteRule",
                                constants.REWRITE_HTTPS_ARGS)
        self.save_notes += ("Redirecting host in %s to ssl vhost in %s\n" %
                            (general_vh.filep, ssl_vhost.filep))
        self.save()
        logger.info("Redirecting vhost in %s to ssl vhost in %s",
                    general_vh.filep, ssl_vhost.filep)
def _verify_no_letsencrypt_redirect(self, vhost):
    """Raise if the vhost already contains the Let's Encrypt redirect rule.

    Groups RewriteRule argument nodes by their directive id and compares
    each resulting argument list against the known redirect argument sets.

    :raises errors.PluginEnhancementAlreadyPresent: if redirection is
        already enabled.
    """
    rewrite_path = self.parser.find_dir(
        "RewriteRule", None, start=vhost.path)
    # There can be other RewriteRule directive lines in vhost config.
    # rewrite_args_dict keys are directive ids and the corresponding value
    # for each is a list of arguments to that directive.
    rewrite_args_dict = defaultdict(list)
    pat = r'.*(directive\[\d+\]).*'
    for match in rewrite_path:
        m = re.match(pat, match)
        if m:
            dir_id = m.group(1)
            rewrite_args_dict[dir_id].append(match)
    if rewrite_args_dict:
        redirect_args = [constants.REWRITE_HTTPS_ARGS,
                         constants.REWRITE_HTTPS_ARGS_WITH_END]
        for matches in rewrite_args_dict.values():
            if [self.aug.get(x) for x in matches] in redirect_args:
                raise errors.PluginEnhancementAlreadyPresent(
                    "Let's Encrypt has already enabled redirection")
def _is_rewrite_exists(self, vhost):
    """Return True if any RewriteRule directive exists under ``vhost``."""
    return bool(self.parser.find_dir("RewriteRule", None, start=vhost.path))
def _is_rewrite_engine_on(self, vhost):
    """Return a truthy value if ``RewriteEngine on`` is set in the vhost.

    Note: on a match this returns the parsed argument of the first match,
    not a strict boolean; callers rely on truthiness only.
    """
    rewrite_engine_path = self.parser.find_dir("RewriteEngine", "on",
                                               start=vhost.path)
    if rewrite_engine_path:
        return self.parser.get_arg(rewrite_engine_path[0])
    return False
def _create_redirect_vhost(self, ssl_vhost):
    """Write and register a new port-80 vhost that redirects to ``ssl_vhost``."""
    text = self._get_redirect_config_str(ssl_vhost)
    redirect_filepath = self._write_out_redirect(ssl_vhost, text)
    # Pick up the freshly written file in augeas.
    self.aug.load()
    # Make a new vhost data structure and add it to the lists
    new_vhost = self._create_vhost(parser.get_aug_path(redirect_filepath))
    self.vhosts.append(new_vhost)
    # Finally create documentation for the change
    self.save_notes += ("Created a port 80 vhost, %s, for redirection to "
                        "ssl vhost %s\n" %
                        (new_vhost.filep, ssl_vhost.filep))
def _get_redirect_config_str(self, ssl_vhost):
    """Render the configuration text for a port-80 redirect VirtualHost.

    Mirrors the ssl vhost's ServerName/ServerAlias and selects the
    RewriteRule argument set based on the Apache version.
    """
    # get servernames and serveraliases
    serveralias = ""
    servername = ""
    if ssl_vhost.name is not None:
        servername = "ServerName " + ssl_vhost.name
    if ssl_vhost.aliases:
        serveralias = "ServerAlias " + " ".join(ssl_vhost.aliases)
    rewrite_rule_args = []
    if self.get_version() >= (2, 3, 9):
        # Newer Apache gets the "..._WITH_END" argument variant.
        rewrite_rule_args = constants.REWRITE_HTTPS_ARGS_WITH_END
    else:
        rewrite_rule_args = constants.REWRITE_HTTPS_ARGS
    return ("<VirtualHost %s>\n"
            "%s \n"
            "%s \n"
            "ServerSignature Off\n"
            "\n"
            "RewriteEngine On\n"
            "RewriteRule %s\n"
            "\n"
            "ErrorLog /var/log/apache2/redirect.error.log\n"
            "LogLevel warn\n"
            "</VirtualHost>\n"
            % (" ".join(str(addr) for addr in self._get_proposed_addrs(ssl_vhost)),
               servername, serveralias,
               " ".join(rewrite_rule_args)))
def _write_out_redirect(self, ssl_vhost, text):
    """Write the redirect vhost ``text`` into sites-available and return its path.

    The file name embeds the server name when it fits within the 255-char
    filename limit; otherwise a generic default name is used.
    """
    # This is the default name
    redirect_filename = "le-redirect.conf"
    # See if a more appropriate name can be applied
    if ssl_vhost.name is not None:
        # make sure servername doesn't exceed filename length restriction
        if len(ssl_vhost.name) < (255 - (len(redirect_filename) + 1)):
            redirect_filename = "le-redirect-%s.conf" % ssl_vhost.name
    redirect_filepath = os.path.join(
        self.parser.root, "sites-available", redirect_filename)
    # Register the new file that will be created
    # Note: always register the creation before writing to ensure file will
    # be removed in case of unexpected program exit
    self.reverter.register_file_creation(False, redirect_filepath)
    # Write out file
    with open(redirect_filepath, "w") as redirect_file:
        redirect_file.write(text)
    logger.info("Created redirect file: %s", redirect_filename)
    return redirect_filepath
def _get_http_vhost(self, ssl_vhost):
    """Find the plain-HTTP vhost serving the same names as ``ssl_vhost``.

    :returns: the first matching non-SSL vhost, or None if there is none.
    """
    for candidate in self.vhosts:
        if candidate.ssl:
            continue
        if candidate.same_server(ssl_vhost):
            return candidate
    return None
def _get_proposed_addrs(self, vhost, port="80"):  # pylint: disable=no-self-use
    """Return the set of the vhost's addresses re-targeted at ``port``."""
    return set(addr.get_addr_obj(port) for addr in vhost.addrs)
def get_all_certs_keys(self):
    """Collect (cert, key, config-file) triples from every SSL vhost.

    Vhosts missing either the certificate or key directive are logged as
    invalid and skipped.

    :returns: set of (cert_path, key_path, file-containing-cert-directive).
    """
    c_k = set()
    for vhost in self.vhosts:
        if vhost.ssl:
            cert_path = self.parser.find_dir(
                "SSLCertificateFile", None,
                start=vhost.path, exclude=False)
            key_path = self.parser.find_dir(
                "SSLCertificateKeyFile", None,
                start=vhost.path, exclude=False)
            if cert_path and key_path:
                # Only the last occurrence of each directive is used.
                cert = os.path.abspath(self.parser.get_arg(cert_path[-1]))
                key = os.path.abspath(self.parser.get_arg(key_path[-1]))
                c_k.add((cert, key, get_file_path(cert_path[-1])))
            else:
                logger.warning(
                    "Invalid VirtualHost configuration - %s", vhost.filep)
    return c_k
def is_site_enabled(self, avail_fp):
    """Return True if ``avail_fp`` matches an entry in sites-enabled.

    Comparison is done with filecmp; OSError from an individual comparison
    (e.g. an unreadable entry) is deliberately ignored as best-effort.
    """
    enabled_dir = os.path.join(self.parser.root, "sites-enabled")
    for entry in os.listdir(enabled_dir):
        try:
            if filecmp.cmp(avail_fp, os.path.join(enabled_dir, entry)):
                return True
        except OSError:
            # Best effort: entries that cannot be compared just don't match.
            pass
    return False
def enable_site(self, vhost):
    """Symlink an available site into sites-enabled and mark it enabled.

    No-op when the site is already enabled.

    :raises errors.NotSupportedError: if the configuration does not use a
        sites-available/sites-enabled layout.
    """
    if self.is_site_enabled(vhost.filep):
        return
    if "/sites-available/" in vhost.filep:
        enabled_path = ("%s/sites-enabled/%s" %
                        (self.parser.root, os.path.basename(vhost.filep)))
        # Register first so rollback removes the symlink on failure.
        self.reverter.register_file_creation(False, enabled_path)
        os.symlink(vhost.filep, enabled_path)
        vhost.enabled = True
        logger.info("Enabling available site: %s", vhost.filep)
        self.save_notes += "Enabled site %s\n" % vhost.filep
    else:
        raise errors.NotSupportedError(
            "Unsupported filesystem layout. "
            "sites-available/enabled expected.")
def enable_mod(self, mod_name, temp=False):
    """Enable an Apache module and its known dependencies.

    :param mod_name: module to enable (without the "mod_" prefix).
    :param temp: if True, register the change as temporary and skip the
        save notes.
    :raises errors.NotSupportedError: if mods-available/mods-enabled are
        missing (non-Debian layout).
    """
    # Support Debian specific setup
    avail_path = os.path.join(self.parser.root, "mods-available")
    enabled_path = os.path.join(self.parser.root, "mods-enabled")
    if not os.path.isdir(avail_path) or not os.path.isdir(enabled_path):
        raise errors.NotSupportedError(
            "Unsupported directory layout. You may try to enable mod %s "
            "and try again." % mod_name)
    deps = _get_mod_deps(mod_name)
    # Enable all dependencies
    for dep in deps:
        if (dep + "_module") not in self.parser.modules:
            self._enable_mod_debian(dep, temp)
            self._add_parser_mod(dep)
            note = "Enabled dependency of %s module - %s" % (mod_name, dep)
            if not temp:
                self.save_notes += note + os.linesep
            logger.debug(note)
    # Enable actual module
    self._enable_mod_debian(mod_name, temp)
    self._add_parser_mod(mod_name)
    if not temp:
        self.save_notes += "Enabled %s module in Apache\n" % mod_name
    logger.info("Enabled Apache %s module", mod_name)
    # Modules can enable additional config files. Variables may be defined
    # within these new configuration sections.
    # Reload is not necessary as DUMP_RUN_CFG uses latest config.
    self.parser.update_runtime_variables(self.conf("ctl"))
def _add_parser_mod(self, mod_name):
    """Record both spellings of ``mod_name`` in the parser's module set."""
    for entry in (mod_name + "_module", "mod_" + mod_name + ".c"):
        self.parser.modules.add(entry)
def _enable_mod_debian(self, mod_name, temp):
    """Run a2enmod for ``mod_name``, registering a2dismod as the undo command.

    :raises errors.MisconfigurationError: if a2dismod cannot be found,
        because the change could not be reversed safely.
    """
    # Generate reversal command.
    # Try to be safe here... check that we can probably reverse before
    # applying enmod command
    if not le_util.exe_exists(self.conf("dismod")):
        raise errors.MisconfigurationError(
            "Unable to find a2dismod, please make sure a2enmod and "
            "a2dismod are configured correctly for letsencrypt.")
    self.reverter.register_undo_command(
        temp, [self.conf("dismod"), mod_name])
    le_util.run_script([self.conf("enmod"), mod_name])
def restart(self):
    """Validate the configuration, then gracefully reload Apache."""
    self.config_test()
    self._reload()
def _reload(self):
    """Gracefully reload Apache via the configured ctl binary.

    :raises errors.MisconfigurationError: if the reload command fails.
    """
    try:
        le_util.run_script([self.conf("ctl"), "-k", "graceful"])
    except errors.SubprocessError as err:
        raise errors.MisconfigurationError(str(err))
def config_test(self):  # pylint: disable=no-self-use
    """Run the ctl binary's configtest.

    :raises errors.MisconfigurationError: if the configuration is invalid.
    """
    try:
        le_util.run_script([self.conf("ctl"), "configtest"])
    except errors.SubprocessError as err:
        raise errors.MisconfigurationError(str(err))
def get_version(self):
    """Return the Apache version as a tuple of ints, e.g. (2, 4, 7).

    :raises errors.PluginError: if running ``ctl -v`` fails or exactly one
        version string cannot be located in its output.
    """
    try:
        stdout, _ = le_util.run_script([self.conf("ctl"), "-v"])
    except errors.SubprocessError:
        raise errors.PluginError(
            "Unable to run %s -v" % self.conf("ctl"))
    matches = re.findall(r"Apache/([0-9\.]*)", stdout, re.IGNORECASE)
    if len(matches) != 1:
        raise errors.PluginError("Unable to find Apache version")
    return tuple(int(part) for part in matches[0].split("."))
def more_info(self):
    """Return a human-readable summary of this configurator."""
    return (
        "Configures Apache to authenticate and install HTTPS.{0}"
        "Server root: {root}{0}"
        "Version: {version}".format(
            os.linesep, root=self.parser.loc["root"],
            version=".".join(str(i) for i in self.version))
    )
###########################################################################
# Challenges Section
###########################################################################
def get_chall_pref(self, unused_domain):  # pylint: disable=no-self-use
    """Return the supported challenge types, most preferred first."""
    return [challenges.TLSSNI01]
def perform(self, achalls):
    """Set up all TLS-SNI-01 challenges and return responses in input order.

    :param achalls: annotated challenges to perform.
    :returns: list of responses aligned with ``achalls`` (None placeholders
        for anything that produced no response).
    """
    self._chall_out.update(achalls)
    responses = [None] * len(achalls)
    chall_doer = tls_sni_01.ApacheTlsSni01(self)
    for i, achall in enumerate(achalls):
        # Currently also have chall_doer hold associated index of the
        # challenge. This helps to put all of the responses back together
        # when they are all complete.
        chall_doer.add_chall(achall, i)
    sni_response = chall_doer.perform()
    if sni_response:
        # Must reload in order to activate the challenges.
        # Handled here because we may be able to load up other challenge
        # types
        self.restart()
        # TODO: Remove this dirty hack. We need to determine a reliable way
        # of identifying when the new configuration is being used.
        time.sleep(3)
        # Go through all of the challenges and assign them to the proper
        # place in the responses return value. All responses must be in the
        # same order as the original challenges.
        for i, resp in enumerate(sni_response):
            responses[chall_doer.indices[i]] = resp
    return responses
def cleanup(self, achalls):
    """Forget the given challenges; revert challenge config once all are done."""
    self._chall_out.difference_update(achalls)
    # If all of the challenges have been finished, clean up everything
    if not self._chall_out:
        self.revert_challenge_config()
        self.restart()
        self.parser.init_modules()
def _get_mod_deps(mod_name):
deps = {
"ssl": ["setenvif", "mime", "socache_shmcb"]
}
return deps.get(mod_name, [])
def get_file_path(vhost_path):
    """Map an augeas vhost path back to the configuration file path on disk.

    Strips the leading "/files" prefix and repeatedly truncates at any
    IfModule/VirtualHost/Macro path component (case-insensitively), leaving
    only the plain filesystem path.

    :param vhost_path: augeas path such as
        "/files/etc/apache2/sites-available/default.conf/VirtualHost".
    :returns: filesystem path of the configuration file.
    """
    # Strip off /files
    avail_fp = vhost_path[6:]
    # Repeatedly trim at the first section marker until none remain.
    # (Replaces the original open-coded scan loop, which carried a
    # "This can be optimized..." note.)
    markers = ("/ifmodule", "/virtualhost", "/macro")
    trimmed = True
    while trimmed:
        trimmed = False
        lowered = avail_fp.lower()
        for marker in markers:
            idx = lowered.find(marker)
            if idx != -1:
                avail_fp = avail_fp[:idx]
                trimmed = True
                break
    return avail_fp
def install_ssl_options_conf(options_ssl):
    """Copy the bundled mod_ssl options file into place if not yet installed.

    An existing file is never overwritten.
    """
    # XXX if we ever try to enforce a local privilege boundary (eg, running
    # letsencrypt for unprivileged users via setuid), this function will need
    # to be modified.
    # XXX if the user is in security-autoupdate mode, we should be willing to
    # overwrite the options_ssl file at least if it's unmodified:
    # https://github.com/letsencrypt/letsencrypt/issues/1123
    # Check to make sure options-ssl.conf is installed
    if not os.path.isfile(options_ssl):
        shutil.copyfile(constants.MOD_SSL_CONF_SRC, options_ssl)
| true | true |
f724930ad6413116333562d13934d7c53d5d2f11 | 1,084 | py | Python | geocode-run.py | kmcurry/story-where | 8bb1e9cc73f4f6aefb8edf40137ab55d1ecde3f6 | [
"MIT"
] | null | null | null | geocode-run.py | kmcurry/story-where | 8bb1e9cc73f4f6aefb8edf40137ab55d1ecde3f6 | [
"MIT"
] | 25 | 2020-01-02T23:55:30.000Z | 2020-03-15T21:25:16.000Z | geocode-run.py | kmcurry/story-where | 8bb1e9cc73f4f6aefb8edf40137ab55d1ecde3f6 | [
"MIT"
] | null | null | null | from utils.db import Database
from datetime import datetime
import hashlib
import json
import os
import requests

# Google Maps Geocoding API endpoint; the API key is read from the
# environment so it never has to live in the repository.
url = 'https://maps.googleapis.com/maps/api/geocode/json'
api_key = os.environ['GOOGLE_MAPS_API_KEY']
params = {'key': api_key, 'address': 'Mountain View, CA'}

print("Downloading entities")
db = Database()
entities = db.get_entities_to_geocode()
print("Entities downloaded", len(entities))

for entity in entities:
    print(entity.name)
    # MD5 of the entity name serves as a stable, filesystem-safe cache key.
    h = hashlib.md5(bytes(entity.name, encoding='utf-8')).hexdigest()
    print(h)
    # NOTE(review): Windows-style path separators — presumably this script
    # only runs on Windows; confirm before running elsewhere.
    outfile = ".\\geocode_results\\" + h + ".json"
    print(outfile)
    if os.path.exists(outfile):
        # Result already cached by a previous run; skip the API call.
        print("Skipping because Geocode results already exist")
        continue
    if not os.path.exists(os.path.dirname(outfile)):
        os.makedirs(os.path.dirname(outfile))
    params['address'] = entity.name
    r = requests.get(url, params=params)
    results = r.json()
    # Record what was asked and when, alongside Google's response.
    results['address'] = entity.name
    results['collected_utc_date'] = str(datetime.utcnow())
    with open(outfile, 'w') as f:
        json.dump(results, f)
| 26.439024 | 69 | 0.685424 | from utils.db import Database
from datetime import datetime
import hashlib
import json
import os
import requests
url = 'https://maps.googleapis.com/maps/api/geocode/json'
api_key = os.environ['GOOGLE_MAPS_API_KEY']
params = {'key': api_key, 'address': 'Mountain View, CA'}
print("Downloading entities")
db = Database()
entities = db.get_entities_to_geocode()
print("Entities downloaded", len(entities))
for entity in entities:
print(entity.name)
h = hashlib.md5(bytes(entity.name, encoding='utf-8')).hexdigest()
print(h)
outfile = ".\\geocode_results\\" + h + ".json"
print(outfile)
if os.path.exists(outfile):
print("Skipping because Geocode results already exist")
continue
if not os.path.exists(os.path.dirname(outfile)):
os.makedirs(os.path.dirname(outfile))
params['address'] = entity.name
r = requests.get(url, params=params)
results = r.json()
results['address'] = entity.name
results['collected_utc_date'] = str(datetime.utcnow())
with open(outfile, 'w') as f:
json.dump(results, f)
| true | true |
f724934d8bc8300e61211e06181ef9feb6293b80 | 4,506 | py | Python | generate.py | djohansson/volk | a6418b5ea289e9130429717654571cf89d603fdc | [
"MIT",
"Unlicense"
] | 1 | 2018-12-23T11:04:22.000Z | 2018-12-23T11:04:22.000Z | Common_3/ThirdParty/OpenSource/volk/generate.py | cmkandpane/The-Forge | 63a3c3038c1b48184b207ebeed5a5548d52648e6 | [
"Apache-2.0"
] | null | null | null | Common_3/ThirdParty/OpenSource/volk/generate.py | cmkandpane/The-Forge | 63a3c3038c1b48184b207ebeed5a5548d52648e6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
from collections import OrderedDict
import sys
import urllib
import xml.etree.ElementTree as etree
import urllib.request
def parse_xml(path):
    """Parse the Vulkan registry XML from a local file or an http(s) URL."""
    source = urllib.request.urlopen(path) if path.startswith("http") else open(path, 'r')
    with source:
        return etree.parse(source)
def patch_file(path, blocks):
    """Rewrite generated sections of the file at *path* in place.

    A generated section is delimited by a pair of identical marker lines of
    the form ``/* VOLK_GENERATE_<KEY> */``; everything between the markers
    is replaced with ``blocks[<KEY>]``.
    """
    output = []
    closing_marker = None
    with open(path, 'r') as source:
        for line in source.readlines():
            if closing_marker is not None:
                # Inside a generated section: drop the old content until
                # the matching end marker comes around.
                if line == closing_marker:
                    output.append(line)
                    closing_marker = None
                continue
            output.append(line)
            if line.strip().startswith('/* VOLK_GENERATE_'):
                closing_marker = line
                # The key is the text between the marker prefix and " */".
                output.append(blocks[line.strip()[17:-3]])
    with open(path, 'w') as sink:
        for line in output:
            sink.write(line)
def is_descendant_type(types, name, base):
    """Return True if type *name* is *base* or transitively inherits from it.

    *types* maps type names to registry XML elements; parents come from the
    comma-separated ``parent`` attribute.  The original truthiness test on
    the element (falsy when it has no children) is preserved on purpose.
    """
    if name == base:
        return True
    node = types.get(name)
    if not node:
        return False
    parent_attr = node.get('parent')
    if not parent_attr:
        return False
    return any(is_descendant_type(types, parent, base)
               for parent in parent_attr.split(','))
def defined(key):
    """Wrap a name in a C preprocessor ``defined(...)`` test."""
    return 'defined({0})'.format(key)
if __name__ == "__main__":
    # Spec source defaults to the upstream Vulkan registry; an explicit
    # path/URL may be given as the first CLI argument.
    specpath = "https://raw.githubusercontent.com/KhronosGroup/Vulkan-Docs/master/xml/vk.xml"
    if len(sys.argv) > 1:
        specpath = sys.argv[1]
    spec = parse_xml(specpath)
    # Names of the generated sections patched into volk.h / volk.c.
    block_keys = ('DEVICE_TABLE', 'PROTOTYPES_H', 'PROTOTYPES_C', 'LOAD_LOADER', 'LOAD_INSTANCE', 'LOAD_DEVICE', 'LOAD_DEVICE_TABLE')
    blocks = {}
    version = spec.find('types/type[name="VK_HEADER_VERSION"]')
    blocks['VERSION'] = '#define VOLK_HEADER_VERSION ' + version.find('name').tail.strip() + '\n'
    # Map preprocessor guard expression -> commands introduced under it.
    command_groups = OrderedDict()
    for feature in spec.findall('feature'):
        key = defined(feature.get('name'))
        cmdrefs = feature.findall('require/command')
        command_groups[key] = [cmdref.get('name') for cmdref in cmdrefs]
    # Extensions are sorted by name for stable output; a require block may
    # add extra feature/extension conditions to the guard.
    for ext in sorted(spec.findall('extensions/extension'), key=lambda ext: ext.get('name')):
        name = ext.get('name')
        for req in ext.findall('require'):
            key = defined(name)
            if req.get('feature'):
                key += ' && ' + defined(req.get('feature'))
            if req.get('extension'):
                key += ' && ' + defined(req.get('extension'))
            cmdrefs = req.findall('command')
            command_groups.setdefault(key, []).extend([cmdref.get('name') for cmdref in cmdrefs])
    # Invert the mapping: command name -> list of guards it appears under.
    commands_to_groups = OrderedDict()
    for (group, cmdnames) in command_groups.items():
        for name in cmdnames:
            commands_to_groups.setdefault(name, []).append(group)
    # Commands that belong to several guards are moved into a combined
    # "(guard1) || (guard2)" group so each command is emitted exactly once.
    for (group, cmdnames) in command_groups.items():
        command_groups[group] = [name for name in cmdnames if len(commands_to_groups[name]) == 1]
    for (name, groups) in commands_to_groups.items():
        if len(groups) == 1:
            continue
        key = ' || '.join(['(' + g + ')' for g in groups])
        command_groups.setdefault(key, []).append(name)
    # Resolve command aliases to their canonical definitions in two passes.
    commands = {}
    for cmd in spec.findall('commands/command'):
        if not cmd.get('alias'):
            name = cmd.findtext('proto/name')
            commands[name] = cmd
    for cmd in spec.findall('commands/command'):
        if cmd.get('alias'):
            name = cmd.get('name')
            commands[name] = commands[cmd.get('alias')]
    types = {}
    for type in spec.findall('types/type'):
        name = type.findtext('name')
        if name:
            types[name] = type
    for key in block_keys:
        blocks[key] = ''
    for (group, cmdnames) in command_groups.items():
        ifdef = '#if ' + group + '\n'
        for key in block_keys:
            blocks[key] += ifdef
        for name in sorted(cmdnames):
            cmd = commands[name]
            # Dispatch is decided by the type of the command's first
            # parameter, with two explicit proc-addr exceptions.
            type = cmd.findtext('param[1]/type')
            if name == 'vkGetInstanceProcAddr':
                type = ''
            if name == 'vkGetDeviceProcAddr':
                type = 'VkInstance'
            if is_descendant_type(types, type, 'VkDevice'):
                blocks['LOAD_DEVICE'] += '\t' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
                blocks['DEVICE_TABLE'] += '\tPFN_' + name + ' ' + name + ';\n'
                blocks['LOAD_DEVICE_TABLE'] += '\ttable->' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
            elif is_descendant_type(types, type, 'VkInstance'):
                blocks['LOAD_INSTANCE'] += '\t' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
            elif type != '':
                blocks['LOAD_LOADER'] += '\t' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
            blocks['PROTOTYPES_H'] += 'extern PFN_' + name + ' ' + name + ';\n'
            blocks['PROTOTYPES_C'] += 'PFN_' + name + ' ' + name + ';\n'
        # Drop empty #if/#endif pairs: if nothing was emitted after the
        # opening ifdef, remove it instead of closing it.
        for key in block_keys:
            if blocks[key].endswith(ifdef):
                blocks[key] = blocks[key][:-len(ifdef)]
            else:
                blocks[key] += '#endif /* ' + group + ' */\n'
    patch_file('volk.h', blocks)
    patch_file('volk.c', blocks)
| 29.25974 | 130 | 0.640923 |
from collections import OrderedDict
import sys
import urllib
import xml.etree.ElementTree as etree
import urllib.request
def parse_xml(path):
file = urllib.request.urlopen(path) if path.startswith("http") else open(path, 'r')
with file:
tree = etree.parse(file)
return tree
def patch_file(path, blocks):
result = []
block = None
with open(path, 'r') as file:
for line in file.readlines():
if block:
if line == block:
result.append(line)
block = None
else:
result.append(line)
if line.strip().startswith('/* VOLK_GENERATE_'):
block = line
result.append(blocks[line.strip()[17:-3]])
with open(path, 'w') as file:
for line in result:
file.write(line)
def is_descendant_type(types, name, base):
if name == base:
return True
type = types.get(name)
if not type:
return False
parents = type.get('parent')
if not parents:
return False
return any([is_descendant_type(types, parent, base) for parent in parents.split(',')])
def defined(key):
return 'defined(' + key + ')'
if __name__ == "__main__":
specpath = "https://raw.githubusercontent.com/KhronosGroup/Vulkan-Docs/master/xml/vk.xml"
if len(sys.argv) > 1:
specpath = sys.argv[1]
spec = parse_xml(specpath)
block_keys = ('DEVICE_TABLE', 'PROTOTYPES_H', 'PROTOTYPES_C', 'LOAD_LOADER', 'LOAD_INSTANCE', 'LOAD_DEVICE', 'LOAD_DEVICE_TABLE')
blocks = {}
version = spec.find('types/type[name="VK_HEADER_VERSION"]')
blocks['VERSION'] = '#define VOLK_HEADER_VERSION ' + version.find('name').tail.strip() + '\n'
command_groups = OrderedDict()
for feature in spec.findall('feature'):
key = defined(feature.get('name'))
cmdrefs = feature.findall('require/command')
command_groups[key] = [cmdref.get('name') for cmdref in cmdrefs]
for ext in sorted(spec.findall('extensions/extension'), key=lambda ext: ext.get('name')):
name = ext.get('name')
for req in ext.findall('require'):
key = defined(name)
if req.get('feature'):
key += ' && ' + defined(req.get('feature'))
if req.get('extension'):
key += ' && ' + defined(req.get('extension'))
cmdrefs = req.findall('command')
command_groups.setdefault(key, []).extend([cmdref.get('name') for cmdref in cmdrefs])
commands_to_groups = OrderedDict()
for (group, cmdnames) in command_groups.items():
for name in cmdnames:
commands_to_groups.setdefault(name, []).append(group)
for (group, cmdnames) in command_groups.items():
command_groups[group] = [name for name in cmdnames if len(commands_to_groups[name]) == 1]
for (name, groups) in commands_to_groups.items():
if len(groups) == 1:
continue
key = ' || '.join(['(' + g + ')' for g in groups])
command_groups.setdefault(key, []).append(name)
commands = {}
for cmd in spec.findall('commands/command'):
if not cmd.get('alias'):
name = cmd.findtext('proto/name')
commands[name] = cmd
for cmd in spec.findall('commands/command'):
if cmd.get('alias'):
name = cmd.get('name')
commands[name] = commands[cmd.get('alias')]
types = {}
for type in spec.findall('types/type'):
name = type.findtext('name')
if name:
types[name] = type
for key in block_keys:
blocks[key] = ''
for (group, cmdnames) in command_groups.items():
ifdef = '#if ' + group + '\n'
for key in block_keys:
blocks[key] += ifdef
for name in sorted(cmdnames):
cmd = commands[name]
type = cmd.findtext('param[1]/type')
if name == 'vkGetInstanceProcAddr':
type = ''
if name == 'vkGetDeviceProcAddr':
type = 'VkInstance'
if is_descendant_type(types, type, 'VkDevice'):
blocks['LOAD_DEVICE'] += '\t' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
blocks['DEVICE_TABLE'] += '\tPFN_' + name + ' ' + name + ';\n'
blocks['LOAD_DEVICE_TABLE'] += '\ttable->' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
elif is_descendant_type(types, type, 'VkInstance'):
blocks['LOAD_INSTANCE'] += '\t' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
elif type != '':
blocks['LOAD_LOADER'] += '\t' + name + ' = (PFN_' + name + ')load(context, "' + name + '");\n'
blocks['PROTOTYPES_H'] += 'extern PFN_' + name + ' ' + name + ';\n'
blocks['PROTOTYPES_C'] += 'PFN_' + name + ' ' + name + ';\n'
for key in block_keys:
if blocks[key].endswith(ifdef):
blocks[key] = blocks[key][:-len(ifdef)]
else:
blocks[key] += '#endif /* ' + group + ' */\n'
patch_file('volk.h', blocks)
patch_file('volk.c', blocks)
| true | true |
f7249360b251c64a2879289eb9c28f008a4fc9e6 | 8,859 | py | Python | packages/dcos-integration-test/extra/test_legacy_user_management.py | wolf31o2/dcos | 113b8abacfd6d517594f329b621aaf4641b535e7 | [
"Apache-2.0"
] | null | null | null | packages/dcos-integration-test/extra/test_legacy_user_management.py | wolf31o2/dcos | 113b8abacfd6d517594f329b621aaf4641b535e7 | [
"Apache-2.0"
] | null | null | null | packages/dcos-integration-test/extra/test_legacy_user_management.py | wolf31o2/dcos | 113b8abacfd6d517594f329b621aaf4641b535e7 | [
"Apache-2.0"
] | null | null | null | """
A collection of tests covering legacy user management in DC/OS.
Legacy user management is considered to be the user management API offered by
`dcos-oauth` up to DC/OS release 1.12.
Assume that access control is activated in Master Admin Router (could be
disabled with `oauth_enabled`) and therefore authenticate individual HTTP
dcos_api_session.
One aspect of legacy DC/OS user management is that once authenticated a user can
add other users. Unauthenticated HTTP dcos_api_session are rejected by Master
Admin Router and user management fails (this is the coarse-grained authorization
model of (open) DC/OS). Here, test that unauthenticated HTTP dcos_api_session
cannot manage users. However, do not test that newly added users can add other
users: in this test suite we are limited to having authentication state for just
a single user available. This is why we can test managing other users only from
that first user's point of view. That is, we can not test that a user (e.g.
user2) which was added by the first user (user1) can add another user (user3).
"""
import logging
import uuid
import pytest
from dcos_test_utils import dcos_cli
from test_helpers import get_expanded_config
__maintainer__ = 'jgehrcke'
__contact__ = 'security-team@mesosphere.io'
log = logging.getLogger(__name__)
# Skip entire module in downstream integration tests.
@pytest.fixture(autouse=True)
def skip_in_downstream():
    """Skip this module when the cluster config exposes a 'security' key.

    The presence of 'security' in the expanded config marks a downstream
    (enterprise) build, where these upstream user-management semantics do
    not apply.
    """
    expanded_config = get_expanded_config()
    if 'security' in expanded_config:
        pytest.skip(
            'Skip upstream-specific user management tests',
            allow_module_level=True
        )
def get_users(apisession):
    """Fetch all users via the ACS API and return them as a dict keyed by uid."""
    response = apisession.get('/acs/api/v1/users')
    response.raise_for_status()
    by_uid = {}
    for record in response.json()['array']:
        by_uid[record['uid']] = record
    return by_uid
def delete_user(apisession, uid):
    """Delete the user with the given uid, asserting the expected 204 reply."""
    response = apisession.delete('/acs/api/v1/users/%s' % (uid, ))
    response.raise_for_status()
    assert response.status_code == 204
@pytest.fixture()
def remove_users_added_by_test(dcos_api_session):
    """Fixture: delete any users the wrapped test created.

    Snapshots the uid set before the test and, in a finally block, deletes
    every uid that appeared afterwards.
    """
    users_before = set(get_users(dcos_api_session))
    log.info('remove_users_added_by_test pre test: users are %s', users_before)
    try:
        yield
    finally:
        users_after = set(get_users(dcos_api_session))
        new_uids = users_after - users_before
        for uid in new_uids:
            log.info('remove_users_added_by_test post test: remove `%s`', uid)
            delete_user(dcos_api_session, uid)
def test_users_get(dcos_api_session):
    """The user collection is non-empty and each user has the required keys."""
    users = get_users(dcos_api_session)
    assert users
    required_keys = ('uid', 'description')
    for userdict in users.values():
        for k in required_keys:
            assert k in userdict
def test_user_put_no_email_uid_empty_body(dcos_api_session):
    """PUT of a non-email uid without a body fails with Bouncer's 400."""
    # This test mainly demonstrates a subtle API difference between dcos-oauth
    # (legacy) and Bouncer.
    r = dcos_api_session.put('/acs/api/v1/users/user1')
    # This is the old behavior in dcos-oauth.
    # assert r.status_code == 500
    # assert 'invalid email' in r.text
    # With Bouncer non-email uids are valid, and the request fails as of the
    # missing request body.
    assert r.status_code == 400
    assert 'Request has bad Content-Type or lacks JSON data' in r.text
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_legacy_user_creation_with_empty_json_doc(dcos_api_session):
    """Empty JSON body: email-style uid creates a remote OIDC user,
    non-email uid is rejected with a helpful 400."""
    # Legacy HTTP clients built for dcos-oauth such as the web UI (up to DC/OS
    # 1.12) might insert users in the following way: uid appears to be an email
    # address, and the JSON document in the request body does not provide a
    # `public_key` or a `password` property (indicating local user), or is
    # empty. The legacy web UI would insert users like that and expect those
    # users to be remote users, usable with the legacy OIDC ID Token login
    # method through the 'https://dcos.auth0.com/' provider. This behavior is
    # maintained in Bouncer for backwards compatibility.
    r = dcos_api_session.put('/acs/api/v1/users/user@domain.foo', json={})
    assert r.status_code == 201, r.text
    # Bouncer annotates the created user (this is new compared to dcos-oauth).
    r = dcos_api_session.get('/acs/api/v1/users/user@domain.foo')
    assert r.json()['provider_type'] == 'oidc'
    assert r.json()['provider_id'] == 'https://dcos.auth0.com/'
    assert r.json()['is_remote'] is True
    # When the uid however does not appear to be an email address the more sane
    # behavior of Bouncer takes effect: an empty (meaningless) JSON body
    # results in a useful error message.
    r = dcos_api_session.put('/acs/api/v1/users/user1', json={})
    assert r.status_code == 400
    assert 'One of `password` or `public_key` must be provided' in r.text
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_user_put_email_uid_and_description(dcos_api_session):
r = dcos_api_session.put(
'/acs/api/v1/users/user1@domain.foo',
json={'description': 'integration test user'}
)
assert r.status_code == 201, r.text
users = get_users(dcos_api_session)
assert len(users) > 1
assert 'user1@domain.foo' in users
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_user_put_with_legacy_body(dcos_api_session):
# The UI up to DC/OS 1.12 sends the `creator_uid` and the `cluster_url`
# properties although they are not used by dcos-oauth. Bouncer supports
# these two properties for legacy reasons. Note(JP): As a follow-up task we
# should change the UI to not send these properties anymore, and then remove
# the properties from Bouncer's UserCreate JSON schema again, ideally within
# the 1.13 development cycle.
r = dcos_api_session.put(
'/acs/api/v1/users/user2@domain.foo',
json={'creator_uid': 'any@thing.bla', 'cluster_url': 'foobar'}
)
assert r.status_code == 201, r.text
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_user_conflict(dcos_api_session):
# Note: the empty request body is not the decisive criterion here.
r = dcos_api_session.put('/acs/api/v1/users/user2@domain.foo', json={})
assert r.status_code == 201, r.text
r = dcos_api_session.put('/acs/api/v1/users/user2@domain.foo', json={})
assert r.status_code == 409, r.text
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_user_delete(dcos_api_session):
r = dcos_api_session.put('/acs/api/v1/users/user6@domain.foo', json={})
r.raise_for_status()
assert r.status_code == 201
r = dcos_api_session.delete('/acs/api/v1/users/user6@domain.foo')
r.raise_for_status()
assert r.status_code == 204
users = get_users(dcos_api_session)
assert 'user6@domain.foo' not in users
def test_user_put_requires_authentication(noauth_api_session):
r = noauth_api_session.put('/acs/api/v1/users/user7@domain.foo', json={})
assert r.status_code == 401, r.text
def test_dynamic_ui_config(dcos_api_session):
r = dcos_api_session.get('/dcos-metadata/ui-config.json')
data = r.json()
assert not data['clusterConfiguration']['firstUser']
assert 'id' in data['clusterConfiguration']
assert 'uiConfiguration' in data
def test_dcos_add_user(dcos_api_session):
"""
dcos_add_user.py script adds a user to IAM using the
script dcos_add_user.py.
"""
email_address = uuid.uuid4().hex + '@example.com'
cli = dcos_cli.DcosCli('')
command = ['python', '/opt/mesosphere/bin/dcos_add_user.py', email_address]
cli.exec_command(command)
try:
r = dcos_api_session.get('/acs/api/v1/users')
r.raise_for_status()
expected_user_data = {
"uid": email_address,
"description": "",
"url": "/acs/api/v1/users/" + email_address,
"is_remote": True,
"is_service": False,
"provider_type": "oidc",
"provider_id": "https://dcos.auth0.com/"
}
assert expected_user_data in r.json()['array']
finally:
delete_user(dcos_api_session, email_address)
def test_check_message_on_adding_user_twice(dcos_api_session):
"""
Check that the correct message is emitted on adding the
same user for the second time.
"""
email_address = uuid.uuid4().hex + '@example.com'
cli = dcos_cli.DcosCli('')
command = ['python', '/opt/mesosphere/bin/dcos_add_user.py', email_address]
stdout, stderr = cli.exec_command(command)
try:
expected_output = '[INFO] Created IAM user `' + email_address + '`\n'
assert '' == stdout
assert expected_output == stderr
stdout, stderr = cli.exec_command(command)
expected_error = '[INFO] User `' + email_address + '` already exists\n'
assert expected_error == stderr
assert '' == stdout
finally:
delete_user(dcos_api_session, email_address)
| 36.607438 | 80 | 0.703804 | import logging
import uuid
import pytest
from dcos_test_utils import dcos_cli
from test_helpers import get_expanded_config
__maintainer__ = 'jgehrcke'
__contact__ = 'security-team@mesosphere.io'
log = logging.getLogger(__name__)
@pytest.fixture(autouse=True)
def skip_in_downstream():
expanded_config = get_expanded_config()
if 'security' in expanded_config:
pytest.skip(
'Skip upstream-specific user management tests',
allow_module_level=True
)
def get_users(apisession):
r = apisession.get('/acs/api/v1/users')
r.raise_for_status()
users = {u['uid']: u for u in r.json()['array']}
return users
def delete_user(apisession, uid):
r = apisession.delete('/acs/api/v1/users/%s' % (uid, ))
r.raise_for_status()
assert r.status_code == 204
@pytest.fixture()
def remove_users_added_by_test(dcos_api_session):
users_before = set(get_users(dcos_api_session))
log.info('remove_users_added_by_test pre test: users are %s', users_before)
try:
yield
finally:
users_after = set(get_users(dcos_api_session))
new_uids = users_after - users_before
for uid in new_uids:
log.info('remove_users_added_by_test post test: remove `%s`', uid)
delete_user(dcos_api_session, uid)
def test_users_get(dcos_api_session):
users = get_users(dcos_api_session)
assert users
required_keys = ('uid', 'description')
for userdict in users.values():
for k in required_keys:
assert k in userdict
def test_user_put_no_email_uid_empty_body(dcos_api_session):
r = dcos_api_session.put('/acs/api/v1/users/user1')
assert r.status_code == 400
assert 'Request has bad Content-Type or lacks JSON data' in r.text
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_legacy_user_creation_with_empty_json_doc(dcos_api_session):
r = dcos_api_session.put('/acs/api/v1/users/user@domain.foo', json={})
assert r.status_code == 201, r.text
r = dcos_api_session.get('/acs/api/v1/users/user@domain.foo')
assert r.json()['provider_type'] == 'oidc'
assert r.json()['provider_id'] == 'https://dcos.auth0.com/'
assert r.json()['is_remote'] is True
r = dcos_api_session.put('/acs/api/v1/users/user1', json={})
assert r.status_code == 400
assert 'One of `password` or `public_key` must be provided' in r.text
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_user_put_email_uid_and_description(dcos_api_session):
r = dcos_api_session.put(
'/acs/api/v1/users/user1@domain.foo',
json={'description': 'integration test user'}
)
assert r.status_code == 201, r.text
users = get_users(dcos_api_session)
assert len(users) > 1
assert 'user1@domain.foo' in users
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_user_put_with_legacy_body(dcos_api_session):
# the 1.13 development cycle.
r = dcos_api_session.put(
'/acs/api/v1/users/user2@domain.foo',
json={'creator_uid': 'any@thing.bla', 'cluster_url': 'foobar'}
)
assert r.status_code == 201, r.text
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_user_conflict(dcos_api_session):
# Note: the empty request body is not the decisive criterion here.
r = dcos_api_session.put('/acs/api/v1/users/user2@domain.foo', json={})
assert r.status_code == 201, r.text
r = dcos_api_session.put('/acs/api/v1/users/user2@domain.foo', json={})
assert r.status_code == 409, r.text
@pytest.mark.usefixtures('remove_users_added_by_test')
def test_user_delete(dcos_api_session):
r = dcos_api_session.put('/acs/api/v1/users/user6@domain.foo', json={})
r.raise_for_status()
assert r.status_code == 201
r = dcos_api_session.delete('/acs/api/v1/users/user6@domain.foo')
r.raise_for_status()
assert r.status_code == 204
users = get_users(dcos_api_session)
assert 'user6@domain.foo' not in users
def test_user_put_requires_authentication(noauth_api_session):
r = noauth_api_session.put('/acs/api/v1/users/user7@domain.foo', json={})
assert r.status_code == 401, r.text
def test_dynamic_ui_config(dcos_api_session):
r = dcos_api_session.get('/dcos-metadata/ui-config.json')
data = r.json()
assert not data['clusterConfiguration']['firstUser']
assert 'id' in data['clusterConfiguration']
assert 'uiConfiguration' in data
def test_dcos_add_user(dcos_api_session):
email_address = uuid.uuid4().hex + '@example.com'
cli = dcos_cli.DcosCli('')
command = ['python', '/opt/mesosphere/bin/dcos_add_user.py', email_address]
cli.exec_command(command)
try:
r = dcos_api_session.get('/acs/api/v1/users')
r.raise_for_status()
expected_user_data = {
"uid": email_address,
"description": "",
"url": "/acs/api/v1/users/" + email_address,
"is_remote": True,
"is_service": False,
"provider_type": "oidc",
"provider_id": "https://dcos.auth0.com/"
}
assert expected_user_data in r.json()['array']
finally:
delete_user(dcos_api_session, email_address)
def test_check_message_on_adding_user_twice(dcos_api_session):
email_address = uuid.uuid4().hex + '@example.com'
cli = dcos_cli.DcosCli('')
command = ['python', '/opt/mesosphere/bin/dcos_add_user.py', email_address]
stdout, stderr = cli.exec_command(command)
try:
expected_output = '[INFO] Created IAM user `' + email_address + '`\n'
assert '' == stdout
assert expected_output == stderr
stdout, stderr = cli.exec_command(command)
expected_error = '[INFO] User `' + email_address + '` already exists\n'
assert expected_error == stderr
assert '' == stdout
finally:
delete_user(dcos_api_session, email_address)
| true | true |
f72495af7deabdbbf171c378cd19e6201f5d9763 | 3,359 | py | Python | exercise/substitution_matrix.py | naiaralandeta/programming_naiara_landeta | f90f2d7d00bb84e4bc2b34f02c183fa0373fb97e | [
"MIT"
] | null | null | null | exercise/substitution_matrix.py | naiaralandeta/programming_naiara_landeta | f90f2d7d00bb84e4bc2b34f02c183fa0373fb97e | [
"MIT"
] | null | null | null | exercise/substitution_matrix.py | naiaralandeta/programming_naiara_landeta | f90f2d7d00bb84e4bc2b34f02c183fa0373fb97e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 19 10:16:35 2019
@author: naiara
"""
# SCORE MATRIX OR SUBSTITUTION MATRIX
"""
1 = "ACAGGTGGACCT"
2 = "ACTGGTCGACTC"
P(A) = 5/24 P(A, A) = 2/12 P(C, C) = 2/12 P(G, T) = 1
P(C) = 6/24 P(A, C) = 1 P(C, G) = 1/12 P(T, T) =1/12
P(G) = 7/24 P(A, G) = 1 P(C, T) = 1/12
P(T) = 6/24 P(A, T) = 1/12 P(G, G) = 3/12
We assume that whe the pairs do not exist the probability will be 1
s(a,b) = int[k * log(P(ab) / Qa * Qb] -> K = 1
A C T G
A 1.4294292643817876 1.2833012287035497 1.2163544390729364 1.3180633349627615
C 1.2833012287035497 1.271066772286538 1.1719352992845236 1.2388820889151366
T 1.2163544390729364 1.1719352992845236 1.1671364164027547 1.1371731930253115
G 1.3180633349627615 1.2388820889151366 1.1371731930253115 1.271066772286538
"""
import math
seq1 = "ACAGGTGGACCT"
seq2 = "ACTGGTCGACTT"
seq = "CTATATGG"
seq = "CCGGATCG"
def print_matrix(row):
var = ""; list_bases = ["A", "C", "G", "T"]
for i in range(len(row)):
var += list_bases[i] + "\t"
for j in row[i]:
var += str(j) + "\t"
var += "\n"
print(var)
def sustitution_matrix(seq1, seq2):
if len(seq1) == len(seq2):
prob_res = {}; list_pairs = []; dic_pairs = {}; list_bases = ["A", "C", "G", "T"];
k = 1; result_list = []; bases = ""; total_pairs = 0
total_residuos = len(seq1) + len(seq2)
prob_res["A"] = (seq1.count("A") + seq2.count("A")) / total_residuos
prob_res["C"] = (seq1.count("C") + seq2.count("C")) / total_residuos
prob_res["G"] = (seq1.count("G") + seq2.count("G")) / total_residuos
prob_res["T"] = (seq1.count("T") + seq2.count("T")) / total_residuos
print(prob_res, "\n")
for i in range(len(seq1)):
list_pairs.append(seq1[i] + seq2[i])
for j in list_pairs:
if not j in dic_pairs:
bases = j
if not bases[::-1] in dic_pairs:
if j == bases[::-1]:
total_pairs = list_pairs.count(j)
else:
total_pairs = list_pairs.count(j) + list_pairs.count(bases[::-1])
dic_pairs[j] = total_pairs / len(seq1)
dic_pairs[bases[::-1]] = dic_pairs[j]
print(dic_pairs, "\n")
for i in range(len(list_bases)):
list_prob = []
for j in range(len(list_bases)):
pro_1 = prob_res[list_bases[i]]
pro_2 = prob_res[list_bases[j]]
if (list_bases[i]+list_bases[j] in dic_pairs):
pro_both = dic_pairs[list_bases[i]+list_bases[j]] + 1
else:
pro_both = 1
if pro_1 == 0 or pro_2 == 0:
list_prob.append(0)
else:
list_prob.append( k * math.log10(pro_both / (pro_1 * pro_2)))
result_list.append(list_prob)
else:
print("Length of the sequences are different")
return result_list
print_matrix(sustitution_matrix(seq1, seq2)) | 33.257426 | 90 | 0.504019 |
import math
seq1 = "ACAGGTGGACCT"
seq2 = "ACTGGTCGACTT"
seq = "CTATATGG"
seq = "CCGGATCG"
def print_matrix(row):
var = ""; list_bases = ["A", "C", "G", "T"]
for i in range(len(row)):
var += list_bases[i] + "\t"
for j in row[i]:
var += str(j) + "\t"
var += "\n"
print(var)
def sustitution_matrix(seq1, seq2):
if len(seq1) == len(seq2):
prob_res = {}; list_pairs = []; dic_pairs = {}; list_bases = ["A", "C", "G", "T"];
k = 1; result_list = []; bases = ""; total_pairs = 0
total_residuos = len(seq1) + len(seq2)
prob_res["A"] = (seq1.count("A") + seq2.count("A")) / total_residuos
prob_res["C"] = (seq1.count("C") + seq2.count("C")) / total_residuos
prob_res["G"] = (seq1.count("G") + seq2.count("G")) / total_residuos
prob_res["T"] = (seq1.count("T") + seq2.count("T")) / total_residuos
print(prob_res, "\n")
for i in range(len(seq1)):
list_pairs.append(seq1[i] + seq2[i])
for j in list_pairs:
if not j in dic_pairs:
bases = j
if not bases[::-1] in dic_pairs:
if j == bases[::-1]:
total_pairs = list_pairs.count(j)
else:
total_pairs = list_pairs.count(j) + list_pairs.count(bases[::-1])
dic_pairs[j] = total_pairs / len(seq1)
dic_pairs[bases[::-1]] = dic_pairs[j]
print(dic_pairs, "\n")
for i in range(len(list_bases)):
list_prob = []
for j in range(len(list_bases)):
pro_1 = prob_res[list_bases[i]]
pro_2 = prob_res[list_bases[j]]
if (list_bases[i]+list_bases[j] in dic_pairs):
pro_both = dic_pairs[list_bases[i]+list_bases[j]] + 1
else:
pro_both = 1
if pro_1 == 0 or pro_2 == 0:
list_prob.append(0)
else:
list_prob.append( k * math.log10(pro_both / (pro_1 * pro_2)))
result_list.append(list_prob)
else:
print("Length of the sequences are different")
return result_list
print_matrix(sustitution_matrix(seq1, seq2)) | true | true |
f72495f95031d45cde0afbf3bc7be5495a179206 | 798 | py | Python | distributed-todos/services/todos/src/tasks/worker.py | cheperuiz/unlearn-python | f5b97090f8f1d014bd9d65ecc0c6232919271bfa | [
"MIT"
] | null | null | null | distributed-todos/services/todos/src/tasks/worker.py | cheperuiz/unlearn-python | f5b97090f8f1d014bd9d65ecc0c6232919271bfa | [
"MIT"
] | 2 | 2021-05-11T00:00:01.000Z | 2022-01-22T10:13:26.000Z | distributed-todos/services/todos/src/tasks/worker.py | cheperuiz/unlearn-dev | f5b97090f8f1d014bd9d65ecc0c6232919271bfa | [
"MIT"
] | null | null | null | import yaml
from celery import Celery
from pymongo import MongoClient
from models.todo_dao import MongoDAO
from models.todo import TodoSchema
from library.utils import replace_env, make_url
with open("/config/todos/default_config.yml", "r") as f:
config = yaml.load(f, yaml.SafeLoader)
replace_env(config)
url = make_url(config["database"]["mongo"], include_db=False)
client = MongoClient(url)
collection = client.todos.todos_collection
broker_url = make_url(config["celery"]["broker"])
results_backend_url = make_url(config["celery"]["results_backend"])
celery = Celery(__name__, broker=broker_url, backend=results_backend_url)
@celery.task(name="tasks.worker.get_all_todos")
def get_all_todos(dao=MongoDAO(collection, TodoSchema)):
return TodoSchema(many=True).dump(dao.get_all())
| 30.692308 | 73 | 0.784461 | import yaml
from celery import Celery
from pymongo import MongoClient
from models.todo_dao import MongoDAO
from models.todo import TodoSchema
from library.utils import replace_env, make_url
with open("/config/todos/default_config.yml", "r") as f:
config = yaml.load(f, yaml.SafeLoader)
replace_env(config)
url = make_url(config["database"]["mongo"], include_db=False)
client = MongoClient(url)
collection = client.todos.todos_collection
broker_url = make_url(config["celery"]["broker"])
results_backend_url = make_url(config["celery"]["results_backend"])
celery = Celery(__name__, broker=broker_url, backend=results_backend_url)
@celery.task(name="tasks.worker.get_all_todos")
def get_all_todos(dao=MongoDAO(collection, TodoSchema)):
return TodoSchema(many=True).dump(dao.get_all())
| true | true |
f7249601a426b85ab408d827623e20f212418c0d | 1,180 | py | Python | xlsxwriter/test/comparison/test_button06.py | haiyangd/XlsxWriter | 81f8c9435b3e03a1458bf9ba314b5d3f7508290f | [
"BSD-2-Clause-FreeBSD"
] | 3 | 2018-02-26T12:31:41.000Z | 2020-10-10T14:14:11.000Z | xlsxwriter/test/comparison/test_button06.py | haiyangd/XlsxWriter | 81f8c9435b3e03a1458bf9ba314b5d3f7508290f | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | xlsxwriter/test/comparison/test_button06.py | haiyangd/XlsxWriter | 81f8c9435b3e03a1458bf9ba314b5d3f7508290f | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013-2017, John McNamara, jmcnamara@cpan.org
#
from ..excel_comparsion_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.maxDiff = None
filename = 'button05.xlsx'
test_dir = 'xlsxwriter/test/comparison/'
self.got_filename = test_dir + '_test2_' + filename
self.exp_filename = test_dir + 'xlsx_files/' + filename
self.ignore_files = []
self.ignore_elements = {}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_button('C2', {'macro': 'my_macro',
'width': 128,
'height': 30
})
workbook.close()
self.assertExcelEqual()
| 26.222222 | 79 | 0.54322 | true | true | |
f72497b539e640bb7175711770e3eea6c3d373a3 | 2,122 | py | Python | service/generated_flatbuffers/tflite/SliceOptions.py | lcrh/falken | 7545431c7bfa34a9b45c2243cae40dbb58adefaa | [
"Apache-2.0"
] | 213 | 2021-06-11T01:15:16.000Z | 2022-02-25T16:18:57.000Z | service/generated_flatbuffers/tflite/SliceOptions.py | lcrh/falken | 7545431c7bfa34a9b45c2243cae40dbb58adefaa | [
"Apache-2.0"
] | 32 | 2021-06-17T17:58:54.000Z | 2022-02-02T05:58:10.000Z | service/generated_flatbuffers/tflite/SliceOptions.py | lcrh/falken | 7545431c7bfa34a9b45c2243cae40dbb58adefaa | [
"Apache-2.0"
] | 28 | 2021-06-17T17:34:21.000Z | 2022-03-24T14:05:20.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# automatically generated by the FlatBuffers compiler, do not modify
# namespace: tflite
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class SliceOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsSliceOptions(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = SliceOptions()
x.Init(buf, n + offset)
return x
@classmethod
def SliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
# SliceOptions
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def SliceOptionsStart(builder): builder.StartObject(0)
def SliceOptionsEnd(builder): return builder.EndObject()
class SliceOptionsT(object):
# SliceOptionsT
def __init__(self):
pass
@classmethod
def InitFromBuf(cls, buf, pos):
sliceOptions = SliceOptions()
sliceOptions.Init(buf, pos)
return cls.InitFromObj(sliceOptions)
@classmethod
def InitFromObj(cls, sliceOptions):
x = SliceOptionsT()
x._UnPack(sliceOptions)
return x
# SliceOptionsT
def _UnPack(self, sliceOptions):
if sliceOptions is None:
return
# SliceOptionsT
def Pack(self, builder):
SliceOptionsStart(builder)
sliceOptions = SliceOptionsEnd(builder)
return sliceOptions
| 29.068493 | 114 | 0.703582 |
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
class SliceOptions(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsSliceOptions(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = SliceOptions()
x.Init(buf, n + offset)
return x
@classmethod
def SliceOptionsBufferHasIdentifier(cls, buf, offset, size_prefixed=False):
return flatbuffers.util.BufferHasIdentifier(buf, offset, b"\x54\x46\x4C\x33", size_prefixed=size_prefixed)
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
def SliceOptionsStart(builder): builder.StartObject(0)
def SliceOptionsEnd(builder): return builder.EndObject()
class SliceOptionsT(object):
def __init__(self):
pass
@classmethod
def InitFromBuf(cls, buf, pos):
sliceOptions = SliceOptions()
sliceOptions.Init(buf, pos)
return cls.InitFromObj(sliceOptions)
@classmethod
def InitFromObj(cls, sliceOptions):
x = SliceOptionsT()
x._UnPack(sliceOptions)
return x
def _UnPack(self, sliceOptions):
if sliceOptions is None:
return
def Pack(self, builder):
SliceOptionsStart(builder)
sliceOptions = SliceOptionsEnd(builder)
return sliceOptions
| true | true |
f72498dcf1db718730a5fc9efce9cd8c757e4531 | 15,730 | py | Python | test/test_source_gdal.py | knowledgevis/large_image | ab5c213d3a68de8a2144707fc0dc1115d1e4664f | [
"Apache-2.0"
] | null | null | null | test/test_source_gdal.py | knowledgevis/large_image | ab5c213d3a68de8a2144707fc0dc1115d1e4664f | [
"Apache-2.0"
] | null | null | null | test/test_source_gdal.py | knowledgevis/large_image | ab5c213d3a68de8a2144707fc0dc1115d1e4664f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import glob
import json
import numpy
import os
import PIL.Image
import PIL.ImageChops
import pytest
import six
from large_image import constants
from large_image.exceptions import TileSourceException
import large_image_source_gdal
from . import utilities
def _assertImageMatches(image, testRootName, saveTestImageFailurePath='/tmp'):
    """
    Assert that an image is pixel-identical to at least one reference image.

    Adapted from:
    https://stackoverflow.com/questions/35176639/compare-images-python-pil

    :param image: PIL image to compare or a binary string of the image.
    :param testRootName: base name of the images to test.  These images are
        globbed in test_files/<testRootName>*.png.
    :param saveTestImageFailurePath: if the image doesn't match any of the
        test images, if this value is set, save the image to make it easier
        to determine why it failed.
    """
    if isinstance(image, six.binary_type):
        image = PIL.Image.open(six.BytesIO(image))
    candidate = image.convert('RGBA')
    testDir = os.path.dirname(os.path.realpath(__file__))
    pattern = os.path.join(testDir, 'test_files', testRootName + '*.png')
    matched = False
    for referencePath in glob.glob(pattern):
        reference = PIL.Image.open(referencePath).convert('RGBA')
        # getbbox() on the difference image is None when no pixel differs.
        if PIL.ImageChops.difference(candidate, reference).getbbox() is None:
            matched = True
            break
    if not matched and saveTestImageFailurePath:
        candidate.save(os.path.join(
            saveTestImageFailurePath, testRootName + '_test.png'))
    assert matched
def testTileFromGeotiffs():
    """Check geotiff metadata with and without a projection, plus one tile."""
    testDir = os.path.dirname(os.path.realpath(__file__))
    imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')
    source = large_image_source_gdal.GDALFileTileSource(imagePath)
    meta = source.getMetadata()
    assert (meta['tileWidth'], meta['tileHeight']) == (256, 256)
    assert (meta['sizeX'], meta['sizeY']) == (256, 256)
    assert meta['levels'] == 1
    bounds = meta['bounds']
    assert bounds['xmax'] == 597915.0
    assert bounds['xmin'] == 367185.0
    assert bounds['ymax'] == 3788115.0
    assert bounds['ymin'] == 3552885.0
    assert (bounds['srs'].strip() ==
            '+proj=utm +zone=11 +datum=WGS84 +units=m +no_defs')
    assert meta['geospatial']
    # Check that we read some band data, too
    assert len(meta['bands']) == 3
    greenBand = meta['bands'][2]
    assert greenBand['interpretation'] == 'green'
    assert greenBand['max'] == 212.0
    assert greenBand['min'] == 0.0
    # Getting the metadata with a specified projection will be different
    source = large_image_source_gdal.GDALFileTileSource(
        imagePath, projection='EPSG:3857')
    meta = source.getMetadata()
    assert (meta['tileWidth'], meta['tileHeight']) == (256, 256)
    assert (meta['sizeX'], meta['sizeY']) == (65536, 65536)
    assert meta['levels'] == 9
    bounds = meta['bounds']
    assert bounds['xmax'] == pytest.approx(-12906033, 1)
    assert bounds['xmin'] == pytest.approx(-13184900, 1)
    assert bounds['ymax'] == pytest.approx(4059661, 1)
    assert bounds['ymin'] == pytest.approx(3777034, 1)
    assert bounds['srs'] == '+init=epsg:3857'
    assert meta['geospatial']
    # A tile from the projected source should match a known reference image.
    source = large_image_source_gdal.GDALFileTileSource(
        imagePath, projection='EPSG:3857', style=json.dumps({'band': -1}),
        encoding='PNG')
    image = source.getTile(89, 207, 9)
    _assertImageMatches(image, 'geotiff_9_89_207')
def testTileLinearStyleFromGeotiffs():
    """A linear-scheme palette style should render the reference tile."""
    testDir = os.path.dirname(os.path.realpath(__file__))
    imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')
    # Key order matches the original literal so json.dumps output is stable.
    styleSpec = {
        'band': 1,
        'min': 0,
        'max': 100,
        'palette': 'matplotlib.Plasma_6',
        'scheme': 'linear',
    }
    source = large_image_source_gdal.GDALFileTileSource(
        imagePath, projection='EPSG:3857', style=json.dumps(styleSpec),
        encoding='PNG')
    _assertImageMatches(
        source.getTile(22, 51, 7), 'geotiff_style_linear_7_22_51')
def testTileStyleBadInput():
    """Invalid style documents should raise TileSourceException on getTile."""
    testDir = os.path.dirname(os.path.realpath(__file__))
    imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')

    def _assertStyleResponse(style, message):
        # Source construction may succeed; the bad style surfaces on getTile.
        with pytest.raises(TileSourceException, match=message):
            source = large_image_source_gdal.GDALFileTileSource(
                imagePath, projection='EPSG:3857', style=json.dumps(style),
                encoding='PNG')
            source.getTile(22, 51, 7)

    badBandMessage = ('Band has to be a positive integer, -1, or a band '
                      'interpretation found in the source.')
    # Non-integer band index.
    _assertStyleResponse({'band': 1.1}, badBandMessage)
    # Band index beyond the number of bands in the source.
    _assertStyleResponse({'band': 500}, badBandMessage)
    # Palette that does not resolve to a palettable path.
    _assertStyleResponse(
        {'band': 1, 'palette': 'nonexistent.palette'},
        'Palette is not a valid palettable path.')
    # A style that is not a JSON object at all.
    _assertStyleResponse(['style'], 'Style is not a valid json object.')
def testThumbnailFromGeotiffs():
    """Thumbnails should be PNGs and differ between projections."""
    testDir = os.path.dirname(os.path.realpath(__file__))
    imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')
    # We get a thumbnail without a projection
    noProjSource = large_image_source_gdal.GDALFileTileSource(imagePath)
    noProjImage, _ = noProjSource.getThumbnail(encoding='PNG')
    assert noProjImage.startswith(utilities.PNGHeader)
    # We get a different thumbnail with a projection
    projSource = large_image_source_gdal.GDALFileTileSource(
        imagePath, projection='EPSG:3857')
    projImage, _ = projSource.getThumbnail(encoding='PNG')
    assert projImage.startswith(utilities.PNGHeader)
    assert noProjImage != projImage
def testPixel():
    """getPixel returns styled r/g/b/a plus raw band values for a location.

    Exercises pixel, projection, EPSG, and WGS84 region units; in every case
    the raw 'bands' values are unchanged while the r/g/b reflect any style.
    """
    testDir = os.path.dirname(os.path.realpath(__file__))
    imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')
    # Test in pixel coordinates
    source = large_image_source_gdal.GDALFileTileSource(imagePath)
    pixel = source.getPixel(region={'left': 212, 'top': 198})
    assert pixel == {
        'r': 76, 'g': 78, 'b': 77, 'a': 255, 'bands': {1: 62.0, 2: 65.0, 3: 66.0}}
    # A location outside the image yields an empty result
    pixel = source.getPixel(region={'left': 2120, 'top': 198})
    assert pixel == {}
    # Test with a projection
    source = large_image_source_gdal.GDALFileTileSource(imagePath, projection='EPSG:3857')
    pixel = source.getPixel(region={'left': -13132910, 'top': 4010586, 'units': 'projection'})
    assert pixel == {
        'r': 94, 'g': 98, 'b': 99, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}
    # Test with styles
    style = json.dumps({'band': 1, 'min': 0, 'max': 100,
                        'palette': 'matplotlib.Plasma_6'})
    source = large_image_source_gdal.GDALFileTileSource(
        imagePath, projection='EPSG:3857', style=style)
    pixel = source.getPixel(region={'left': -13132910, 'top': 4010586, 'units': 'projection'})
    # The styled r/g/b come from the palette; the raw band values do not change
    assert pixel == {
        'r': 247, 'g': 156, 'b': 60, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}
    # Test with palette as an array of colors
    style = json.dumps({'band': 1, 'min': 0, 'max': 100,
                        'palette': ['#0000ff', '#00ff00', '#ff0000']})
    source = large_image_source_gdal.GDALFileTileSource(
        imagePath, projection='EPSG:3857', style=style)
    pixel = source.getPixel(region={'left': -13132910, 'top': 4010586, 'units': 'projection'})
    assert pixel == {
        'r': 137, 'g': 117, 'b': 0, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}
    # Test with projection units
    source = large_image_source_gdal.GDALFileTileSource(imagePath, projection='EPSG:3857')
    pixel = source.getPixel(region={'left': -13132910, 'top': 4010586, 'units': 'EPSG:3857'})
    assert pixel == {
        'r': 94, 'g': 98, 'b': 99, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}
    pixel = source.getPixel(region={'left': -117.975, 'top': 33.865, 'units': 'WGS84'})
    assert pixel == {
        'r': 94, 'g': 98, 'b': 99, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}
    # When the tile has a different projection, the pixel is the same as
    # the band values.
    source = large_image_source_gdal.GDALFileTileSource(imagePath)
    pixel = source.getPixel(region={'left': -13132910, 'top': 4010586, 'units': 'EPSG:3857'})
    assert pixel == {
        'r': 94, 'g': 98, 'b': 99, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}
def testSourceErrors():
    """Bad inputs to the GDAL source raise informative TileSourceExceptions."""
    testDir = os.path.dirname(os.path.realpath(__file__))

    def _open(filename, projection=None):
        path = os.path.join(testDir, 'test_files', filename)
        if projection is not None:
            return large_image_source_gdal.GDALFileTileSource(path, projection)
        return large_image_source_gdal.GDALFileTileSource(path)

    # Geographic projections are rejected outright.
    with pytest.raises(TileSourceException, match='must not be geographic'):
        _open('rgb_geotiff.tiff', 'EPSG:4326')
    # Files that gdal cannot read are rejected.
    with pytest.raises(TileSourceException, match='cannot be opened via'):
        _open('zero_gi.tif')
    # Rasters without a projected scale are rejected.
    with pytest.raises(TileSourceException, match='does not have a projected scale'):
        _open('yb10kx5k.png')
def testStereographicProjection():
    """Stereographic projections require an explicit unitsPerPixel."""
    testDir = os.path.dirname(os.path.realpath(__file__))
    imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')
    stereoProjection = 'EPSG:3411'
    # Asking for a stereographic projection without unitsPerPixel fails.
    with pytest.raises(TileSourceException, match='unitsPerPixel must be specified'):
        large_image_source_gdal.GDALFileTileSource(imagePath, stereoProjection)
    # Supplying unitsPerPixel makes the same request succeed.
    large_image_source_gdal.GDALFileTileSource(
        imagePath, stereoProjection, unitsPerPixel=150000)
def testProj4Proj():
# Test obtaining pyproj.Proj projection values
proj4Proj = large_image_source_gdal.GDALFileTileSource._proj4Proj
proj = proj4Proj(b'epsg:4326')
assert proj4Proj(u'epsg:4326').srs == proj.srs
assert proj4Proj('proj4:EPSG:4326').srs == proj.srs
assert proj4Proj(4326) is None
def testConvertProjectionUnits():
    """Exercise _convertProjectionUnits on sources with and without a projection.

    The positional arguments appear to be (left, top, right, bottom, width,
    height, units) and the result a 5-tuple (left, top, right, bottom,
    units) expressed in the source's native units — inferred from the calls
    below; confirm against the source implementation.
    """
    testDir = os.path.dirname(os.path.realpath(__file__))
    imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')
    tsNoProj = large_image_source_gdal.GDALFileTileSource(imagePath)
    # Top-left corner in web mercator converts to base pixel coordinates.
    result = tsNoProj._convertProjectionUnits(
        -13024380, 3895303, None, None, None, None, 'EPSG:3857')
    assert result[0] == pytest.approx(147, 1)
    assert result[1] == pytest.approx(149, 1)
    assert result[2:] == (None, None, 'base_pixels')
    # Only the bottom-right corner: the left/top slots stay None.
    result = tsNoProj._convertProjectionUnits(
        None, None, -13080040, 3961860, None, None, 'EPSG:3857')
    assert result[2] == pytest.approx(96, 1)
    assert result[3] == pytest.approx(88, 1)
    assert result[:2] == (None, None)
    # A WGS84 corner plus width/height yields the full rectangle.
    result = tsNoProj._convertProjectionUnits(
        -117.5, 33, None, None, 0.5, 0.5, 'EPSG:4326')
    assert result[0] == pytest.approx(96, 1)
    assert result[1] == pytest.approx(149, 1)
    assert result[2] == pytest.approx(147, 1)
    assert result[3] == pytest.approx(89, 1)
    # The opposite corner plus width/height produces the same rectangle.
    result = tsNoProj._convertProjectionUnits(
        None, None, -117, 33.5, 0.5, 0.5, 'EPSG:4326')
    assert result[0] == pytest.approx(96, 1)
    assert result[1] == pytest.approx(149, 1)
    assert result[2] == pytest.approx(147, 1)
    assert result[3] == pytest.approx(89, 1)
    # With unitsWH='base_pixels' the right/bottom slots are left unset here.
    result = tsNoProj._convertProjectionUnits(
        -117.5, 33, None, None, 0.5, 0.5, 'EPSG:4326', unitsWH='base_pixels')
    assert result[0] == pytest.approx(96, 1)
    assert result[1] == pytest.approx(149, 1)
    assert result[2:] == (None, None, 'base_pixels')
    # An underspecified mix of edges cannot be converted and raises.
    with pytest.raises(TileSourceException, match='Cannot convert'):
        tsNoProj._convertProjectionUnits(
            -117.5, None, -117, None, None, None, 'EPSG:4326')

    # For a projected source, matching units pass through essentially unchanged.
    tsProj = large_image_source_gdal.GDALFileTileSource(imagePath, projection='EPSG:3857')
    result = tsProj._convertProjectionUnits(
        -13024380, 3895303, None, None, None, None, 'EPSG:3857')
    assert result[0] == pytest.approx(-13024380, 1)
    assert result[1] == pytest.approx(3895303, 1)
    assert result[2:] == (None, None, 'projection')
def testGuardAgainstBadLatLong():
    """Bounds reported in EPSG:4326 stay within valid lat/long ranges."""
    testDir = os.path.dirname(os.path.realpath(__file__))
    imagePath = os.path.join(testDir, 'test_files', 'global_dem.tif')
    source = large_image_source_gdal.GDALFileTileSource(imagePath)
    bounds = source.getBounds(srs='EPSG:4326')
    # Compare with a tight tolerance rather than exact float equality: the
    # bounds are derived floating-point values, and bit-exact comparison is
    # brittle across GDAL/proj versions and platforms.
    assert bounds['xmin'] == pytest.approx(-180.00416667, abs=1e-8)
    assert bounds['xmax'] == pytest.approx(179.99583333, abs=1e-8)
    assert bounds['ymin'] == pytest.approx(-89.99583333, abs=1e-8)
    assert bounds['ymax'] == pytest.approx(90, abs=1e-8)
def testPalettizedGeotiff():
    """A palettized (single-band, palette-interpreted) GeoTIFF is readable."""
    imagePath = utilities.externaldata('data/landcover_sample_1000.tif.sha512')
    source = large_image_source_gdal.GDALFileTileSource(imagePath)
    tileMetadata = source.getMetadata()
    assert tileMetadata['tileWidth'] == 256
    assert tileMetadata['tileHeight'] == 256
    assert tileMetadata['sizeX'] == 687
    assert tileMetadata['sizeY'] == 509
    assert tileMetadata['levels'] == 3
    # The native SRS is an Albers equal-area projection.
    assert tileMetadata['bounds']['srs'].strip().startswith(
        '+proj=aea +lat_0=23 +lon_0=-96 +lat_1=29.5 +lat_2=45.5 +x_0=0 +y_0=0')
    assert tileMetadata['geospatial']
    # Palettized data is reported as a single band with palette interpretation.
    assert len(tileMetadata['bands']) == 1
    assert tileMetadata['bands'][1]['interpretation'] == 'palette'
    # Getting the metadata with a specified projection will be different
    source = large_image_source_gdal.GDALFileTileSource(
        imagePath, projection='EPSG:3857', encoding='PNG')
    tileMetadata = source.getMetadata()
    assert tileMetadata['tileWidth'] == 256
    assert tileMetadata['tileHeight'] == 256
    assert tileMetadata['sizeX'] == 65536
    assert tileMetadata['sizeY'] == 65536
    assert tileMetadata['levels'] == 9
    assert tileMetadata['bounds']['xmax'] == pytest.approx(-7837888, 1)
    assert tileMetadata['bounds']['xmin'] == pytest.approx(-8909162, 1)
    assert tileMetadata['bounds']['ymax'] == pytest.approx(5755717, 1)
    assert tileMetadata['bounds']['ymin'] == pytest.approx(4876273, 1)
    assert tileMetadata['bounds']['srs'] == '+init=epsg:3857'
    assert tileMetadata['geospatial']
    # A tile partially outside the data should have transparent corners and
    # opaque palette-expanded colors inside the data region.
    image = source.getTile(37, 46, 7)
    image = PIL.Image.open(six.BytesIO(image))
    image = numpy.asarray(image)
    assert list(image[0, 0, :]) == [0, 0, 0, 0]
    assert list(image[255, 0, :]) == [221, 201, 201, 255]
def testRetileProjection():
    """Retiling a projected source yields full-size single tiles."""
    imagePath = utilities.externaldata('data/landcover_sample_1000.tif.sha512')
    ts = large_image_source_gdal.GDALFileTileSource(imagePath, projection='EPSG:3857')
    ti = ts.getSingleTile(tile_size=dict(width=1000, height=1000), tile_position=1000)
    # 1000 x 1000 x 3 samples -- assumes a 3-channel result; confirm.
    assert ti['tile'].size == 3000000
    tile = ts.getTile(1178, 1507, 12)
    # A non-trivial encoded tile should be returned at deep zoom levels.
    assert len(tile) > 1000
def testInternalMetadata():
    """The GDAL source exposes driver details via getInternalMetadata."""
    test_dir = os.path.dirname(os.path.realpath(__file__))
    tiff_path = os.path.join(test_dir, 'test_files', 'rgb_geotiff.tiff')
    internal = large_image_source_gdal.GDALFileTileSource(tiff_path).getInternalMetadata()
    assert internal['driverShortName'] == 'GTiff'
def testGetRegionWithProjection():
    """getRegion on a projected source returns an RGBA numpy array."""
    imagePath = utilities.externaldata('data/landcover_sample_1000.tif.sha512')
    ts = large_image_source_gdal.GDALFileTileSource(imagePath, projection='EPSG:3857')
    region, _ = ts.getRegion(output=dict(maxWidth=1024, maxHeight=1024),
                             format=constants.TILE_FORMAT_NUMPY)
    # Height x width x 4 channels (RGBA).
    assert region.shape == (1024, 1024, 4)
| 44.6875 | 94 | 0.676351 |
import glob
import json
import numpy
import os
import PIL.Image
import PIL.ImageChops
import pytest
import six
from large_image import constants
from large_image.exceptions import TileSourceException
import large_image_source_gdal
from . import utilities
def _assertImageMatches(image, testRootName, saveTestImageFailurePath='/tmp'):
    """Assert that *image* matches one of the reference images.

    References are all PNGs named ``<testRootName>*.png`` under
    ``test_files``.  ``ImageChops.difference(...).getbbox()`` is ``None``
    exactly when two images are pixel-identical, so a match means some diff
    is ``None``.  On mismatch the rendered image is saved to
    *saveTestImageFailurePath* (if truthy) to help debugging.
    """
    if isinstance(image, six.binary_type):
        # Encoded bytes (e.g. a PNG tile) are decoded before comparison.
        image = PIL.Image.open(six.BytesIO(image))
    image = image.convert('RGBA')
    testDir = os.path.dirname(os.path.realpath(__file__))
    testImagePaths = glob.glob(os.path.join(
        testDir, 'test_files', testRootName + '*.png'))
    testImages = [PIL.Image.open(testImagePath).convert('RGBA')
                  for testImagePath in testImagePaths]
    diffs = [PIL.ImageChops.difference(image, testImage).getbbox()
             for testImage in testImages]
    if None not in diffs and saveTestImageFailurePath:
        # No reference matched; keep the produced image for inspection.
        image.save(os.path.join(saveTestImageFailurePath, testRootName + '_test.png'))
    assert None in diffs
def testTileFromGeotiffs():
    """Metadata and tiles from a GeoTIFF, with and without a projection."""
    testDir = os.path.dirname(os.path.realpath(__file__))
    imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')
    # Without a projection, the metadata reflects the native UTM raster.
    source = large_image_source_gdal.GDALFileTileSource(imagePath)
    tileMetadata = source.getMetadata()
    assert tileMetadata['tileWidth'] == 256
    assert tileMetadata['tileHeight'] == 256
    assert tileMetadata['sizeX'] == 256
    assert tileMetadata['sizeY'] == 256
    assert tileMetadata['levels'] == 1
    assert tileMetadata['bounds']['xmax'] == 597915.0
    assert tileMetadata['bounds']['xmin'] == 367185.0
    assert tileMetadata['bounds']['ymax'] == 3788115.0
    assert tileMetadata['bounds']['ymin'] == 3552885.0
    assert (tileMetadata['bounds']['srs'].strip() ==
            '+proj=utm +zone=11 +datum=WGS84 +units=m +no_defs')
    assert tileMetadata['geospatial']
    assert len(tileMetadata['bands']) == 3
    assert tileMetadata['bands'][2]['interpretation'] == 'green'
    assert tileMetadata['bands'][2]['max'] == 212.0
    assert tileMetadata['bands'][2]['min'] == 0.0
    # Reprojecting to web mercator changes the reported size and bounds.
    source = large_image_source_gdal.GDALFileTileSource(
        imagePath, projection='EPSG:3857')
    tileMetadata = source.getMetadata()
    assert tileMetadata['tileWidth'] == 256
    assert tileMetadata['tileHeight'] == 256
    assert tileMetadata['sizeX'] == 65536
    assert tileMetadata['sizeY'] == 65536
    assert tileMetadata['levels'] == 9
    assert tileMetadata['bounds']['xmax'] == pytest.approx(-12906033, 1)
    assert tileMetadata['bounds']['xmin'] == pytest.approx(-13184900, 1)
    assert tileMetadata['bounds']['ymax'] == pytest.approx(4059661, 1)
    assert tileMetadata['bounds']['ymin'] == pytest.approx(3777034, 1)
    assert tileMetadata['bounds']['srs'] == '+init=epsg:3857'
    assert tileMetadata['geospatial']
    # A rendered tile (band: -1 style) should match the stored reference PNG.
    source = large_image_source_gdal.GDALFileTileSource(
        imagePath, projection='EPSG:3857', style=json.dumps({'band': -1}), encoding='PNG')
    image = source.getTile(89, 207, 9)
    _assertImageMatches(image, 'geotiff_9_89_207')
def testTileLinearStyleFromGeotiffs():
    """A linear-scheme band style renders the expected reference tile."""
    test_dir = os.path.dirname(os.path.realpath(__file__))
    image_path = os.path.join(test_dir, 'test_files', 'rgb_geotiff.tiff')
    # Style band 1 through a linear Plasma palette over [0, 100].
    style_spec = {
        'band': 1, 'min': 0, 'max': 100,
        'palette': 'matplotlib.Plasma_6',
        'scheme': 'linear',
    }
    source = large_image_source_gdal.GDALFileTileSource(
        image_path, projection='EPSG:3857', style=json.dumps(style_spec),
        encoding='PNG')
    tile = source.getTile(22, 51, 7)
    _assertImageMatches(tile, 'geotiff_style_linear_7_22_51')
def testTileStyleBadInput():
    """Invalid style specifications raise TileSourceException."""
    def _assertStyleResponse(imagePath, style, message):
        # Building the source (or fetching a tile) with a bad style must
        # raise with the expected message.
        with pytest.raises(TileSourceException, match=message):
            source = large_image_source_gdal.GDALFileTileSource(
                imagePath, projection='EPSG:3857', style=json.dumps(style), encoding='PNG')
            source.getTile(22, 51, 7)

    testDir = os.path.dirname(os.path.realpath(__file__))
    imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')
    # Non-integer band index.
    _assertStyleResponse(imagePath, {
        'band': 1.1,
    }, 'Band has to be a positive integer, -1, or a band interpretation found in the source.')
    # Band index beyond the bands in the source.
    _assertStyleResponse(imagePath, {
        'band': 500,
    }, 'Band has to be a positive integer, -1, or a band interpretation found in the source.')
    # Unknown palettable palette path.
    _assertStyleResponse(imagePath, {
        'band': 1,
        'palette': 'nonexistent.palette'
    }, 'Palette is not a valid palettable path.')
    # Style must be a JSON object, not a list.
    _assertStyleResponse(imagePath, ['style'],
                         'Style is not a valid json object.')
def testThumbnailFromGeotiffs():
    """Thumbnails are PNGs and differ between raw and projected sources."""
    test_dir = os.path.dirname(os.path.realpath(__file__))
    image_path = os.path.join(test_dir, 'test_files', 'rgb_geotiff.tiff')
    png_header = utilities.PNGHeader

    def thumbnail(**source_kwargs):
        # Build a source with the given options and return its PNG thumbnail,
        # checking the PNG signature on the way out.
        src = large_image_source_gdal.GDALFileTileSource(image_path, **source_kwargs)
        data, _ = src.getThumbnail(encoding='PNG')
        assert data[:len(png_header)] == png_header
        return data

    plain = thumbnail()
    projected = thumbnail(projection='EPSG:3857')
    # Reprojection must change the rendered thumbnail.
    assert plain != projected
def testPixel():
    """getPixel returns styled RGBA values plus raw band values per region."""
    testDir = os.path.dirname(os.path.realpath(__file__))
    imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')
    # Without a projection, regions are in base pixel coordinates.
    source = large_image_source_gdal.GDALFileTileSource(imagePath)
    pixel = source.getPixel(region={'left': 212, 'top': 198})
    assert pixel == {
        'r': 76, 'g': 78, 'b': 77, 'a': 255, 'bands': {1: 62.0, 2: 65.0, 3: 66.0}}
    # Out-of-bounds locations return an empty dict.
    pixel = source.getPixel(region={'left': 2120, 'top': 198})
    assert pixel == {}
    # With a projection, 'units': 'projection' uses projection coordinates.
    source = large_image_source_gdal.GDALFileTileSource(imagePath, projection='EPSG:3857')
    pixel = source.getPixel(region={'left': -13132910, 'top': 4010586, 'units': 'projection'})
    assert pixel == {
        'r': 94, 'g': 98, 'b': 99, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}
    # A palette style changes the RGBA values but not the raw band values.
    style = json.dumps({'band': 1, 'min': 0, 'max': 100,
                        'palette': 'matplotlib.Plasma_6'})
    source = large_image_source_gdal.GDALFileTileSource(
        imagePath, projection='EPSG:3857', style=style)
    pixel = source.getPixel(region={'left': -13132910, 'top': 4010586, 'units': 'projection'})
    assert pixel == {
        'r': 247, 'g': 156, 'b': 60, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}
    # An explicit color list works as a palette as well.
    style = json.dumps({'band': 1, 'min': 0, 'max': 100,
                        'palette': ['#0000ff', '#00ff00', '#ff0000']})
    source = large_image_source_gdal.GDALFileTileSource(
        imagePath, projection='EPSG:3857', style=style)
    pixel = source.getPixel(region={'left': -13132910, 'top': 4010586, 'units': 'projection'})
    assert pixel == {
        'r': 137, 'g': 117, 'b': 0, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}
    # Units may also be given as an explicit SRS string.
    source = large_image_source_gdal.GDALFileTileSource(imagePath, projection='EPSG:3857')
    pixel = source.getPixel(region={'left': -13132910, 'top': 4010586, 'units': 'EPSG:3857'})
    assert pixel == {
        'r': 94, 'g': 98, 'b': 99, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}
    pixel = source.getPixel(region={'left': -117.975, 'top': 33.865, 'units': 'WGS84'})
    assert pixel == {
        'r': 94, 'g': 98, 'b': 99, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}
    # Even when the source has no projection, SRS units are converted and the
    # pixel matches the band values.
    source = large_image_source_gdal.GDALFileTileSource(imagePath)
    pixel = source.getPixel(region={'left': -13132910, 'top': 4010586, 'units': 'EPSG:3857'})
    assert pixel == {
        'r': 94, 'g': 98, 'b': 99, 'a': 255, 'bands': {1: 77.0, 2: 82.0, 3: 84.0}}
def testSourceErrors():
testDir = os.path.dirname(os.path.realpath(__file__))
imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')
with pytest.raises(TileSourceException, match='must not be geographic'):
large_image_source_gdal.GDALFileTileSource(imagePath, 'EPSG:4326')
imagePath = os.path.join(testDir, 'test_files', 'zero_gi.tif')
with pytest.raises(TileSourceException, match='cannot be opened via'):
large_image_source_gdal.GDALFileTileSource(imagePath)
imagePath = os.path.join(testDir, 'test_files', 'yb10kx5k.png')
with pytest.raises(TileSourceException, match='does not have a projected scale'):
large_image_source_gdal.GDALFileTileSource(imagePath)
def testStereographicProjection():
testDir = os.path.dirname(os.path.realpath(__file__))
imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')
# specify unitsPerPixel
with pytest.raises(TileSourceException, match='unitsPerPixel must be specified'):
large_image_source_gdal.GDALFileTileSource(imagePath, 'EPSG:3411')
# But will pass if unitsPerPixel is specified
large_image_source_gdal.GDALFileTileSource(imagePath, 'EPSG:3411', unitsPerPixel=150000)
def testProj4Proj():
# Test obtaining pyproj.Proj projection values
proj4Proj = large_image_source_gdal.GDALFileTileSource._proj4Proj
proj = proj4Proj(b'epsg:4326')
assert proj4Proj(u'epsg:4326').srs == proj.srs
assert proj4Proj('proj4:EPSG:4326').srs == proj.srs
assert proj4Proj(4326) is None
def testConvertProjectionUnits():
testDir = os.path.dirname(os.path.realpath(__file__))
imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')
tsNoProj = large_image_source_gdal.GDALFileTileSource(imagePath)
result = tsNoProj._convertProjectionUnits(
-13024380, 3895303, None, None, None, None, 'EPSG:3857')
assert result[0] == pytest.approx(147, 1)
assert result[1] == pytest.approx(149, 1)
assert result[2:] == (None, None, 'base_pixels')
result = tsNoProj._convertProjectionUnits(
None, None, -13080040, 3961860, None, None, 'EPSG:3857')
assert result[2] == pytest.approx(96, 1)
assert result[3] == pytest.approx(88, 1)
assert result[:2] == (None, None)
result = tsNoProj._convertProjectionUnits(
-117.5, 33, None, None, 0.5, 0.5, 'EPSG:4326')
assert result[0] == pytest.approx(96, 1)
assert result[1] == pytest.approx(149, 1)
assert result[2] == pytest.approx(147, 1)
assert result[3] == pytest.approx(89, 1)
result = tsNoProj._convertProjectionUnits(
None, None, -117, 33.5, 0.5, 0.5, 'EPSG:4326')
assert result[0] == pytest.approx(96, 1)
assert result[1] == pytest.approx(149, 1)
assert result[2] == pytest.approx(147, 1)
assert result[3] == pytest.approx(89, 1)
result = tsNoProj._convertProjectionUnits(
-117.5, 33, None, None, 0.5, 0.5, 'EPSG:4326', unitsWH='base_pixels')
assert result[0] == pytest.approx(96, 1)
assert result[1] == pytest.approx(149, 1)
assert result[2:] == (None, None, 'base_pixels')
with pytest.raises(TileSourceException, match='Cannot convert'):
tsNoProj._convertProjectionUnits(
-117.5, None, -117, None, None, None, 'EPSG:4326')
tsProj = large_image_source_gdal.GDALFileTileSource(imagePath, projection='EPSG:3857')
result = tsProj._convertProjectionUnits(
-13024380, 3895303, None, None, None, None, 'EPSG:3857')
assert result[0] == pytest.approx(-13024380, 1)
assert result[1] == pytest.approx(3895303, 1)
assert result[2:] == (None, None, 'projection')
def testGuardAgainstBadLatLong():
testDir = os.path.dirname(os.path.realpath(__file__))
imagePath = os.path.join(testDir, 'test_files', 'global_dem.tif')
source = large_image_source_gdal.GDALFileTileSource(imagePath)
bounds = source.getBounds(srs='EPSG:4326')
assert bounds['xmin'] == -180.00416667
assert bounds['xmax'] == 179.99583333
assert bounds['ymin'] == -89.99583333
assert bounds['ymax'] == 90
def testPalettizedGeotiff():
imagePath = utilities.externaldata('data/landcover_sample_1000.tif.sha512')
source = large_image_source_gdal.GDALFileTileSource(imagePath)
tileMetadata = source.getMetadata()
assert tileMetadata['tileWidth'] == 256
assert tileMetadata['tileHeight'] == 256
assert tileMetadata['sizeX'] == 687
assert tileMetadata['sizeY'] == 509
assert tileMetadata['levels'] == 3
assert tileMetadata['bounds']['srs'].strip().startswith(
'+proj=aea +lat_0=23 +lon_0=-96 +lat_1=29.5 +lat_2=45.5 +x_0=0 +y_0=0')
assert tileMetadata['geospatial']
assert len(tileMetadata['bands']) == 1
assert tileMetadata['bands'][1]['interpretation'] == 'palette'
# Getting the metadata with a specified projection will be different
source = large_image_source_gdal.GDALFileTileSource(
imagePath, projection='EPSG:3857', encoding='PNG')
tileMetadata = source.getMetadata()
assert tileMetadata['tileWidth'] == 256
assert tileMetadata['tileHeight'] == 256
assert tileMetadata['sizeX'] == 65536
assert tileMetadata['sizeY'] == 65536
assert tileMetadata['levels'] == 9
assert tileMetadata['bounds']['xmax'] == pytest.approx(-7837888, 1)
assert tileMetadata['bounds']['xmin'] == pytest.approx(-8909162, 1)
assert tileMetadata['bounds']['ymax'] == pytest.approx(5755717, 1)
assert tileMetadata['bounds']['ymin'] == pytest.approx(4876273, 1)
assert tileMetadata['bounds']['srs'] == '+init=epsg:3857'
assert tileMetadata['geospatial']
image = source.getTile(37, 46, 7)
image = PIL.Image.open(six.BytesIO(image))
image = numpy.asarray(image)
assert list(image[0, 0, :]) == [0, 0, 0, 0]
assert list(image[255, 0, :]) == [221, 201, 201, 255]
def testRetileProjection():
imagePath = utilities.externaldata('data/landcover_sample_1000.tif.sha512')
ts = large_image_source_gdal.GDALFileTileSource(imagePath, projection='EPSG:3857')
ti = ts.getSingleTile(tile_size=dict(width=1000, height=1000), tile_position=1000)
assert ti['tile'].size == 3000000
tile = ts.getTile(1178, 1507, 12)
assert len(tile) > 1000
def testInternalMetadata():
testDir = os.path.dirname(os.path.realpath(__file__))
imagePath = os.path.join(testDir, 'test_files', 'rgb_geotiff.tiff')
source = large_image_source_gdal.GDALFileTileSource(imagePath)
metadata = source.getInternalMetadata()
assert metadata['driverShortName'] == 'GTiff'
def testGetRegionWithProjection():
imagePath = utilities.externaldata('data/landcover_sample_1000.tif.sha512')
ts = large_image_source_gdal.GDALFileTileSource(imagePath, projection='EPSG:3857')
region, _ = ts.getRegion(output=dict(maxWidth=1024, maxHeight=1024),
format=constants.TILE_FORMAT_NUMPY)
assert region.shape == (1024, 1024, 4)
| true | true |
f724993bfb65f691c47509e2ab9bc57c799df3c7 | 1,799 | py | Python | app/models/supply_code.py | uwhvz/uwhvz | 72805d0e55740c3d90251dd4b4e40bf5c9e296d1 | [
"MIT"
] | 2 | 2019-12-15T06:30:37.000Z | 2020-01-26T23:12:27.000Z | app/models/supply_code.py | uwhvz/uwhvz | 72805d0e55740c3d90251dd4b4e40bf5c9e296d1 | [
"MIT"
] | 37 | 2020-01-22T02:36:32.000Z | 2020-10-06T15:05:37.000Z | app/models/supply_code.py | uwhvz/uwhvz | 72805d0e55740c3d90251dd4b4e40bf5c9e296d1 | [
"MIT"
] | 2 | 2020-06-24T03:07:36.000Z | 2020-06-24T03:10:46.000Z | import uuid
from datetime import datetime
from django.db import models
from django.utils import timezone
from .game import Game
from .player import Player
from .util import generate_code
class SupplyCodeManager(models.Manager):
    """Manager that creates supply codes with guaranteed-unique codes."""

    def create_supply_code(self, game: Game, value: int = 5, code: str = None) -> 'SupplyCode':
        """Create and persist a SupplyCode for *game*.

        If *code* is None, empty, or already in use, a fresh unique
        6-character code is generated instead.  A non-integer *value*
        falls back to the default of 5 points.

        Note: the original signature annotated these parameters with the
        literals ``5`` and ``None`` (no defaults, invalid type hints);
        real keyword defaults keep existing positional callers working.
        """
        if code is None or code == '' or self.filter(code=code):
            code = generate_code(6)
            # For set of all supply codes, each code must be unique
            while self.filter(code=code):
                code = generate_code(6)

        if type(value) is not int:
            # Anything that is not exactly an int falls back to 5 points.
            value = 5

        supply_code = self.model(code=code.upper(), game=game, value=value)
        supply_code.save()
        return supply_code
class SupplyCode(models.Model):
    """A redeemable code worth points, optionally claimed by a player."""

    # Random UUID primary key instead of an auto-increment integer.
    id: uuid.UUID = models.UUIDField(primary_key=True, default=uuid.uuid4)
    game: Game = models.ForeignKey(Game, on_delete=models.CASCADE)
    # Human-enterable 6-character code; unique across all games.
    code: str = models.CharField(max_length=6, unique=True)
    value: int = models.IntegerField()
    # Modifier recorded when the code is claimed (see claim()).
    point_modifier: int = models.IntegerField(default=0)
    active: bool = models.BooleanField(default=True)
    # Claim bookkeeping; both stay NULL until the code is redeemed.
    claimed_by: Player = models.ForeignKey(Player, on_delete=models.CASCADE, null=True, blank=True)
    claimed_at: datetime = models.DateTimeField(null=True, blank=True)
    created_at: datetime = models.DateTimeField(auto_now_add=True)
    modified_at: datetime = models.DateTimeField(auto_now=True)
    objects = SupplyCodeManager()

    def claim(self, player: Player, point_modifier: int) -> 'SupplyCode':
        """Mark this code as claimed now by *player* and persist it."""
        self.claimed_by = player
        self.claimed_at = timezone.now()
        self.point_modifier = point_modifier
        self.save()
        return self

    def __str__(self):
        # Display codes by their human-readable value in admin/logs.
        return self.code
| 32.709091 | 99 | 0.678155 | import uuid
from datetime import datetime
from django.db import models
from django.utils import timezone
from .game import Game
from .player import Player
from .util import generate_code
class SupplyCodeManager(models.Manager):
def create_supply_code(self, game: Game, value: 5, code: None) -> 'SupplyCode':
if code is None or code == '' or self.filter(code=code):
code = generate_code(6)
while self.filter(code=code):
code = generate_code(6)
if type(value) is int:
value = int(value)
else:
value = 5
supply_code = self.model(code=code.upper(), game=game, value=value)
supply_code.save()
return supply_code
class SupplyCode(models.Model):
id: uuid = models.UUIDField(primary_key=True, default=uuid.uuid4)
game: Game = models.ForeignKey(Game, on_delete=models.CASCADE)
code: str = models.CharField(max_length=6, unique=True)
value: int = models.IntegerField()
point_modifier: int = models.IntegerField(default=0)
active: bool = models.BooleanField(default=True)
claimed_by: Player = models.ForeignKey(Player, on_delete=models.CASCADE, null=True, blank=True)
claimed_at: datetime = models.DateTimeField(null=True, blank=True)
created_at: datetime = models.DateTimeField(auto_now_add=True)
modified_at: datetime = models.DateTimeField(auto_now=True)
objects = SupplyCodeManager()
def claim(self, player: Player, point_modifier: int) -> 'SupplyCode':
self.claimed_by = player
self.claimed_at = timezone.now()
self.point_modifier = point_modifier
self.save()
return self
def __str__(self):
return self.code
| true | true |
f724993d2ddc89e333243979253d709c8bed589f | 49 | py | Python | je_editor/ui/ui_event/text_process/__init__.py | JE-Chen/je_editor | 2f18dedb6f0eb27c38668dc53f520739c8d5c6c6 | [
"MIT"
] | 1 | 2021-12-10T14:57:15.000Z | 2021-12-10T14:57:15.000Z | je_editor/ui/ui_event/text_process/__init__.py | JE-Chen/je_editor | 2f18dedb6f0eb27c38668dc53f520739c8d5c6c6 | [
"MIT"
] | null | null | null | je_editor/ui/ui_event/text_process/__init__.py | JE-Chen/je_editor | 2f18dedb6f0eb27c38668dc53f520739c8d5c6c6 | [
"MIT"
] | null | null | null | from je_editor.ui.ui_event.text_process import *
| 24.5 | 48 | 0.836735 | from je_editor.ui.ui_event.text_process import *
| true | true |
f7249959198c554003e2dec70da578ce0dcef41f | 5,791 | py | Python | GANs/stargan/generate.py | shikisawamura/nnabla-examples | baf4e4cc620dedbf4368683325c0fb868676850d | [
"Apache-2.0"
] | 1 | 2020-08-03T12:49:25.000Z | 2020-08-03T12:49:25.000Z | GANs/stargan/generate.py | takuseno/nnabla-examples | 070d25078ad3d5458744dbfd390cdd926e20e573 | [
"Apache-2.0"
] | null | null | null | GANs/stargan/generate.py | takuseno/nnabla-examples | 070d25078ad3d5458744dbfd390cdd926e20e573 | [
"Apache-2.0"
] | 1 | 2020-04-25T06:11:28.000Z | 2020-04-25T06:11:28.000Z | # Copyright (c) 2019 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import argparse
import nnabla as nn
from nnabla.ext_utils import get_extension_context
import numpy as np
import json
import glob
import model
from nnabla.utils.image_utils import imread, imsave, imresize
import functools
def saveimage(path, img):
    """Denormalize *img* from [-1, 1] back to [0, 1] and write it to *path*."""
    denormalized = img * 0.5 + 0.5
    imsave(path, denormalized, channel_first=True)


def save_results(i, args, used_config, img_trg, lbl_trg):
    """Save the i-th translated image, named after its target domain."""
    flags = lbl_trg.d[0].reshape(lbl_trg.d[0].size)
    attrs = used_config["selected_attrs"]
    # Keep only the attribute names whose flag is set in the target label.
    active_attrs = [attr for flag, attr in zip(flags, attrs) if bool(flag) is True]
    target_domain = "_".join(active_attrs)
    filename = os.path.join(args.result_save_path,
                            "generated_{}_{}.png".format(i, target_domain))
    saveimage(filename, img_trg.d[0])
    print("Saved {}.".format(filename))
def img_preprocess(img_paths, used_config):
    """Load, normalize, and resize images; return arrays of images and names."""
    image_size = used_config["image_size"]

    def load_one(path):
        # Read as CHW RGB; scale uint8 data into [0, 1] before normalizing.
        img = imread(path, num_channels=3, channel_first=True)
        if img.dtype == np.uint8:
            img = img / 255.0
        img = (img - 0.5) / 0.5  # Map [0, 1] -> [-1, 1].
        return imresize(img, (image_size, image_size),
                        interpolate='bilinear', channel_first=True)

    images = [load_one(path) for path in img_paths]
    image_names = [path.split("/")[-1] for path in img_paths]
    return np.asarray(images), np.asarray(image_names)
def get_user_input(used_config):
    """Interactively ask, per selectable attribute, whether it should be set.

    For each attribute in ``used_config["selected_attrs"]``, prompts on
    stdin with a yes/no question and re-asks until the answer is exactly
    "yes" or "no".

    Returns a numpy int array of 0/1 flags, one per attribute
    (length ``used_config["c_dim"]``).
    """
    label = [0] * used_config["c_dim"]
    for i, attr in enumerate(used_config["selected_attrs"]):
        print("Use '{}'?".format(attr))
        while True:  # idiomatic replacement for `while 1`
            ans = input("type yes or no: ")
            if ans in ("yes", "no"):
                label[i] = 1 if ans == "yes" else 0
                break
            print("type 'yes' or 'no'.")
    return np.array(label)
def generate(args):
    """Translate each test image to a user-chosen attribute domain.

    Loads the training-time config and learned generator parameters,
    builds the generator graph once, then for each PNG under
    ``args.test_image_path`` asks the user for a target label, runs a
    forward pass, and saves the translated image.
    """
    # Load the config data used for training.
    with open(args.config, "r") as f:
        used_config = json.load(f)

    paramfile = args.pretrained_params
    img_paths = glob.glob(os.path.join(args.test_image_path, "*.png"))

    # The parameter file name must match the one recorded in the config.
    assert os.path.isfile(paramfile) and paramfile.split(
        "/")[-1] == used_config["pretrained_params"], "Corresponding parameter file not found."
    print("Learned attributes choice: {}".format(
        used_config["selected_attrs"]))

    # Prepare Generator and Discriminator based on user config.
    generator = functools.partial(
        model.generator, conv_dim=used_config["g_conv_dim"], c_dim=used_config["c_dim"], repeat_num=used_config["g_repeat_num"])

    # Input image (1 x 3 x H x W) and target label (1 x c_dim x 1 x 1).
    x_real = nn.Variable(
        [1, 3, used_config["image_size"], used_config["image_size"]])
    label_trg = nn.Variable([1, used_config["c_dim"], 1, 1])

    with nn.parameter_scope("gen"):
        x_fake = generator(x_real, label_trg)
    x_fake.persistent = True  # keep the output buffer across forward passes

    nn.load_parameters(paramfile)  # load learned parameters.

    images, image_names = img_preprocess(img_paths, used_config)

    for i, (image, image_name) in enumerate(zip(images, image_names)):
        # Get real images.
        print("Source image: {}".format(image_name))
        x_real.d = image

        # Generate target domain based on user input.
        label_trg.d = np.reshape(get_user_input(used_config), label_trg.shape)

        # Execute image translation.
        x_fake.forward(clear_no_need_grad=True)
        save_results(i, args, used_config, x_fake, label_trg)
def get_args():
    """Build and parse command-line arguments for generation.

    Also ensures the result-save directory exists before returning.
    """
    parser = argparse.ArgumentParser()
    # Generation
    parser.add_argument('--context', '-c', type=str,
                        default='cudnn', help="Extension path. ex) cpu, cudnn.")
    parser.add_argument("--device-id", "-d", type=str, default='0',
                        help='Device ID the training run on. This is only valid if you specify `-c cudnn`.')
    parser.add_argument("--type-config", "-t", type=str, default='float',
                        help='Type of computation. e.g. "float", "half".')
    parser.add_argument('--test-image-path', type=str,
                        help='a directory containing images used for image translation')
    parser.add_argument('--result-save-path', type=str,
                        default="tmp.results", help='a directory to save generated images')
    parser.add_argument('--pretrained-params', type=str, required=True,
                        help='path to the parameters used for generation.')
    parser.add_argument('--config', type=str, required=True,
                        help='path to the config file used for generation.')
    args = parser.parse_args()

    # Create the output directory up front so later saves cannot fail on it.
    if not os.path.isdir(args.result_save_path):
        os.makedirs(args.result_save_path)
    return args
def main():
    """Entry point: set up the NNabla compute context and run generation."""
    args = get_args()
    # Select the compute backend (e.g. cpu/cudnn) before building the graph.
    ctx = get_extension_context(
        args.context, device_id=args.device_id, type_config=args.type_config)
    nn.set_default_context(ctx)
    generate(args)


if __name__ == '__main__':
    main()
| 35.746914 | 128 | 0.64842 |
import os
import argparse
import nnabla as nn
from nnabla.ext_utils import get_extension_context
import numpy as np
import json
import glob
import model
from nnabla.utils.image_utils import imread, imsave, imresize
import functools
def saveimage(path, img):
img = (img * 0.5) + 0.5
imsave(path, img, channel_first=True)
def save_results(i, args, used_config, img_trg, lbl_trg):
target_attr_flags = lbl_trg.d[0].reshape(lbl_trg.d[0].size)
target_domain = "_".join([attr for idx, attr in zip(
target_attr_flags, used_config["selected_attrs"]) if bool(idx) is True])
result_x = img_trg.d[0]
filename = os.path.join(args.result_save_path,
"generated_{}_{}.png".format(i, target_domain))
saveimage(filename, result_x)
print("Saved {}.".format(filename))
return
def img_preprocess(img_paths, used_config):
image_size = used_config["image_size"]
images = list()
image_names = list()
for img_path in img_paths:
image = imread(img_path, num_channels=3, channel_first=True)
if image.dtype == np.uint8:
image = image / 255.0
image = (image - 0.5) / 0.5 # Normalize
image = imresize(image, (image_size, image_size),
interpolate='bilinear', channel_first=True)
images.append(image)
image_names.append(img_path.split("/")[-1])
return np.asarray(images), np.asarray(image_names)
def get_user_input(used_config):
label = [0 for _ in range(used_config["c_dim"])]
choice = used_config["selected_attrs"]
for i, c in enumerate(choice):
print("Use '{}'?".format(c))
while 1:
ans = input("type yes or no: ")
if ans in ["yes", "no"]:
label[i] = 1 if ans == "yes" else 0
break
else:
print("type 'yes' or 'no'.")
#label[i] = int(bool(input("if yes, type 1, if not, just press enter:")))
return np.array(label)
def generate(args):
# Load the config data used for training.
with open(args.config, "r") as f:
used_config = json.load(f)
paramfile = args.pretrained_params
img_paths = glob.glob(os.path.join(args.test_image_path, "*.png"))
assert os.path.isfile(paramfile) and paramfile.split(
"/")[-1] == used_config["pretrained_params"], "Corresponding parameter file not found."
print("Learned attributes choice: {}".format(
used_config["selected_attrs"]))
# Prepare Generator and Discriminator based on user config.
generator = functools.partial(
model.generator, conv_dim=used_config["g_conv_dim"], c_dim=used_config["c_dim"], repeat_num=used_config["g_repeat_num"])
x_real = nn.Variable(
[1, 3, used_config["image_size"], used_config["image_size"]])
label_trg = nn.Variable([1, used_config["c_dim"], 1, 1])
with nn.parameter_scope("gen"):
x_fake = generator(x_real, label_trg)
x_fake.persistent = True
nn.load_parameters(paramfile) # load learned parameters.
images, image_names = img_preprocess(img_paths, used_config)
for i, (image, image_name) in enumerate(zip(images, image_names)):
# Get real images.
print("Source image: {}".format(image_name))
x_real.d = image
# Generate target domain based on user input.
label_trg.d = np.reshape(get_user_input(used_config), label_trg.shape)
# Execute image translation.
x_fake.forward(clear_no_need_grad=True)
save_results(i, args, used_config, x_fake, label_trg)
def get_args():
parser = argparse.ArgumentParser()
# Generation
parser.add_argument('--context', '-c', type=str,
default='cudnn', help="Extension path. ex) cpu, cudnn.")
parser.add_argument("--device-id", "-d", type=str, default='0',
help='Device ID the training run on. This is only valid if you specify `-c cudnn`.')
parser.add_argument("--type-config", "-t", type=str, default='float',
help='Type of computation. e.g. "float", "half".')
parser.add_argument('--test-image-path', type=str,
help='a directory containing images used for image translation')
parser.add_argument('--result-save-path', type=str,
default="tmp.results", help='a directory to save generated images')
parser.add_argument('--pretrained-params', type=str, required=True,
help='path to the parameters used for generation.')
parser.add_argument('--config', type=str, required=True,
help='path to the config file used for generation.')
args = parser.parse_args()
if not os.path.isdir(args.result_save_path):
os.makedirs(args.result_save_path)
return args
def main():
    """Parse CLI options, set up the NNabla execution context and run
    the generation pass."""
    parsed = get_args()
    extension_ctx = get_extension_context(
        parsed.context,
        device_id=parsed.device_id,
        type_config=parsed.type_config)
    nn.set_default_context(extension_ctx)
    generate(parsed)
# Script entry point: only run generation when executed directly.
if __name__ == '__main__':
    main()
| true | true |
f72499cae7a442091d28cd688401d7778b935183 | 486 | py | Python | tests/test_methods/test_calls.py | jackwardell/SlackTime | c40be4854a26084e1a368a975e220d613c14d8d8 | [
"Apache-2.0"
] | 2 | 2020-09-24T00:07:13.000Z | 2020-09-27T19:27:06.000Z | tests/test_methods/test_calls.py | jackwardell/SlackTime | c40be4854a26084e1a368a975e220d613c14d8d8 | [
"Apache-2.0"
] | null | null | null | tests/test_methods/test_calls.py | jackwardell/SlackTime | c40be4854a26084e1a368a975e220d613c14d8d8 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
def test_calls_add(slack_time):
assert slack_time.calls.add
def test_calls_end(slack_time):
assert slack_time.calls.end
def test_calls_info(slack_time):
assert slack_time.calls.info
def test_calls_update(slack_time):
assert slack_time.calls.update
def test_calls_participants_add(slack_time):
assert slack_time.calls.participants.add
def test_calls_participants_remove(slack_time):
assert slack_time.calls.participants.remove
| 18.692308 | 47 | 0.781893 |
def test_calls_add(slack_time):
assert slack_time.calls.add
def test_calls_end(slack_time):
assert slack_time.calls.end
def test_calls_info(slack_time):
assert slack_time.calls.info
def test_calls_update(slack_time):
assert slack_time.calls.update
def test_calls_participants_add(slack_time):
assert slack_time.calls.participants.add
def test_calls_participants_remove(slack_time):
assert slack_time.calls.participants.remove
| true | true |
f7249a82deee9e950217299ff16ff9a37d24226b | 407 | py | Python | recruiter_portal/urls.py | yhaojin/recruitment_pipeline | 860f25185864a1b259d08e88f42aca86f8206a4e | [
"MIT"
] | null | null | null | recruiter_portal/urls.py | yhaojin/recruitment_pipeline | 860f25185864a1b259d08e88f42aca86f8206a4e | [
"MIT"
] | null | null | null | recruiter_portal/urls.py | yhaojin/recruitment_pipeline | 860f25185864a1b259d08e88f42aca86f8206a4e | [
"MIT"
] | null | null | null | from django.urls import path
from .views import RecruiterIndexView, take_on_application_view, SaveTaskChangesView
# URL routes for the recruiter portal app. Pattern order is significant
# for Django's resolver, so keep new routes below the existing ones.
urlpatterns = [
    # Portal landing page.
    path('', RecruiterIndexView.as_view(), name='recruiter_portal'),
    # Take ownership of an application by primary key (presumably assigns
    # it to the current recruiter -- confirm against the view).
    path('take_on_application/<application_pk>/', take_on_application_view, name='take_on_application'),
    # Persist edits to the task identified by its primary key.
    path('save_task_changes/<task_pk>/', SaveTaskChangesView.as_view(), name='save_task_changes'),
]
| 40.7 | 104 | 0.783784 | from django.urls import path
from .views import RecruiterIndexView, take_on_application_view, SaveTaskChangesView
urlpatterns = [
path('', RecruiterIndexView.as_view(), name='recruiter_portal'),
path('take_on_application/<application_pk>/', take_on_application_view, name='take_on_application'),
path('save_task_changes/<task_pk>/', SaveTaskChangesView.as_view(), name='save_task_changes'),
]
| true | true |
f7249ae817449c506e905d847baae94b9a76abf9 | 1,594 | py | Python | numpy/distutils/command/build.py | gmabey/numpy | 9e9ec3821c1d6a055543e54336ecb2c98ec42c5f | [
"BSD-3-Clause"
] | 652 | 2015-07-26T00:00:17.000Z | 2022-02-24T18:30:04.000Z | numpy/distutils/command/build.py | gmabey/numpy | 9e9ec3821c1d6a055543e54336ecb2c98ec42c5f | [
"BSD-3-Clause"
] | 8 | 2015-09-07T03:38:19.000Z | 2021-05-23T03:18:51.000Z | numpy/distutils/command/build.py | gmabey/numpy | 9e9ec3821c1d6a055543e54336ecb2c98ec42c5f | [
"BSD-3-Clause"
] | 40 | 2015-07-24T19:45:08.000Z | 2021-11-01T14:54:56.000Z | from __future__ import division, absolute_import, print_function
import os
import sys
from distutils.command.build import build as old_build
from distutils.util import get_platform
from numpy.distutils.command.config_compiler import show_fortran_compilers
class build(old_build):
    """Extended distutils ``build`` command.

    Adds the numpy.distutils specific sub-commands (``config_cc``,
    ``config_fc``, ``build_src``), a ``--fcompiler`` option to select the
    Fortran compiler and a ``--jobs`` option for parallel builds.
    """

    sub_commands = [('config_cc', lambda *args: True),
                    ('config_fc', lambda *args: True),
                    ('build_src', old_build.has_ext_modules),
                    ] + old_build.sub_commands

    user_options = old_build.user_options + [
        ('fcompiler=', None,
         "specify the Fortran compiler type"),
        ('jobs=', 'j',
         "number of parallel jobs"),
        ]

    help_options = old_build.help_options + [
        ('help-fcompiler', None, "list available Fortran compilers",
         show_fortran_compilers),
        ]

    def initialize_options(self):
        old_build.initialize_options(self)
        self.fcompiler = None
        self.jobs = None

    def finalize_options(self):
        if self.jobs:
            try:
                self.jobs = int(self.jobs)
            except ValueError:
                raise ValueError("--jobs/-j argument must be an integer")
        build_scripts = self.build_scripts
        old_build.finalize_options(self)
        # Use sys.version_info rather than slicing sys.version: the string
        # slice truncates two-digit minor versions (Python 3.10 -> "3.1").
        plat_specifier = ".%s-%d.%d" % ((get_platform(),)
                                        + tuple(sys.version_info[:2]))
        if build_scripts is None:
            self.build_scripts = os.path.join(self.build_base,
                                              'scripts' + plat_specifier)

    def run(self):
        old_build.run(self)
| 33.208333 | 74 | 0.604141 | from __future__ import division, absolute_import, print_function
import os
import sys
from distutils.command.build import build as old_build
from distutils.util import get_platform
from numpy.distutils.command.config_compiler import show_fortran_compilers
class build(old_build):
sub_commands = [('config_cc', lambda *args: True),
('config_fc', lambda *args: True),
('build_src', old_build.has_ext_modules),
] + old_build.sub_commands
user_options = old_build.user_options + [
('fcompiler=', None,
"specify the Fortran compiler type"),
('jobs=', 'j',
"number of parallel jobs"),
]
help_options = old_build.help_options + [
('help-fcompiler', None, "list available Fortran compilers",
show_fortran_compilers),
]
def initialize_options(self):
old_build.initialize_options(self)
self.fcompiler = None
self.jobs = None
def finalize_options(self):
if self.jobs:
try:
self.jobs = int(self.jobs)
except ValueError:
raise ValueError("--jobs/-j argument must be an integer")
build_scripts = self.build_scripts
old_build.finalize_options(self)
plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
if build_scripts is None:
self.build_scripts = os.path.join(self.build_base,
'scripts' + plat_specifier)
def run(self):
old_build.run(self)
| true | true |
f7249c7b22c9dba1da94e74c57ecbd871341b782 | 1,824 | py | Python | test/functional/signmessages.py | Whiff-dev/WhiffV2.0 | e44fffbe9f448e2bd2362cc74057bc541594f58b | [
"MIT"
] | null | null | null | test/functional/signmessages.py | Whiff-dev/WhiffV2.0 | e44fffbe9f448e2bd2362cc74057bc541594f58b | [
"MIT"
] | null | null | null | test/functional/signmessages.py | Whiff-dev/WhiffV2.0 | e44fffbe9f448e2bd2362cc74057bc541594f58b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Copyright (c) 2017 The Whiff Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test RPC commands for signing and verifying messages."""
from test_framework.test_framework import WhiffTestFramework
from test_framework.util import assert_equal
class SignMessagesTest(WhiffTestFramework):
    """Exercise the message signing and verification RPCs."""

    def set_test_params(self):
        # One node on a fresh chain is enough for signing tests.
        self.setup_clean_chain = True
        self.num_nodes = 1

    def run_test(self):
        node = self.nodes[0]
        msg = 'This is just a test message'

        self.log.info('test signing with priv_key')
        key = 'cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N'
        addr = 'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB'
        want = 'H5vCbG+WhOeOPJ3jf6oux/1oSjkuIGZigCw4NW+A0/fSDlgdO4fMq0SWSfx7gUMB9kuG+t/0BQxtXaTCr7v9fGM='
        sig = node.signmessagewithprivkey(key, msg)
        assert_equal(want, sig)
        assert(node.verifymessage(addr, sig, msg))

        self.log.info('test signing with an address with wallet')
        wallet_addr = node.getnewaddress()
        wallet_sig = node.signmessage(wallet_addr, msg)
        assert(node.verifymessage(wallet_addr, wallet_sig, msg))

        self.log.info('test verifying with another address should not work')
        other_addr = node.getnewaddress()
        other_sig = node.signmessage(other_addr, msg)
        assert(not node.verifymessage(other_addr, wallet_sig, msg))
        assert(not node.verifymessage(wallet_addr, other_sig, msg))
# Entry point when executed directly by the functional test runner.
if __name__ == '__main__':
    SignMessagesTest().main()
| 45.6 | 119 | 0.736294 |
from test_framework.test_framework import WhiffTestFramework
from test_framework.util import assert_equal
class SignMessagesTest(WhiffTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
message = 'This is just a test message'
self.log.info('test signing with priv_key')
priv_key = 'cUeKHd5orzT3mz8P9pxyREHfsWtVfgsfDjiZZBcjUBAaGk1BTj7N'
address = 'mpLQjfK79b7CCV4VMJWEWAj5Mpx8Up5zxB'
expected_signature = 'H5vCbG+WhOeOPJ3jf6oux/1oSjkuIGZigCw4NW+A0/fSDlgdO4fMq0SWSfx7gUMB9kuG+t/0BQxtXaTCr7v9fGM='
signature = self.nodes[0].signmessagewithprivkey(priv_key, message)
assert_equal(expected_signature, signature)
assert(self.nodes[0].verifymessage(address, signature, message))
self.log.info('test signing with an address with wallet')
address = self.nodes[0].getnewaddress()
signature = self.nodes[0].signmessage(address, message)
assert(self.nodes[0].verifymessage(address, signature, message))
self.log.info('test verifying with another address should not work')
other_address = self.nodes[0].getnewaddress()
other_signature = self.nodes[0].signmessage(other_address, message)
assert(not self.nodes[0].verifymessage(other_address, signature, message))
assert(not self.nodes[0].verifymessage(address, other_signature, message))
if __name__ == '__main__':
SignMessagesTest().main()
| true | true |
f7249cef52c93864a1af11a36d8ddabcc8314339 | 201,806 | py | Python | cinder/tests/test_hp3par.py | AO-AO/cmss-cinder | d1212908041e431d0fa4d42b40c4459a193484e6 | [
"Apache-2.0"
] | null | null | null | cinder/tests/test_hp3par.py | AO-AO/cmss-cinder | d1212908041e431d0fa4d42b40c4459a193484e6 | [
"Apache-2.0"
] | null | null | null | cinder/tests/test_hp3par.py | AO-AO/cmss-cinder | d1212908041e431d0fa4d42b40c4459a193484e6 | [
"Apache-2.0"
] | null | null | null | #
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for OpenStack Cinder volume drivers."""
import mock
import ast
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests import fake_hp_3par_client as hp3parclient
from cinder.volume.drivers.san.hp import hp_3par_common as hpcommon
from cinder.volume.drivers.san.hp import hp_3par_fc as hpfcdriver
from cinder.volume.drivers.san.hp import hp_3par_iscsi as hpdriver
from cinder.volume import qos_specs
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
# Short-hand for the (faked) hp3parclient exception module.
hpexceptions = hp3parclient.hpexceptions
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Fake CPG (Common Provisioning Group) names used by the mocked array.
HP3PAR_CPG = 'OpenStackCPG'
HP3PAR_CPG2 = 'fakepool'
HP3PAR_CPG_QOS = 'qospool'
HP3PAR_CPG_SNAP = 'OpenStackCPGSnap'
# Fake array credentials and SAN/SSH connection settings.
HP3PAR_USER_NAME = 'testUser'
HP3PAR_USER_PASS = 'testPassword'
HP3PAR_SAN_IP = '2.2.2.2'
HP3PAR_SAN_SSH_PORT = 999
HP3PAR_SAN_SSH_CON_TIMEOUT = 44
HP3PAR_SAN_SSH_PRIVATE = 'foobar'
# Scheduler goodness/filter expressions reported in the driver stats.
GOODNESS_FUNCTION = \
    "stats.capacity_utilization < 0.6? 100:25"
FILTER_FUNCTION = \
    "stats.total_volumes < 400 && stats.capacity_utilization < 0.8"
# 3PAR host metadata keys that hold the CHAP credentials.
CHAP_USER_KEY = "HPQ-cinder-CHAP-name"
CHAP_PASS_KEY = "HPQ-cinder-CHAP-secret"
# Flash-cache policy values expected by the mocked client.
FLASH_CACHE_ENABLED = 1
FLASH_CACHE_DISABLED = 2
class HP3PARBaseDriver(object):
class CommentMatcher(object):
def __init__(self, f, expect):
self.assertEqual = f
self.expect = expect
def __eq__(self, actual):
actual_as_dict = dict(ast.literal_eval(actual))
self.assertEqual(self.expect, actual_as_dict)
return True
VOLUME_ID = 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'
CLONE_ID = 'd03338a9-9115-48a3-8dfc-000000000000'
VOLUME_TYPE_ID_DEDUP = 'd03338a9-9115-48a3-8dfc-11111111111'
VOLUME_TYPE_ID_FLASH_CACHE = 'd03338a9-9115-48a3-8dfc-22222222222'
VOLUME_NAME = 'volume-' + VOLUME_ID
VOLUME_NAME_3PAR = 'osv-0DM4qZEVSKON-DXN-NwVpw'
SNAPSHOT_ID = '2f823bdc-e36e-4dc8-bd15-de1c7a28ff31'
SNAPSHOT_NAME = 'snapshot-2f823bdc-e36e-4dc8-bd15-de1c7a28ff31'
VOLUME_3PAR_NAME = 'osv-0DM4qZEVSKON-DXN-NwVpw'
SNAPSHOT_3PAR_NAME = 'oss-L4I73ONuTci9Fd4ceij-MQ'
# fake host on the 3par
FAKE_HOST = 'fakehost'
FAKE_CINDER_HOST = 'fakehost@foo#' + HP3PAR_CPG
USER_ID = '2689d9a913974c008b1d859013f23607'
PROJECT_ID = 'fac88235b9d64685a3530f73e490348f'
VOLUME_ID_SNAP = '761fc5e5-5191-4ec7-aeba-33e36de44156'
FAKE_DESC = 'test description name'
FAKE_FC_PORTS = [{'portPos': {'node': 7, 'slot': 1, 'cardPort': 1},
'portWWN': '0987654321234',
'protocol': 1,
'mode': 2,
'linkState': 4},
{'portPos': {'node': 6, 'slot': 1, 'cardPort': 1},
'portWWN': '123456789000987',
'protocol': 1,
'mode': 2,
'linkState': 4}]
QOS = {'qos:maxIOPS': '1000', 'qos:maxBWS': '50',
'qos:minIOPS': '100', 'qos:minBWS': '25',
'qos:latency': '25', 'qos:priority': 'low'}
QOS_SPECS = {'maxIOPS': '1000', 'maxBWS': '50',
'minIOPS': '100', 'minBWS': '25',
'latency': '25', 'priority': 'low'}
VVS_NAME = "myvvs"
FAKE_ISCSI_PORT = {'portPos': {'node': 8, 'slot': 1, 'cardPort': 1},
'protocol': 2,
'mode': 2,
'IPAddr': '1.1.1.2',
'iSCSIName': ('iqn.2000-05.com.3pardata:'
'21810002ac00383d'),
'linkState': 4}
volume = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'size': 2,
'host': FAKE_CINDER_HOST,
'volume_type': None,
'volume_type_id': None}
volume_encrypted = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'size': 2,
'host': FAKE_CINDER_HOST,
'volume_type': None,
'volume_type_id': None,
'encryption_key_id': 'fake_key'}
volume_dedup = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'size': 2,
'host': FAKE_CINDER_HOST,
'volume_type': 'dedup',
'volume_type_id': VOLUME_TYPE_ID_DEDUP}
volume_pool = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'size': 2,
'host': volume_utils.append_host(FAKE_HOST, HP3PAR_CPG2),
'volume_type': None,
'volume_type_id': None}
volume_qos = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'size': 2,
'host': FAKE_CINDER_HOST,
'volume_type': None,
'volume_type_id': 'gold'}
volume_flash_cache = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'size': 2,
'host': FAKE_CINDER_HOST,
'volume_type': None,
'volume_type_id': VOLUME_TYPE_ID_FLASH_CACHE}
snapshot = {'name': SNAPSHOT_NAME,
'id': SNAPSHOT_ID,
'user_id': USER_ID,
'project_id': PROJECT_ID,
'volume_id': VOLUME_ID_SNAP,
'volume_name': VOLUME_NAME,
'status': 'creating',
'progress': '0%',
'volume_size': 2,
'display_name': 'fakesnap',
'display_description': FAKE_DESC}
wwn = ["123456789012345", "123456789054321"]
connector = {'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:222',
'wwpns': [wwn[0], wwn[1]],
'wwnns': ["223456789012345", "223456789054321"],
'host': FAKE_HOST}
volume_type = {'name': 'gold',
'deleted': False,
'updated_at': None,
'extra_specs': {'cpg': HP3PAR_CPG2,
'qos:maxIOPS': '1000',
'qos:maxBWS': '50',
'qos:minIOPS': '100',
'qos:minBWS': '25',
'qos:latency': '25',
'qos:priority': 'low'},
'deleted_at': None,
'id': 'gold'}
volume_type_dedup = {'name': 'dedup',
'deleted': False,
'updated_at': None,
'extra_specs': {'cpg': HP3PAR_CPG2,
'provisioning': 'dedup'},
'deleted_at': None,
'id': VOLUME_TYPE_ID_DEDUP}
volume_type_flash_cache = {'name': 'flash-cache-on',
'deleted': False,
'updated_at': None,
'extra_specs': {'cpg': HP3PAR_CPG2,
'hp3par:flash_cache': 'true'},
'deleted_at': None,
'id': VOLUME_TYPE_ID_FLASH_CACHE}
flash_cache_3par_keys = {'flash_cache': 'true'}
cpgs = [
{'SAGrowth': {'LDLayout': {'diskPatterns': [{'diskType': 2}]},
'incrementMiB': 8192},
'SAUsage': {'rawTotalMiB': 24576,
'rawUsedMiB': 768,
'totalMiB': 8192,
'usedMiB': 256},
'SDGrowth': {'LDLayout': {'RAIDType': 4,
'diskPatterns': [{'diskType': 2}]},
'incrementMiB': 32768},
'SDUsage': {'rawTotalMiB': 49152,
'rawUsedMiB': 1023,
'totalMiB': 36864,
'usedMiB': 1024 * 1},
'UsrUsage': {'rawTotalMiB': 57344,
'rawUsedMiB': 43349,
'totalMiB': 43008,
'usedMiB': 1024 * 20},
'additionalStates': [],
'degradedStates': [],
'failedStates': [],
'id': 5,
'name': HP3PAR_CPG,
'numFPVVs': 2,
'numTPVVs': 0,
'numTDVVs': 1,
'state': 1,
'uuid': '29c214aa-62b9-41c8-b198-543f6cf24edf'}]
TASK_DONE = 1
TASK_ACTIVE = 2
STATUS_DONE = {'status': 1}
STATUS_ACTIVE = {'status': 2}
mock_client_conf = {
'PORT_MODE_TARGET': 2,
'PORT_STATE_READY': 4,
'PORT_PROTO_ISCSI': 2,
'PORT_PROTO_FC': 1,
'TASK_DONE': TASK_DONE,
'TASK_ACTIVE': TASK_ACTIVE,
'HOST_EDIT_ADD': 1,
'CHAP_INITIATOR': 1,
'CHAP_TARGET': 2,
'getPorts.return_value': {
'members': FAKE_FC_PORTS + [FAKE_ISCSI_PORT]
}
}
RETYPE_VVS_NAME = "yourvvs"
RETYPE_HOST = {
u'host': u'mark-stack1@3parfc',
u'capabilities': {
'QoS_support': True,
u'location_info': u'HP3PARDriver:1234567:MARK_TEST_CPG',
u'timestamp': u'2014-06-04T19:03:32.485540',
u'allocated_capacity_gb': 0,
u'volume_backend_name': u'3parfc',
u'free_capacity_gb': u'infinite',
u'driver_version': u'2.0.3',
u'total_capacity_gb': u'infinite',
u'reserved_percentage': 0,
u'vendor_name': u'Hewlett-Packard',
u'storage_protocol': u'FC'
}
}
RETYPE_HOST_NOT3PAR = {
u'host': u'mark-stack1@3parfc',
u'capabilities': {
u'location_info': u'XXXDriverXXX:1610771:MARK_TEST_CPG',
}
}
RETYPE_QOS_SPECS = {'maxIOPS': '1000', 'maxBWS': '50',
'minIOPS': '100', 'minBWS': '25',
'latency': '25', 'priority': 'high'}
RETYPE_VOLUME_TYPE_ID = "FakeVolId"
RETYPE_VOLUME_TYPE_0 = {
'name': 'red',
'id': RETYPE_VOLUME_TYPE_ID,
'extra_specs': {
'cpg': HP3PAR_CPG,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs': RETYPE_VVS_NAME,
'qos': RETYPE_QOS_SPECS,
'tpvv': True,
'tdvv': False,
'volume_type': volume_type
}
}
RETYPE_VOLUME_TYPE_1 = {
'name': 'white',
'id': RETYPE_VOLUME_TYPE_ID,
'extra_specs': {
'cpg': HP3PAR_CPG,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs': VVS_NAME,
'qos': QOS,
'tpvv': True,
'tdvv': False,
'volume_type': volume_type
}
}
RETYPE_VOLUME_TYPE_2 = {
'name': 'blue',
'id': RETYPE_VOLUME_TYPE_ID,
'extra_specs': {
'cpg': HP3PAR_CPG_QOS,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs': RETYPE_VVS_NAME,
'qos': RETYPE_QOS_SPECS,
'tpvv': True,
'tdvv': False,
'volume_type': volume_type
}
}
RETYPE_VOLUME_TYPE_3 = {
'name': 'purple',
'id': RETYPE_VOLUME_TYPE_ID,
'extra_specs': {
'cpg': HP3PAR_CPG_QOS,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs': RETYPE_VVS_NAME,
'qos': RETYPE_QOS_SPECS,
'tpvv': False,
'tdvv': True,
'volume_type': volume_type
}
}
RETYPE_VOLUME_TYPE_BAD_PERSONA = {
'name': 'bad_persona',
'id': 'any_id',
'extra_specs': {
'hp3par:persona': '99 - invalid'
}
}
RETYPE_VOLUME_TYPE_BAD_CPG = {
'name': 'bad_cpg',
'id': 'any_id',
'extra_specs': {
'cpg': 'bogus',
'snap_cpg': 'bogus',
'hp3par:persona': '2 - Generic-ALUA'
}
}
MANAGE_VOLUME_INFO = {
'userCPG': 'testUserCpg0',
'snapCPG': 'testSnapCpg0',
'provisioningType': 1,
'comment': "{'display_name': 'Foo Volume'}"
}
MV_INFO_WITH_NO_SNAPCPG = {
'userCPG': 'testUserCpg0',
'provisioningType': 1,
'comment': "{'display_name': 'Foo Volume'}"
}
RETYPE_TEST_COMMENT = "{'retype_test': 'test comment'}"
RETYPE_VOLUME_INFO_0 = {
'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Retype Vol0',
'size': 1,
'host': RETYPE_HOST,
'userCPG': 'testUserCpg0',
'snapCPG': 'testSnapCpg0',
'provisioningType': 1,
'comment': RETYPE_TEST_COMMENT
}
RETYPE_TEST_COMMENT_1 = "{'retype_test': 'test comment 1'}"
RETYPE_VOLUME_INFO_1 = {
'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Retype Vol1',
'size': 1,
'host': RETYPE_HOST,
'userCPG': HP3PAR_CPG,
'snapCPG': HP3PAR_CPG_SNAP,
'provisioningType': 1,
'comment': RETYPE_TEST_COMMENT
}
RETYPE_TEST_COMMENT_2 = "{'retype_test': 'test comment 2'}"
RETYPE_VOLUME_INFO_2 = {
'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Retype Vol2',
'size': 1,
'host': RETYPE_HOST,
'userCPG': HP3PAR_CPG,
'snapCPG': HP3PAR_CPG_SNAP,
'provisioningType': 3,
'comment': RETYPE_TEST_COMMENT
}
# Test for when we don't get a snapCPG.
RETYPE_VOLUME_INFO_NO_SNAP = {
'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Retype Vol2',
'size': 1,
'host': RETYPE_HOST,
'userCPG': 'testUserCpg2',
'provisioningType': 1,
'comment': '{}'
}
RETYPE_CONF = {
'TASK_ACTIVE': TASK_ACTIVE,
'TASK_DONE': TASK_DONE,
'getTask.return_value': STATUS_DONE,
'getStorageSystemInfo.return_value': {'serialNumber': '1234567'},
'getVolume.return_value': RETYPE_VOLUME_INFO_0,
'modifyVolume.return_value': ("anyResponse", {'taskid': 1})
}
# 3PAR retype currently doesn't use the diff. Existing code and fresh info
# from the array work better for the most part. Some use of the diff was
# intentionally removed to make _retype more usable for other use cases.
RETYPE_DIFF = None
wsapi_version_312 = {'major': 1,
'build': 30102422,
'minor': 3,
'revision': 1}
wsapi_version_for_dedup = {'major': 1,
'build': 30201120,
'minor': 4,
'revision': 1}
wsapi_version_for_flash_cache = {'major': 1,
'build': 30201200,
'minor': 4,
'revision': 2}
# Use this to point to latest version of wsapi
wsapi_version_latest = wsapi_version_for_flash_cache
standard_login = [
mock.call.login(HP3PAR_USER_NAME, HP3PAR_USER_PASS),
mock.call.setSSHOptions(
HP3PAR_SAN_IP,
HP3PAR_USER_NAME,
HP3PAR_USER_PASS,
missing_key_policy='AutoAddPolicy',
privatekey=HP3PAR_SAN_SSH_PRIVATE,
known_hosts_file=mock.ANY,
port=HP3PAR_SAN_SSH_PORT,
conn_timeout=HP3PAR_SAN_SSH_CON_TIMEOUT)]
standard_logout = [
mock.call.logout()]
def setup_configuration(self):
configuration = mock.Mock()
configuration.hp3par_debug = False
configuration.hp3par_username = HP3PAR_USER_NAME
configuration.hp3par_password = HP3PAR_USER_PASS
configuration.hp3par_api_url = 'https://1.1.1.1/api/v1'
configuration.hp3par_cpg = [HP3PAR_CPG, HP3PAR_CPG2]
configuration.hp3par_cpg_snap = HP3PAR_CPG_SNAP
configuration.iscsi_ip_address = '1.1.1.2'
configuration.iscsi_port = '1234'
configuration.san_ip = HP3PAR_SAN_IP
configuration.san_login = HP3PAR_USER_NAME
configuration.san_password = HP3PAR_USER_PASS
configuration.san_ssh_port = HP3PAR_SAN_SSH_PORT
configuration.ssh_conn_timeout = HP3PAR_SAN_SSH_CON_TIMEOUT
configuration.san_private_key = HP3PAR_SAN_SSH_PRIVATE
configuration.hp3par_snapshot_expiration = ""
configuration.hp3par_snapshot_retention = ""
configuration.hp3par_iscsi_ips = []
configuration.hp3par_iscsi_chap_enabled = False
configuration.goodness_function = GOODNESS_FUNCTION
configuration.filter_function = FILTER_FUNCTION
return configuration
@mock.patch(
'hp3parclient.client.HP3ParClient',
spec=True,
)
def setup_mock_client(self, _m_client, driver, conf=None, m_conf=None):
_m_client = _m_client.return_value
# Configure the base constants, defaults etc...
_m_client.configure_mock(**self.mock_client_conf)
# If m_conf, drop those over the top of the base_conf.
if m_conf is not None:
_m_client.configure_mock(**m_conf)
if conf is None:
conf = self.setup_configuration()
self.driver = driver(configuration=conf)
self.driver.do_setup(None)
return _m_client
@mock.patch('hp3parclient.version', "3.0.9")
def test_unsupported_client_version(self):
self.assertRaises(exception.InvalidInput,
self.setup_driver)
@mock.patch('hp3parclient.version', "3.1.2")
def test_ssh_options(self):
expected_hosts_key_file = "test_hosts_key_file"
orig_ssh_hosts_key_file = CONF.ssh_hosts_key_file
orig_strict_ssh_host_key_policy = CONF.strict_ssh_host_key_policy
CONF.ssh_hosts_key_file = expected_hosts_key_file
CONF.strict_ssh_host_key_policy = False
self.ctxt = context.get_admin_context()
mock_client = self.setup_mock_client(driver=hpfcdriver.HP3PARFCDriver)
CONF.ssh_hosts_key_file = orig_ssh_hosts_key_file
CONF.strict_ssh_host_key_policy = orig_strict_ssh_host_key_policy
expected = [
mock.call.login(HP3PAR_USER_NAME, HP3PAR_USER_PASS),
mock.call.setSSHOptions(
HP3PAR_SAN_IP,
HP3PAR_USER_NAME,
HP3PAR_USER_PASS,
privatekey=HP3PAR_SAN_SSH_PRIVATE,
known_hosts_file=expected_hosts_key_file,
missing_key_policy="AutoAddPolicy",
port=HP3PAR_SAN_SSH_PORT,
conn_timeout=HP3PAR_SAN_SSH_CON_TIMEOUT),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getCPG(HP3PAR_CPG2)]
mock_client.assert_has_calls(
expected +
self.standard_logout)
@mock.patch('hp3parclient.version', "3.1.2")
def test_ssh_options_strict(self):
expected_hosts_key_file = "test_hosts_key_file"
orig_ssh_hosts_key_file = CONF.ssh_hosts_key_file
orig_strict_ssh_host_key_policy = CONF.strict_ssh_host_key_policy
CONF.ssh_hosts_key_file = expected_hosts_key_file
CONF.strict_ssh_host_key_policy = True
self.ctxt = context.get_admin_context()
mock_client = self.setup_mock_client(driver=hpfcdriver.HP3PARFCDriver)
CONF.ssh_hosts_key_file = orig_ssh_hosts_key_file
CONF.strict_ssh_host_key_policy = orig_strict_ssh_host_key_policy
expected = [
mock.call.login(HP3PAR_USER_NAME, HP3PAR_USER_PASS),
mock.call.setSSHOptions(
HP3PAR_SAN_IP,
HP3PAR_USER_NAME,
HP3PAR_USER_PASS,
privatekey=HP3PAR_SAN_SSH_PRIVATE,
known_hosts_file=expected_hosts_key_file,
missing_key_policy="RejectPolicy",
port=HP3PAR_SAN_SSH_PORT,
conn_timeout=HP3PAR_SAN_SSH_CON_TIMEOUT),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getCPG(HP3PAR_CPG2)]
mock_client.assert_has_calls(expected + self.standard_logout)
def test_task_waiter(self):
task_statuses = [self.STATUS_ACTIVE, self.STATUS_ACTIVE]
def side_effect(*args):
return task_statuses and task_statuses.pop(0) or self.STATUS_DONE
conf = {'getTask.side_effect': side_effect}
mock_client = self.setup_driver(mock_conf=conf)
task_id = 1234
interval = .001
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
waiter = common.TaskWaiter(mock_client, task_id, interval)
status = waiter.wait_for_task()
expected = [
mock.call.getTask(task_id),
mock.call.getTask(task_id),
mock.call.getTask(task_id)
]
mock_client.assert_has_calls(expected)
self.assertEqual(status, self.STATUS_DONE)
    def test_create_volume(self):
        """create_volume should issue one createVolume WSAPI call with the
        expected CPG, size and metadata comment."""
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.create_volume(self.volume)
            # The driver stores OpenStack metadata in the 3PAR volume
            # comment as a JSON string.
            comment = (
                '{"display_name": "Foo Volume", "type": "OpenStack",'
                ' "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7",'
                ' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
            expected = [
                mock.call.createVolume(
                    self.VOLUME_3PAR_NAME,
                    HP3PAR_CPG,
                    # 1907 is presumably the 2 GB volume size converted to
                    # MiB by the driver -- TODO confirm the conversion.
                    1907, {
                        'comment': comment,
                        'tpvv': True,
                        'tdvv': False,
                        'snapCPG': HP3PAR_CPG_SNAP})]

            # The create call must be bracketed by the standard
            # login/logout sequences.
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
def test_create_volume_in_pool(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
return_model = self.driver.create_volume(self.volume_pool)
comment = (
'{"display_name": "Foo Volume", "type": "OpenStack",'
' "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7",'
' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
expected = [
mock.call.createVolume(
self.VOLUME_3PAR_NAME,
HP3PAR_CPG2,
1907, {
'comment': comment,
'tpvv': True,
'tdvv': False,
'snapCPG': HP3PAR_CPG_SNAP})]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(return_model, None)
@mock.patch.object(volume_types, 'get_volume_type')
def test_unsupported_dedup_volume_type(self, _mock_volume_types):
mock_client = self.setup_driver(wsapi_version=self.wsapi_version_312)
_mock_volume_types.return_value = {
'name': 'dedup',
'extra_specs': {
'cpg': HP3PAR_CPG_QOS,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs_name': self.VVS_NAME,
'qos': self.QOS,
'provisioning': 'dedup',
'volume_type': self.volume_type_dedup}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
self.assertRaises(exception.InvalidInput,
common.get_volume_settings_from_type_id,
self.VOLUME_TYPE_ID_DEDUP,
"mock")
@mock.patch.object(volume_types, 'get_volume_type')
def test_get_snap_cpg_from_volume_type(self, _mock_volume_types):
mock_client = self.setup_driver()
expected_type_snap_cpg = "type_snap_cpg"
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'cpg': HP3PAR_CPG,
'snap_cpg': expected_type_snap_cpg,
'volume_type': self.volume_type}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
result = common.get_volume_settings_from_type_id(
"mock", self.driver.configuration.hp3par_cpg)
self.assertEqual(expected_type_snap_cpg, result['snap_cpg'])
@mock.patch.object(volume_types, 'get_volume_type')
def test_get_snap_cpg_from_volume_type_cpg(self, _mock_volume_types):
mock_client = self.setup_driver()
expected_cpg = 'use_extra_specs_cpg'
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'cpg': expected_cpg,
'volume_type': self.volume_type}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
result = common.get_volume_settings_from_type_id(
"mock", self.driver.configuration.hp3par_cpg)
self.assertEqual(self.driver.configuration.hp3par_cpg_snap,
result['snap_cpg'])
@mock.patch.object(volume_types, 'get_volume_type')
def test_get_snap_cpg_from_volume_type_conf_snap_cpg(
self, _mock_volume_types):
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'volume_type': self.volume_type}}
conf = self.setup_configuration()
expected_snap_cpg = conf.hp3par_cpg_snap
mock_client = self.setup_driver(config=conf)
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
result = common.get_volume_settings_from_type_id(
"mock", self.driver.configuration.hp3par_cpg)
self.assertEqual(expected_snap_cpg, result['snap_cpg'])
@mock.patch.object(volume_types, 'get_volume_type')
def test_get_snap_cpg_from_volume_type_conf_cpg(
self, _mock_volume_types):
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'volume_type': self.volume_type}}
conf = self.setup_configuration()
conf.hp3par_cpg_snap = None
expected_cpg = conf.hp3par_cpg
mock_client = self.setup_driver(config=conf)
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
result = common.get_volume_settings_from_type_id(
"mock", self.driver.configuration.hp3par_cpg)
self.assertEqual(expected_cpg, result['snap_cpg'])
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_qos(self, _mock_volume_types):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'cpg': HP3PAR_CPG_QOS,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs_name': self.VVS_NAME,
'qos': self.QOS,
'tpvv': True,
'tdvv': False,
'volume_type': self.volume_type}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
return_model = self.driver.create_volume(self.volume_qos)
comment = (
'{"volume_type_name": "gold", "display_name": "Foo Volume"'
', "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7'
'", "volume_type_id": "gold", "volume_id": "d03338a9-91'
'15-48a3-8dfc-35cdfcdc15a7", "qos": {}, "type": "OpenStack"}')
expected = [
mock.call.getCPG(HP3PAR_CPG),
mock.call.createVolume(
self.VOLUME_3PAR_NAME,
HP3PAR_CPG,
1907, {
'comment': comment,
'tpvv': True,
'tdvv': False,
'snapCPG': HP3PAR_CPG_SNAP})]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(return_model, None)
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_dedup(self, _mock_volume_types):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
_mock_volume_types.return_value = {
'name': 'dedup',
'extra_specs': {
'cpg': HP3PAR_CPG_QOS,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs_name': self.VVS_NAME,
'qos': self.QOS,
'provisioning': 'dedup',
'volume_type': self.volume_type_dedup}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
return_model = self.driver.create_volume(self.volume_dedup)
comment = (
'{"volume_type_name": "dedup", "display_name": "Foo Volume"'
', "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7'
'", "volume_type_id": "d03338a9-9115-48a3-8dfc-11111111111"'
', "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"'
', "qos": {}, "type": "OpenStack"}')
expected = [
mock.call.getCPG(HP3PAR_CPG),
mock.call.createVolume(
self.VOLUME_3PAR_NAME,
HP3PAR_CPG,
1907, {
'comment': comment,
'tpvv': False,
'tdvv': True,
'snapCPG': HP3PAR_CPG_SNAP})]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(return_model, None)
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_flash_cache(self, _mock_volume_types):
# Setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
_mock_volume_types.return_value = {
'name': 'flash-cache-on',
'extra_specs': {
'cpg': HP3PAR_CPG2,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs_name': self.VVS_NAME,
'qos': self.QOS,
'tpvv': True,
'tdvv': False,
'hp3par:flash_cache': 'true',
'volume_type': self.volume_type_flash_cache}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
mock_client.getCPG.return_value = {'domain': None}
mock_client.FLASH_CACHE_ENABLED = FLASH_CACHE_ENABLED
mock_client.FLASH_CACHE_DISABLED = FLASH_CACHE_DISABLED
return_model = self.driver.create_volume(self.volume_flash_cache)
comment = (
'{"volume_type_name": "flash-cache-on", '
'"display_name": "Foo Volume", '
'"name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", '
'"volume_type_id": "d03338a9-9115-48a3-8dfc-22222222222", '
'"volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", '
'"qos": {}, "type": "OpenStack"}')
expected = [
mock.call.getCPG(HP3PAR_CPG),
mock.call.createVolume(
self.VOLUME_3PAR_NAME,
HP3PAR_CPG,
1907, {
'comment': comment,
'tpvv': True,
'tdvv': False,
'snapCPG': HP3PAR_CPG_SNAP}),
mock.call.getCPG(HP3PAR_CPG),
mock.call.createVolumeSet('vvs-0DM4qZEVSKON-DXN-NwVpw', None),
mock.call.createQoSRules(
'vvs-0DM4qZEVSKON-DXN-NwVpw',
{'priority': 2}
),
mock.call.modifyVolumeSet(
'vvs-0DM4qZEVSKON-DXN-NwVpw', flashCachePolicy=1),
mock.call.addVolumeToVolumeSet(
'vvs-0DM4qZEVSKON-DXN-NwVpw',
'osv-0DM4qZEVSKON-DXN-NwVpw')]
mock_client.assert_has_calls(
[mock.call.getWsApiVersion()] +
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(return_model, None)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_unsupported_flash_cache_volume(self, _mock_volume_types):
        """Flash cache on an older WSAPI version raises InvalidInput.

        The driver is set up with ``wsapi_version_312``, a version that
        (per this test's expectation) does not support flash cache, so
        ``get_flash_cache_policy`` must reject the request.
        """
        mock_client = self.setup_driver(wsapi_version=self.wsapi_version_312)
        _mock_volume_types.return_value = {
            'name': 'flash-cache-on',
            'extra_specs': {
                'cpg': HP3PAR_CPG2,
                'snap_cpg': HP3PAR_CPG_SNAP,
                'vvs_name': self.VVS_NAME,
                'qos': self.QOS,
                'tpvv': True,
                'tdvv': False,
                'hp3par:flash_cache': 'true',
                'volume_type': self.volume_type_flash_cache}}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            self.assertRaises(exception.InvalidInput,
                              common.get_flash_cache_policy,
                              self.flash_cache_3par_keys)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_not_3par(self, _mock_volume_types):
        """Retype to a non-3PAR destination host raises InvalidHost."""
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(exception.InvalidHost,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_1,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST_NOT3PAR)
            # Only the initial volume lookup should have happened.
            expected = [mock.call.getVolume(self.VOLUME_3PAR_NAME)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_volume_not_found(self, _mock_volume_types):
        """Retype of a volume missing on the backend propagates HTTPNotFound."""
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        # Backend reports the volume as missing.
        mock_client.getVolume.side_effect = hpexceptions.HTTPNotFound
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(hpexceptions.HTTPNotFound,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_1,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)
            expected = [mock.call.getVolume(self.VOLUME_3PAR_NAME)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_specs_error_reverts_snap_cpg(self, _mock_volume_types):
        """A failure mid-retype reverts the snap CPG to its old value.

        addVolumeToVolumeSet is forced to fail after the snap CPG has been
        changed; the driver must then modifyVolume back to the original
        settings.
        """
        _mock_volume_types.side_effect = [
            self.RETYPE_VOLUME_TYPE_1, self.RETYPE_VOLUME_TYPE_0]
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        mock_client.getVolume.return_value = self.RETYPE_VOLUME_INFO_0
        # Fail the QOS setting to test the revert of the snap CPG rename.
        mock_client.addVolumeToVolumeSet.side_effect = \
            hpexceptions.HTTPForbidden
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(hpexceptions.HTTPForbidden,
                              self.driver.retype,
                              self.ctxt,
                              {'id': self.VOLUME_ID},
                              self.RETYPE_VOLUME_TYPE_0,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)
            old_settings = {
                'snapCPG': self.RETYPE_VOLUME_INFO_0['snapCPG'],
                'comment': self.RETYPE_VOLUME_INFO_0['comment']}
            new_settings = {
                'snapCPG': (
                    self.RETYPE_VOLUME_TYPE_1['extra_specs']['snap_cpg']),
                'comment': mock.ANY}
            # First the new settings were applied ...
            expected = [
                mock.call.modifyVolume(self.VOLUME_3PAR_NAME, new_settings)
            ]
            mock_client.assert_has_calls(expected)
            # ... then, after the failure, the old settings were restored.
            expected = [
                mock.call.modifyVolume(self.VOLUME_3PAR_NAME, old_settings)
            ]
            mock_client.assert_has_calls(expected + self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_revert_comment(self, _mock_volume_types):
        """A failure mid-retype restores the original volume comment."""
        _mock_volume_types.side_effect = [
            self.RETYPE_VOLUME_TYPE_2, self.RETYPE_VOLUME_TYPE_1]
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        mock_client.getVolume.return_value = self.RETYPE_VOLUME_INFO_1
        # Fail the QOS setting to test the revert of the snap CPG rename.
        mock_client.deleteVolumeSet.side_effect = hpexceptions.HTTPForbidden
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(hpexceptions.HTTPForbidden,
                              self.driver.retype,
                              self.ctxt,
                              {'id': self.VOLUME_ID},
                              self.RETYPE_VOLUME_TYPE_2,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)
            # The original comment and snap CPG must be written back.
            original = {
                'snapCPG': self.RETYPE_VOLUME_INFO_1['snapCPG'],
                'comment': self.RETYPE_VOLUME_INFO_1['comment']}
            expected = [
                mock.call.modifyVolume('osv-0DM4qZEVSKON-DXN-NwVpw', original)]
            mock_client.assert_has_calls(expected + self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_different_array(self, _mock_volume_types):
        """Retype targeting a different 3PAR array raises InvalidHost.

        The backend's serial number ('XXXXXXX') does not match the one
        encoded in the retype host's location_info.
        """
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        mock_client.getStorageSystemInfo.return_value = {
            'serialNumber': 'XXXXXXX'}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(exception.InvalidHost,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_1,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)
            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getStorageSystemInfo()]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_across_cpg_domains(self, _mock_volume_types):
        """Retype across CPGs in different domains raises Invalid3PARDomain."""
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        # Source and destination CPGs report different domains.
        mock_client.getCPG.side_effect = [
            {'domain': 'domain1'},
            {'domain': 'domain2'},
        ]
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(exception.Invalid3PARDomain,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_1,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)
            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getStorageSystemInfo(),
                mock.call.getCPG(self.RETYPE_VOLUME_INFO_0['userCPG']),
                mock.call.getCPG(
                    self.RETYPE_VOLUME_TYPE_1['extra_specs']['cpg'])
            ]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_across_snap_cpg_domains(self, _mock_volume_types):
        """Retype whose snap CPG is in another domain raises Invalid3PARDomain.

        The user CPGs match ('cpg_domain'), but the new snap CPG reports
        'snap_cpg_domain_1', which must be rejected.
        """
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        mock_client.getCPG.side_effect = [
            {'domain': 'cpg_domain'},
            {'domain': 'cpg_domain'},
            {'domain': 'snap_cpg_domain_1'},
        ]
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(exception.Invalid3PARDomain,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_1,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)
            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getStorageSystemInfo(),
                mock.call.getCPG(self.RETYPE_VOLUME_INFO_0['userCPG']),
                mock.call.getCPG(
                    self.RETYPE_VOLUME_TYPE_1['extra_specs']['cpg']),
                mock.call.getCPG(
                    self.RETYPE_VOLUME_TYPE_1['extra_specs']['snap_cpg'])
            ]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_to_bad_persona(self, _mock_volume_types):
        """Retype to a volume type with an invalid persona raises InvalidInput."""
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_BAD_PERSONA
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(exception.InvalidInput,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_BAD_PERSONA,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)
            expected = [mock.call.getVolume(self.VOLUME_3PAR_NAME)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_tune(self, _mock_volume_types):
        """A successful retype issues a tune (modifyVolume action 6).

        Builds a real volume type with qos:* extra specs, associates QoS
        specs with it, then verifies the expected modify/VVS/tune call
        sequence on the mock client.
        """
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', self.QOS)
        type_ref = volume_types.create(self.ctxt,
                                       "type1", {"qos:maxIOPS": "100",
                                                 "qos:maxBWS": "50",
                                                 "qos:minIOPS": "10",
                                                 "qos:minBWS": "20",
                                                 "qos:latency": "5",
                                                 "qos:priority": "high"})
        qos_specs.associate_qos_with_type(self.ctxt,
                                          qos_ref['id'],
                                          type_ref['id'])
        type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id'])
        volume = {'id': HP3PARBaseDriver.CLONE_ID}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            retyped = self.driver.retype(
                self.ctxt, volume, type_ref, None, self.RETYPE_HOST)
            self.assertTrue(retyped)
            expected = [
                mock.call.modifyVolume('osv-0DM4qZEVSKON-AAAAAAAAA',
                                       {'comment': mock.ANY,
                                        'snapCPG': 'OpenStackCPGSnap'}),
                mock.call.deleteVolumeSet('vvs-0DM4qZEVSKON-AAAAAAAAA'),
                mock.call.addVolumeToVolumeSet('myvvs',
                                               'osv-0DM4qZEVSKON-AAAAAAAAA'),
                mock.call.modifyVolume('osv-0DM4qZEVSKON-AAAAAAAAA',
                                       {'action': 6,
                                        'userCPG': 'OpenStackCPG',
                                        'conversionOperation': 1,
                                        'tuneOperation': 1}),
                mock.call.getTask(1)
            ]
            mock_client.assert_has_calls(expected + self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_qos_spec(self, _mock_volume_types):
        """_retype with new QoS specs creates a VVS with the QoS rules.

        Calls the internal _retype directly (positional arguments mirror
        its signature) and checks the VVS/QoS-rule values derived from
        RETYPE_QOS_SPECS.
        """
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        cpg = "any_cpg"
        snap_cpg = "any_cpg"
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            common._retype(self.volume,
                           HP3PARBaseDriver.VOLUME_3PAR_NAME,
                           "old_type", "old_type_id",
                           HP3PARBaseDriver.RETYPE_HOST,
                           None, cpg, cpg, snap_cpg, snap_cpg,
                           True, False, False, True, None, None,
                           self.QOS_SPECS, self.RETYPE_QOS_SPECS,
                           None, None,
                           "{}")
            expected = [
                mock.call.createVolumeSet('vvs-0DM4qZEVSKON-DXN-NwVpw', None),
                mock.call.createQoSRules(
                    'vvs-0DM4qZEVSKON-DXN-NwVpw',
                    {'ioMinGoal': 100, 'ioMaxLimit': 1000,
                     'bwMinGoalKB': 25600, 'bwMaxLimitKB': 51200,
                     'priority': 3,
                     'latencyGoal': 25}
                ),
                mock.call.addVolumeToVolumeSet(
                    'vvs-0DM4qZEVSKON-DXN-NwVpw',
                    'osv-0DM4qZEVSKON-DXN-NwVpw')]
            mock_client.assert_has_calls(expected)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_dedup(self, _mock_volume_types):
        """_retype to a dedup type tunes with conversionOperation 3."""
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_3
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        cpg = "any_cpg"
        snap_cpg = "any_cpg"
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            common._retype(self.volume,
                           HP3PARBaseDriver.VOLUME_3PAR_NAME,
                           "old_type", "old_type_id",
                           HP3PARBaseDriver.RETYPE_HOST,
                           None, cpg, cpg, snap_cpg, snap_cpg,
                           True, False, False, True, None, None,
                           self.QOS_SPECS, self.RETYPE_QOS_SPECS,
                           None, None,
                           "{}")
            expected = [
                mock.call.modifyVolume('osv-0DM4qZEVSKON-DXN-NwVpw',
                                       {'action': 6,
                                        'userCPG': 'any_cpg',
                                        'conversionOperation': 3,
                                        'tuneOperation': 1}),
                mock.call.getTask(1)]
            mock_client.assert_has_calls(expected)
def test_delete_volume(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.driver.delete_volume(self.volume)
expected = [mock.call.deleteVolume(self.VOLUME_3PAR_NAME)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
    def test_create_cloned_volume(self):
        """Cloning a volume issues an online copyVolume to the new osv name."""
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.copyVolume.return_value = {'taskid': 1}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
                      'id': HP3PARBaseDriver.CLONE_ID,
                      'display_name': 'Foo Volume',
                      'size': 2,
                      'host': volume_utils.append_host(self.FAKE_HOST,
                                                       HP3PAR_CPG2),
                      'source_volid': HP3PARBaseDriver.VOLUME_ID}
            src_vref = {}
            model_update = self.driver.create_cloned_volume(volume, src_vref)
            self.assertIsNone(model_update)
            expected = [
                mock.call.copyVolume(
                    self.VOLUME_3PAR_NAME,
                    'osv-0DM4qZEVSKON-AAAAAAAAA',
                    HP3PAR_CPG2,
                    {'snapCPG': 'OpenStackCPGSnap', 'tpvv': True,
                     'tdvv': False, 'online': True})]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_cloned_qos_volume(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_2
mock_client = self.setup_driver()
mock_client.copyVolume.return_value = {'taskid': 1}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
src_vref = {}
volume = self.volume_qos.copy()
host = "TEST_HOST"
pool = "TEST_POOL"
volume_host = volume_utils.append_host(host, pool)
expected_cpg = pool
volume['id'] = HP3PARBaseDriver.CLONE_ID
volume['host'] = volume_host
volume['source_volid'] = HP3PARBaseDriver.VOLUME_ID
model_update = self.driver.create_cloned_volume(volume, src_vref)
self.assertEqual(model_update, None)
expected = [
mock.call.getCPG(expected_cpg),
mock.call.copyVolume(
self.VOLUME_3PAR_NAME,
'osv-0DM4qZEVSKON-AAAAAAAAA',
expected_cpg,
{'snapCPG': 'OpenStackCPGSnap', 'tpvv': True,
'tdvv': False, 'online': True})]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
    def test_migrate_volume(self):
        """Migration within the same array succeeds via a modifyVolume tune.

        The destination's location_info serial number (1234) matches the
        backend's, so the driver migrates in place and returns
        (True, None).
        """
        conf = {
            'getStorageSystemInfo.return_value': {
                'serialNumber': '1234'},
            'getTask.return_value': {
                'status': 1},
            'getCPG.return_value': {},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': self.RETYPE_VOLUME_INFO_1
        }
        mock_client = self.setup_driver(mock_conf=conf)
        mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
        mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
        mock_client.getTask.return_value = self.STATUS_DONE
        volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
                  'id': HP3PARBaseDriver.CLONE_ID,
                  'display_name': 'Foo Volume',
                  'volume_type_id': None,
                  'size': 2,
                  'status': 'available',
                  'host': HP3PARBaseDriver.FAKE_HOST,
                  'source_volid': HP3PARBaseDriver.VOLUME_ID}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            volume_name_3par = common._encode_name(volume['id'])
            loc_info = 'HP3PARDriver:1234:CPG-FC1'
            host = {'host': 'stack@3parfc1#CPG-FC1',
                    'capabilities': {'location_info': loc_info}}
            result = self.driver.migrate_volume(context.get_admin_context(),
                                                volume, host)
            self.assertIsNotNone(result)
            self.assertEqual((True, None), result)
            osv_matcher = 'osv-' + volume_name_3par
            expected = [
                mock.call.modifyVolume(
                    osv_matcher,
                    {'comment': '{"qos": {}, "display_name": "Foo Volume"}',
                     'snapCPG': HP3PAR_CPG_SNAP}),
                mock.call.modifyVolume(osv_matcher,
                                       {'action': 6,
                                        'userCPG': 'CPG-FC1',
                                        'conversionOperation': 1,
                                        'tuneOperation': 1}),
                mock.call.getTask(mock.ANY)
            ]
            mock_client.assert_has_calls(expected + self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_migrate_volume_with_type(self, _mock_volume_types):
        """Migration of a typed volume carries type metadata in the comment.

        The modifyVolume comment must include volume_type_id/name and the
        VVS from the type's extra specs; the snap CPG comes from the type.
        """
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_2
        conf = {
            'getStorageSystemInfo.return_value': {
                'serialNumber': '1234'},
            'getTask.return_value': {
                'status': 1},
            'getCPG.return_value': {},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': self.RETYPE_VOLUME_INFO_1
        }
        mock_client = self.setup_driver(mock_conf=conf)
        mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
        mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
        mock_client.getTask.return_value = self.STATUS_DONE
        display_name = 'Foo Volume'
        volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
                  'id': HP3PARBaseDriver.CLONE_ID,
                  'display_name': display_name,
                  "volume_type_id": self.RETYPE_VOLUME_TYPE_2['id'],
                  'size': 2,
                  'status': 'available',
                  'host': HP3PARBaseDriver.FAKE_HOST,
                  'source_volid': HP3PARBaseDriver.VOLUME_ID}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            volume_name_3par = common._encode_name(volume['id'])
            loc_info = 'HP3PARDriver:1234:CPG-FC1'
            instance_host = 'stack@3parfc1#CPG-FC1'
            host = {'host': instance_host,
                    'capabilities': {'location_info': loc_info}}
            result = self.driver.migrate_volume(context.get_admin_context(),
                                                volume, host)
            self.assertIsNotNone(result)
            # when the host and pool are the same we'll get None
            self.assertEqual((True, None), result)
            osv_matcher = 'osv-' + volume_name_3par
            expected_comment = {
                "display_name": display_name,
                "volume_type_id": self.RETYPE_VOLUME_TYPE_2['id'],
                "volume_type_name": self.RETYPE_VOLUME_TYPE_2['name'],
                "vvs": self.RETYPE_VOLUME_TYPE_2['extra_specs']['vvs']
            }
            expected = [
                mock.call.modifyVolume(
                    osv_matcher,
                    {'comment': self.CommentMatcher(self.assertEqual,
                                                    expected_comment),
                     'snapCPG': self.RETYPE_VOLUME_TYPE_2
                     ['extra_specs']['snap_cpg']}),
                mock.call.modifyVolume(
                    osv_matcher,
                    {'action': 6,
                     'userCPG': 'CPG-FC1',
                     'conversionOperation': 1,
                     'tuneOperation': 1}),
                mock.call.getTask(mock.ANY)
            ]
            mock_client.assert_has_calls(
                expected +
                self.standard_logout)
def test_migrate_volume_diff_host(self):
conf = {
'getStorageSystemInfo.return_value': {
'serialNumber': 'different'},
}
mock_client = self.setup_driver(mock_conf=conf)
volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
'id': HP3PARBaseDriver.CLONE_ID,
'display_name': 'Foo Volume',
'volume_type_id': None,
'size': 2,
'status': 'available',
'host': HP3PARBaseDriver.FAKE_HOST,
'source_volid': HP3PARBaseDriver.VOLUME_ID}
loc_info = 'HP3PARDriver:1234:CPG-FC1'
host = {'host': 'stack@3parfc1',
'capabilities': {'location_info': loc_info}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
result = self.driver.migrate_volume(context.get_admin_context(),
volume, host)
self.assertIsNotNone(result)
self.assertEqual((False, None), result)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_migrate_volume_diff_domain(self, _mock_volume_types):
        """Migration on the same array succeeds (serial numbers match).

        Mirrors test_migrate_volume but with the volume type mock set up;
        expects the same modifyVolume tune sequence and (True, None).
        """
        _mock_volume_types.return_value = self.volume_type
        conf = {
            'getStorageSystemInfo.return_value': {
                'serialNumber': '1234'},
            'getTask.return_value': {
                'status': 1},
            'getCPG.return_value': {},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': self.RETYPE_VOLUME_INFO_1
        }
        mock_client = self.setup_driver(mock_conf=conf)
        mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
        mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
        mock_client.getTask.return_value = self.STATUS_DONE
        volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
                  'id': HP3PARBaseDriver.CLONE_ID,
                  'display_name': 'Foo Volume',
                  'volume_type_id': None,
                  'size': 2,
                  'status': 'available',
                  'host': HP3PARBaseDriver.FAKE_HOST,
                  'source_volid': HP3PARBaseDriver.VOLUME_ID}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            volume_name_3par = common._encode_name(volume['id'])
            loc_info = 'HP3PARDriver:1234:CPG-FC1'
            host = {'host': 'stack@3parfc1#CPG-FC1',
                    'capabilities': {'location_info': loc_info}}
            result = self.driver.migrate_volume(context.get_admin_context(),
                                                volume, host)
            self.assertIsNotNone(result)
            self.assertEqual((True, None), result)
            osv_matcher = 'osv-' + volume_name_3par
            expected = [
                mock.call.modifyVolume(
                    osv_matcher,
                    {'comment': '{"qos": {}, "display_name": "Foo Volume"}',
                     'snapCPG': HP3PAR_CPG_SNAP}),
                mock.call.modifyVolume(osv_matcher,
                                       {'action': 6,
                                        'userCPG': 'CPG-FC1',
                                        'conversionOperation': 1,
                                        'tuneOperation': 1}),
                mock.call.getTask(mock.ANY),
            ]
            mock_client.assert_has_calls(expected + self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_migrate_volume_attached(self, _mock_volume_types):
        """An in-use volume migrates via retype when protocols match.

        The driver should tune the volume in place and return the new
        host string ('stack@3parfc1#OpenStackCPG') as a model update.
        """
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
                  'volume_type_id': None,
                  'id': HP3PARBaseDriver.CLONE_ID,
                  'display_name': 'Foo Volume',
                  'size': 2,
                  'status': 'in-use',
                  'host': HP3PARBaseDriver.FAKE_HOST,
                  'source_volid': HP3PARBaseDriver.VOLUME_ID}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            volume_name_3par = common._encode_name(volume['id'])
            osv_matcher = 'osv-' + volume_name_3par
            loc_info = 'HP3PARDriver:1234567:CPG-FC1'
            # Match the destination protocol to this driver's protocol
            # (this base class is shared by the FC and iSCSI drivers).
            protocol = "FC"
            if self.properties['driver_volume_type'] == "iscsi":
                protocol = "iSCSI"
            host = {'host': 'stack@3parfc1',
                    'capabilities': {'location_info': loc_info,
                                     'storage_protocol': protocol}}
            result = self.driver.migrate_volume(context.get_admin_context(),
                                                volume, host)
            new_comment = {"qos": {},
                           "retype_test": "test comment"}
            expected = [
                mock.call.modifyVolume(osv_matcher,
                                       {'comment': self.CommentMatcher(
                                           self.assertEqual, new_comment),
                                        'snapCPG': 'OpenStackCPGSnap'}),
                mock.call.modifyVolume(osv_matcher,
                                       {'action': 6,
                                        'userCPG': 'OpenStackCPG',
                                        'conversionOperation': 1,
                                        'tuneOperation': 1}),
                mock.call.getTask(1),
                mock.call.logout()
            ]
            mock_client.assert_has_calls(expected)
            self.assertIsNotNone(result)
            self.assertEqual((True, {'host': 'stack@3parfc1#OpenStackCPG'}),
                             result)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_migrate_volume_attached_diff_protocol(self, _mock_volume_types):
        """An in-use volume is not migrated across storage protocols.

        With storage_protocol 'OTHER', migrate_volume() must return
        (False, None) without touching the backend client.
        """
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        protocol = "OTHER"
        volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
                  'volume_type_id': None,
                  'id': HP3PARBaseDriver.CLONE_ID,
                  'display_name': 'Foo Volume',
                  'size': 2,
                  'status': 'in-use',
                  'host': HP3PARBaseDriver.FAKE_HOST,
                  'source_volid': HP3PARBaseDriver.VOLUME_ID}
        loc_info = 'HP3PARDriver:1234567:CPG-FC1'
        host = {'host': 'stack@3parfc1',
                'capabilities': {'location_info': loc_info,
                                 'storage_protocol': protocol}}
        result = self.driver.migrate_volume(context.get_admin_context(),
                                            volume, host)
        self.assertIsNotNone(result)
        self.assertEqual((False, None), result)
        # No backend calls at all are expected for a protocol mismatch.
        expected = []
        mock_client.assert_has_calls(expected)
    def test_attach_volume(self):
        """Attaching records the instance UUID as 3PAR volume metadata.

        Also verifies that a metadata-write failure is surfaced as a
        CinderException.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.attach_volume(context.get_admin_context(),
                                      self.volume,
                                      'abcdef',
                                      'newhost',
                                      '/dev/vdb')
            expected = [
                mock.call.setVolumeMetaData(
                    self.VOLUME_3PAR_NAME,
                    'HPQ-CS-instance_uuid',
                    'abcdef')]
            mock_client.assert_has_calls(expected)
            # test the exception
            mock_client.setVolumeMetaData.side_effect = Exception('Custom ex')
            self.assertRaises(exception.CinderException,
                              self.driver.attach_volume,
                              context.get_admin_context(),
                              self.volume,
                              'abcdef',
                              'newhost',
                              '/dev/vdb')
    def test_detach_volume(self):
        """Detaching removes the instance-UUID metadata from the 3PAR volume.

        Also verifies that a metadata-removal failure is surfaced as a
        CinderException.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.detach_volume(context.get_admin_context(), self.volume,
                                      None)
            expected = [
                mock.call.removeVolumeMetaData(
                    self.VOLUME_3PAR_NAME,
                    'HPQ-CS-instance_uuid')]
            mock_client.assert_has_calls(expected)
            # test the exception
            mock_client.removeVolumeMetaData.side_effect = Exception(
                'Custom ex')
            self.assertRaises(exception.CinderException,
                              self.driver.detach_volume,
                              context.get_admin_context(),
                              self.volume, None)
    def test_create_snapshot(self):
        """Creating a snapshot issues a read-only createSnapshot call.

        Verifies the exact JSON comment the driver attaches to the 3PAR
        snapshot.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.create_snapshot(self.snapshot)
            comment = (
                '{"volume_id": "761fc5e5-5191-4ec7-aeba-33e36de44156",'
                ' "display_name": "fakesnap",'
                ' "description": "test description name",'
                ' "volume_name":'
                ' "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
            expected = [
                mock.call.createSnapshot(
                    'oss-L4I73ONuTci9Fd4ceij-MQ',
                    'osv-dh-F5VGRTseuujPjbeRBVg',
                    {
                        'comment': comment,
                        'readOnly': True})]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
def test_delete_snapshot(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.driver.delete_snapshot(self.snapshot)
expected = [
mock.call.deleteVolume('oss-L4I73ONuTci9Fd4ceij-MQ')]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
    def test_delete_snapshot_in_use(self):
        """Deleting a snapshot a volume depends on raises SnapshotIsBusy.

        The backend signals the dependency with HTTPConflict, which the
        driver must translate to SnapshotIsBusy.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.create_snapshot(self.snapshot)
            self.driver.create_volume_from_snapshot(self.volume, self.snapshot)
            ex = hpexceptions.HTTPConflict("In use")
            mock_client.deleteVolume = mock.Mock(side_effect=ex)
            # Deleting the snapshot that a volume is dependent on should fail
            self.assertRaises(exception.SnapshotIsBusy,
                              self.driver.delete_snapshot,
                              self.snapshot)
def test_delete_snapshot_not_found(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.driver.create_snapshot(self.snapshot)
try:
ex = hpexceptions.HTTPNotFound("not found")
mock_client.deleteVolume = mock.Mock(side_effect=ex)
self.driver.delete_snapshot(self.snapshot)
except Exception:
self.fail("Deleting a snapshot that is missing should act "
"as if it worked.")
    def test_create_volume_from_snapshot(self):
        """A same-size volume from a snapshot is a writable createSnapshot.

        Also verifies that requesting a volume smaller than the snapshot's
        source (size 1 here) raises InvalidInput.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            model_update = self.driver.create_volume_from_snapshot(
                self.volume,
                self.snapshot)
            self.assertIsNone(model_update)
            comment = (
                '{"snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31",'
                ' "display_name": "Foo Volume",'
                ' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
            expected = [
                mock.call.createSnapshot(
                    self.VOLUME_3PAR_NAME,
                    'oss-L4I73ONuTci9Fd4ceij-MQ',
                    {
                        'comment': comment,
                        'readOnly': False})]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            # A volume smaller than the snapshot source must be rejected.
            volume = self.volume.copy()
            volume['size'] = 1
            self.assertRaises(exception.InvalidInput,
                              self.driver.create_volume_from_snapshot,
                              volume, self.snapshot)
def test_create_volume_from_snapshot_and_extend(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
conf = {
'getTask.return_value': {
'status': 1},
'copyVolume.return_value': {'taskid': 1},
'getVolume.return_value': {}
}
mock_client = self.setup_driver(mock_conf=conf)
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
volume = self.volume.copy()
volume['size'] = self.volume['size'] + 10
model_update = self.driver.create_volume_from_snapshot(
volume,
self.snapshot)
self.assertEqual(model_update, None)
comment = (
'{"snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31",'
' "display_name": "Foo Volume",'
' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
volume_name_3par = common._encode_name(volume['id'])
osv_matcher = 'osv-' + volume_name_3par
omv_matcher = 'omv-' + volume_name_3par
expected = [
mock.call.createSnapshot(
self.VOLUME_3PAR_NAME,
'oss-L4I73ONuTci9Fd4ceij-MQ',
{
'comment': comment,
'readOnly': False}),
mock.call.copyVolume(
osv_matcher, omv_matcher, HP3PAR_CPG, mock.ANY),
mock.call.getTask(mock.ANY),
mock.call.getVolume(osv_matcher),
mock.call.deleteVolume(osv_matcher),
mock.call.modifyVolume(omv_matcher, {'newName': osv_matcher}),
mock.call.growVolume(osv_matcher, 10 * 1024)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_from_snapshot_and_extend_with_qos(
self, _mock_volume_types):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
conf = {
'getTask.return_value': {
'status': 1},
'copyVolume.return_value': {'taskid': 1},
'getVolume.return_value': {}
}
mock_client = self.setup_driver(mock_conf=conf)
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'cpg': HP3PAR_CPG_QOS,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs_name': self.VVS_NAME,
'qos': self.QOS,
'tpvv': True,
'tdvv': False,
'volume_type': self.volume_type}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
volume = self.volume_qos.copy()
volume['size'] = self.volume['size'] + 10
model_update = self.driver.create_volume_from_snapshot(
volume,
self.snapshot)
self.assertEqual(model_update, None)
comment = (
'{"snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31",'
' "display_name": "Foo Volume",'
' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
volume_name_3par = common._encode_name(volume['id'])
osv_matcher = 'osv-' + volume_name_3par
omv_matcher = 'omv-' + volume_name_3par
expected = [
mock.call.createSnapshot(
self.VOLUME_3PAR_NAME,
'oss-L4I73ONuTci9Fd4ceij-MQ',
{
'comment': comment,
'readOnly': False}),
mock.call.getCPG(HP3PAR_CPG),
mock.call.copyVolume(
osv_matcher, omv_matcher, HP3PAR_CPG, mock.ANY),
mock.call.getTask(mock.ANY),
mock.call.getVolume(osv_matcher),
mock.call.deleteVolume(osv_matcher),
mock.call.modifyVolume(omv_matcher, {'newName': osv_matcher}),
mock.call.growVolume(osv_matcher, 10 * 1024)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
    def test_create_volume_from_snapshot_and_extend_copy_fail(self):
        """A failed copy task during snapshot-extend raises CinderException.

        The mocked getTask reports a failed task (status 4 with a failure
        message -- presumably TASK_FAILED; confirm against the 3PAR client
        constants), so create_volume_from_snapshot must propagate an error.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        conf = {
            'getTask.return_value': {
                'status': 4,
                'failure message': 'out of disk space'},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': {}
        }
        mock_client = self.setup_driver(mock_conf=conf)
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            # Requesting a larger size forces the copy/extend path, where
            # the failing task is detected.
            volume = self.volume.copy()
            volume['size'] = self.volume['size'] + 10
            self.assertRaises(exception.CinderException,
                              self.driver.create_volume_from_snapshot,
                              volume, self.snapshot)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_create_volume_from_snapshot_qos(self, _mock_volume_types):
        """Create a same-size volume from a snapshot with a QoS volume type.

        Only a read/write snapshot should be created; additionally, asking
        for a volume smaller than the snapshot must raise InvalidInput.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            _mock_volume_types.return_value = {
                'name': 'gold',
                'extra_specs': {
                    'cpg': HP3PAR_CPG,
                    'snap_cpg': HP3PAR_CPG_SNAP,
                    'vvs_name': self.VVS_NAME,
                    'qos': self.QOS,
                    'tpvv': True,
                    'tdvv': False,
                    'volume_type': self.volume_type}}
            self.driver.create_volume_from_snapshot(
                self.volume_qos,
                self.snapshot)
            comment = (
                '{"snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31",'
                ' "display_name": "Foo Volume",'
                ' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
            expected = [
                mock.call.createSnapshot(
                    self.VOLUME_3PAR_NAME,
                    'oss-L4I73ONuTci9Fd4ceij-MQ', {
                        'comment': comment,
                        'readOnly': False})]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            # A target size smaller than the snapshot's volume is invalid.
            volume = self.volume.copy()
            volume['size'] = 1
            self.assertRaises(exception.InvalidInput,
                              self.driver.create_volume_from_snapshot,
                              volume, self.snapshot)
    def test_terminate_connection(self):
        """Detach tears down the VLUN, the host, and CHAP volume metadata.

        With force=True and a single VLUN left, the driver should delete
        the VLUN, delete the (now empty) host, and remove the CHAP
        user/password keys stored in the volume metadata.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getHostVLUNs.return_value = [
            {'active': True,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': None, 'type': 0}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.terminate_connection(
                self.volume,
                self.connector,
                force=True)
            expected = [
                mock.call.queryHost(iqns=[self.connector['initiator']]),
                mock.call.getHostVLUNs(self.FAKE_HOST),
                mock.call.deleteVLUN(
                    self.VOLUME_3PAR_NAME,
                    None,
                    self.FAKE_HOST),
                mock.call.getHostVLUNs(self.FAKE_HOST),
                mock.call.deleteHost(self.FAKE_HOST),
                # CHAP credentials are kept in volume metadata and must be
                # cleaned up on detach.
                mock.call.removeVolumeMetaData(
                    self.VOLUME_3PAR_NAME, CHAP_USER_KEY),
                mock.call.removeVolumeMetaData(
                    self.VOLUME_3PAR_NAME, CHAP_PASS_KEY)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
def test_update_volume_key_value_pair(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
key = 'a'
value = 'b'
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
common.update_volume_key_value_pair(
self.volume,
key,
value)
expected = [
mock.call.setVolumeMetaData(self.VOLUME_3PAR_NAME, key, value)]
mock_client.assert_has_calls(expected)
# check exception
mock_client.setVolumeMetaData.side_effect = Exception('fake')
self.assertRaises(exception.VolumeBackendAPIException,
common.update_volume_key_value_pair,
self.volume,
None,
'b')
def test_clear_volume_key_value_pair(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
key = 'a'
common = self.driver._login()
common.clear_volume_key_value_pair(self.volume, key)
expected = [
mock.call.removeVolumeMetaData(self.VOLUME_3PAR_NAME, key)]
mock_client.assert_has_calls(expected)
# check the exception
mock_client.removeVolumeMetaData.side_effect = Exception('fake')
self.assertRaises(exception.VolumeBackendAPIException,
common.clear_volume_key_value_pair,
self.volume,
None)
def test_extend_volume(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
grow_size = 3
old_size = self.volume['size']
new_size = old_size + grow_size
self.driver.extend_volume(self.volume, str(new_size))
growth_size_mib = grow_size * units.Ki
expected = [
mock.call.growVolume(self.VOLUME_3PAR_NAME, growth_size_mib)]
mock_client.assert_has_calls(expected)
    def test_extend_volume_non_base(self):
        """Extend retries after an HTTPForbidden (code 150) on first grow.

        growVolume fails once and succeeds on the second attempt, so the
        driver is expected to call it exactly twice.  Presumably code 150
        means the volume is not a base volume and the driver converts it
        before retrying -- confirm against the driver implementation.
        """
        extend_ex = hpexceptions.HTTPForbidden(error={'code': 150})
        conf = {
            'getTask.return_value': {
                'status': 1},
            'getCPG.return_value': {},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': {},
            # Throw an exception first time only
            'growVolume.side_effect': [extend_ex,
                                       None],
        }
        mock_client = self.setup_driver(mock_conf=conf)
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            grow_size = 3
            old_size = self.volume['size']
            new_size = old_size + grow_size
            self.driver.extend_volume(self.volume, str(new_size))
            self.assertEqual(2, mock_client.growVolume.call_count)
    def test_extend_volume_non_base_failure(self):
        """Extend re-raises HTTPForbidden when growVolume always fails.

        Unlike test_extend_volume_non_base, the side effect here fails on
        every attempt, so the retry path cannot recover and the original
        exception must propagate.
        """
        extend_ex = hpexceptions.HTTPForbidden(error={'code': 150})
        conf = {
            'getTask.return_value': {
                'status': 1},
            'getCPG.return_value': {},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': {},
            # Always fail
            'growVolume.side_effect': extend_ex
        }
        mock_client = self.setup_driver(mock_conf=conf)
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            grow_size = 3
            old_size = self.volume['size']
            new_size = old_size + grow_size
            self.assertRaises(hpexceptions.HTTPForbidden,
                              self.driver.extend_volume,
                              self.volume,
                              str(new_size))
def test_get_ports(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getPorts.return_value = {
'members': [
{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2},
'protocol': 2,
'IPAddr': '10.10.120.252',
'linkState': 4,
'device': [],
'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d',
'mode': 2,
'HWAddr': '2C27D75375D2',
'type': 8},
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
'protocol': 2,
'IPAddr': '10.10.220.253',
'linkState': 4,
'device': [],
'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d',
'mode': 2,
'HWAddr': '2C27D75375D6',
'type': 8},
{'portWWN': '20210002AC00383D',
'protocol': 1,
'linkState': 4,
'mode': 2,
'device': ['cage2'],
'nodeWWN': '20210002AC00383D',
'type': 2,
'portPos': {'node': 0, 'slot': 6, 'cardPort': 3}}]}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
ports = common.get_ports()['members']
self.assertEqual(len(ports), 3)
def test_get_by_qos_spec_with_scoping(self):
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', self.QOS)
type_ref = volume_types.create(self.ctxt,
"type1", {"qos:maxIOPS": "100",
"qos:maxBWS": "50",
"qos:minIOPS": "10",
"qos:minBWS": "20",
"qos:latency": "5",
"qos:priority": "high"})
qos_specs.associate_qos_with_type(self.ctxt,
qos_ref['id'],
type_ref['id'])
type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id'])
qos = common._get_qos_by_volume_type(type_ref)
self.assertEqual(qos, {'maxIOPS': '1000', 'maxBWS': '50',
'minIOPS': '100', 'minBWS': '25',
'latency': '25', 'priority': 'low'})
def test_get_by_qos_spec(self):
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
qos_ref = qos_specs.create(
self.ctxt,
'qos-specs-1',
self.QOS_SPECS)
type_ref = volume_types.create(self.ctxt,
"type1", {"qos:maxIOPS": "100",
"qos:maxBWS": "50",
"qos:minIOPS": "10",
"qos:minBWS": "20",
"qos:latency": "5",
"qos:priority": "high"})
qos_specs.associate_qos_with_type(self.ctxt,
qos_ref['id'],
type_ref['id'])
type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id'])
qos = common._get_qos_by_volume_type(type_ref)
self.assertEqual(qos, {'maxIOPS': '1000', 'maxBWS': '50',
'minIOPS': '100', 'minBWS': '25',
'latency': '25', 'priority': 'low'})
def test_get_by_qos_by_type_only(self):
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
type_ref = volume_types.create(self.ctxt,
"type1", {"qos:maxIOPS": "100",
"qos:maxBWS": "50",
"qos:minIOPS": "10",
"qos:minBWS": "20",
"qos:latency": "5",
"qos:priority": "high"})
type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id'])
qos = common._get_qos_by_volume_type(type_ref)
self.assertEqual(qos, {'maxIOPS': '100', 'maxBWS': '50',
'minIOPS': '10', 'minBWS': '20',
'latency': '5', 'priority': 'high'})
    def test_create_vlun(self):
        """_create_3par_vlun parses the location string createVLUN returns.

        With an NSP in the location, the parsed info includes 'nsp';
        without one (host-sees VLUN), the 'nsp' key is absent.
        """
        host = 'fake-host'
        lun_id = 11
        nsp = '1:2:3'
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            # Location format: name,lunid,host,nsp
            location = ("%(name)s,%(lunid)s,%(host)s,%(nsp)s" %
                        {'name': self.VOLUME_NAME,
                         'lunid': lun_id,
                         'host': host,
                         'nsp': nsp})
            mock_client.createVLUN.return_value = location
            expected_info = {'volume_name': self.VOLUME_NAME,
                             'lun_id': lun_id,
                             'host_name': host,
                             'nsp': nsp}
            common = self.driver._login()
            vlun_info = common._create_3par_vlun(
                self.VOLUME_NAME,
                host,
                nsp)
            self.assertEqual(expected_info, vlun_info)
            # Same call without an NSP: the location (and result) omit it.
            location = ("%(name)s,%(lunid)s,%(host)s" %
                        {'name': self.VOLUME_NAME,
                         'lunid': lun_id,
                         'host': host})
            mock_client.createVLUN.return_value = location
            expected_info = {'volume_name': self.VOLUME_NAME,
                             'lun_id': lun_id,
                             'host_name': host}
            vlun_info = common._create_3par_vlun(
                self.VOLUME_NAME,
                host,
                None)
            self.assertEqual(expected_info, vlun_info)
    def test__get_existing_volume_ref_name(self):
        """_get_existing_volume_ref_name resolves both supported ref keys.

        'source-name' is returned as-is, 'source-id' is converted to the
        'unm-' name, and any other key raises
        ManageExistingInvalidReference.
        """
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            existing_ref = {'source-name': unm_matcher}
            result = common._get_existing_volume_ref_name(existing_ref)
            self.assertEqual(unm_matcher, result)
            existing_ref = {'source-id': self.volume['id']}
            result = common._get_existing_volume_ref_name(existing_ref)
            self.assertEqual(unm_matcher, result)
            existing_ref = {'bad-key': 'foo'}
            self.assertRaises(
                exception.ManageExistingInvalidReference,
                common._get_existing_volume_ref_name,
                existing_ref)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_manage_existing(self, _mock_volume_types):
        """manage_existing renames the volume and retypes it to the type.

        Expected sequence: rename 'unm-' -> 'osv-' with an OpenStack
        comment, rewrite the comment with type/QoS info, rebuild the QoS
        volume set, then tune the volume to the target CPG.
        """
        _mock_volume_types.return_value = self.volume_type
        mock_client = self.setup_driver()
        new_comment = {"display_name": "Foo Volume",
                       "name": "volume-007dbfce-7579-40bc-8f90-a20b3902283e",
                       "volume_id": "007dbfce-7579-40bc-8f90-a20b3902283e",
                       "type": "OpenStack"}
        volume = {'display_name': None,
                  'host': self.FAKE_CINDER_HOST,
                  'volume_type': 'gold',
                  'volume_type_id': 'acfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
                  'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}
        mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
        mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
        mock_client.getTask.return_value = self.STATUS_DONE
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            osv_matcher = common._get_3par_vol_name(volume['id'])
            vvs_matcher = common._get_3par_vvs_name(volume['id'])
            existing_ref = {'source-name': unm_matcher}
            expected_obj = {'display_name': 'Foo Volume'}
            obj = self.driver.manage_existing(volume, existing_ref)
            expected_manage = [
                mock.call.getVolume(existing_ref['source-name']),
                mock.call.modifyVolume(existing_ref['source-name'],
                                       {'newName': osv_matcher,
                                        'comment': self.CommentMatcher(
                                            self.assertEqual, new_comment)}),
            ]
            retype_comment_qos = {
                "display_name": "Foo Volume",
                "volume_type_name": self.volume_type['name'],
                "volume_type_id": self.volume_type['id'],
                "qos": {
                    'maxIOPS': '1000',
                    'maxBWS': '50',
                    'minIOPS': '100',
                    'minBWS': '25',
                    'latency': '25',
                    'priority': 'low'
                }
            }
            expected_snap_cpg = HP3PAR_CPG_SNAP
            expected_retype_modify = [
                mock.call.modifyVolume(osv_matcher,
                                       {'comment': self.CommentMatcher(
                                           self.assertEqual,
                                           retype_comment_qos),
                                        'snapCPG': expected_snap_cpg}),
                mock.call.deleteVolumeSet(vvs_matcher),
            ]
            expected_retype_specs = [
                mock.call.createVolumeSet(vvs_matcher, None),
                # QoS values from the comment (IOPS/BWS/latency) translated
                # to 3PAR rule fields; BWS is converted to KB.
                mock.call.createQoSRules(
                    vvs_matcher,
                    {'ioMinGoal': 100, 'ioMaxLimit': 1000,
                     'bwMinGoalKB': 25600, 'priority': 1, 'latencyGoal': 25,
                     'bwMaxLimitKB': 51200}),
                mock.call.addVolumeToVolumeSet(vvs_matcher, osv_matcher),
                mock.call.modifyVolume(
                    osv_matcher,
                    {'action': 6,
                     'userCPG': HP3PAR_CPG,
                     'conversionOperation': 1, 'tuneOperation': 1}),
                mock.call.getTask(1)
            ]
            mock_client.assert_has_calls(self.standard_login + expected_manage)
            mock_client.assert_has_calls(expected_retype_modify)
            mock_client.assert_has_calls(
                expected_retype_specs +
                self.standard_logout)
            self.assertEqual(expected_obj, obj)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_manage_existing_with_no_snap_cpg(self, _mock_volume_types):
        """manage_existing backfills a missing snapCPG from the userCPG."""
        _mock_volume_types.return_value = self.volume_type
        mock_client = self.setup_driver()
        new_comment = {"display_name": "Foo Volume",
                       "name": "volume-007dbfce-7579-40bc-8f90-a20b3902283e",
                       "volume_id": "007dbfce-7579-40bc-8f90-a20b3902283e",
                       "type": "OpenStack"}
        volume = {'display_name': None,
                  'host': 'my-stack1@3parxxx#CPGNOTUSED',
                  'volume_type': 'gold',
                  'volume_type_id': 'acfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
                  'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}
        # Volume info without a snapCPG; see MV_INFO_WITH_NO_SNAPCPG.
        mock_client.getVolume.return_value = self.MV_INFO_WITH_NO_SNAPCPG
        mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
        mock_client.getTask.return_value = self.STATUS_DONE
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            osv_matcher = common._get_3par_vol_name(volume['id'])
            existing_ref = {'source-name': unm_matcher}
            expected_obj = {'display_name': 'Foo Volume'}
            obj = self.driver.manage_existing(volume, existing_ref)
            expected_manage = [
                mock.call.getVolume(existing_ref['source-name']),
                mock.call.modifyVolume(
                    existing_ref['source-name'],
                    {'newName': osv_matcher,
                     'comment': self.CommentMatcher(self.assertEqual,
                                                    new_comment),
                     # manage_existing() should be setting
                     # blank snapCPG to the userCPG
                     'snapCPG': 'testUserCpg0'})
            ]
            mock_client.assert_has_calls(self.standard_login + expected_manage)
            self.assertEqual(expected_obj, obj)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_manage_existing_vvs(self, _mock_volume_types):
        """manage_existing with a type that names an explicit volume set.

        Instead of creating a QoS volume set, the volume is added to the
        pre-existing set named in the type's 'vvs' extra spec.
        """
        test_volume_type = self.RETYPE_VOLUME_TYPE_2
        vvs = test_volume_type['extra_specs']['vvs']
        _mock_volume_types.return_value = test_volume_type
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
        mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
        mock_client.getTask.return_value = self.STATUS_DONE
        id = '007abcde-7579-40bc-8f90-a20b3902283e'
        new_comment = {"display_name": "Test Volume",
                       "name": ("volume-%s" % id),
                       "volume_id": id,
                       "type": "OpenStack"}
        volume = {'display_name': 'Test Volume',
                  'host': 'my-stack1@3parxxx#CPGNOTUSED',
                  'volume_type': 'gold',
                  'volume_type_id': 'acfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
                  'id': id}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            osv_matcher = common._get_3par_vol_name(volume['id'])
            vvs_matcher = common._get_3par_vvs_name(volume['id'])
            existing_ref = {'source-name': unm_matcher}
            obj = self.driver.manage_existing(volume, existing_ref)
            expected_obj = {'display_name': 'Test Volume'}
            expected_manage = [
                mock.call.getVolume(existing_ref['source-name']),
                mock.call.modifyVolume(existing_ref['source-name'],
                                       {'newName': osv_matcher,
                                        'comment': self.CommentMatcher(
                                            self.assertEqual, new_comment)})
            ]
            retype_comment_vvs = {
                "display_name": "Foo Volume",
                "volume_type_name": test_volume_type['name'],
                "volume_type_id": test_volume_type['id'],
                "vvs": vvs
            }
            expected_retype = [
                mock.call.modifyVolume(osv_matcher,
                                       {'comment': self.CommentMatcher(
                                           self.assertEqual,
                                           retype_comment_vvs),
                                        'snapCPG': 'OpenStackCPGSnap'}),
                # The driver's own vvs is deleted; the volume joins the
                # type-specified set instead.
                mock.call.deleteVolumeSet(vvs_matcher),
                mock.call.addVolumeToVolumeSet(vvs, osv_matcher),
                mock.call.modifyVolume(osv_matcher,
                                       {'action': 6,
                                        'userCPG': 'CPGNOTUSED',
                                        'conversionOperation': 1,
                                        'tuneOperation': 1}),
                mock.call.getTask(1)
            ]
            mock_client.assert_has_calls(self.standard_login + expected_manage)
            mock_client.assert_has_calls(
                expected_retype +
                self.standard_logout)
            self.assertEqual(expected_obj, obj)
    def test_manage_existing_no_volume_type(self):
        """manage_existing without a volume type only renames and comments.

        Covers three variations: no display_name (taken from the existing
        comment), an explicit display_name, and no comment on the source
        volume at all.
        """
        mock_client = self.setup_driver()
        comment = (
            '{"display_name": "Foo Volume"}')
        new_comment = (
            '{"type": "OpenStack",'
            ' "display_name": "Foo Volume",'
            ' "name": "volume-007dbfce-7579-40bc-8f90-a20b3902283e",'
            ' "volume_id": "007dbfce-7579-40bc-8f90-a20b3902283e"}')
        volume = {'display_name': None,
                  'volume_type': None,
                  'volume_type_id': None,
                  'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}
        mock_client.getVolume.return_value = {'comment': comment,
                                              'userCPG': 'testUserCpg0'}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            osv_matcher = common._get_3par_vol_name(volume['id'])
            existing_ref = {'source-name': unm_matcher}
            # Case 1: display_name comes from the existing volume comment.
            obj = self.driver.manage_existing(volume, existing_ref)
            expected_obj = {'display_name': 'Foo Volume'}
            expected = [
                mock.call.getVolume(existing_ref['source-name']),
                mock.call.modifyVolume(existing_ref['source-name'],
                                       {'newName': osv_matcher,
                                        'comment': new_comment,
                                        # manage_existing() should be setting
                                        # blank snapCPG to the userCPG
                                        'snapCPG': 'testUserCpg0'})
            ]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertEqual(expected_obj, obj)
            # Case 2: an explicit display_name on the volume wins.
            volume['display_name'] = 'Test Volume'
            obj = self.driver.manage_existing(volume, existing_ref)
            expected_obj = {'display_name': 'Test Volume'}
            expected = [
                mock.call.getVolume(existing_ref['source-name']),
                mock.call.modifyVolume(existing_ref['source-name'],
                                       {'newName': osv_matcher,
                                        'comment': new_comment,
                                        # manage_existing() should be setting
                                        # blank snapCPG to the userCPG
                                        'snapCPG': 'testUserCpg0'})
            ]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertEqual(expected_obj, obj)
            # Case 3: no comment and no display_name -> None comes back.
            mock_client.getVolume.return_value = {'userCPG': 'testUserCpg0'}
            volume['display_name'] = None
            common = self.driver._login()
            obj = self.driver.manage_existing(volume, existing_ref)
            expected_obj = {'display_name': None}
            expected = [
                mock.call.getVolume(existing_ref['source-name']),
                mock.call.modifyVolume(existing_ref['source-name'],
                                       {'newName': osv_matcher,
                                        'comment': new_comment,
                                        # manage_existing() should be setting
                                        # blank snapCPG to the userCPG
                                        'snapCPG': 'testUserCpg0'})
            ]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertEqual(expected_obj, obj)
    def test_manage_existing_invalid_input(self):
        """A nonexistent source volume makes manage_existing raise.

        getVolume raising HTTPNotFound must be converted to InvalidInput.
        """
        mock_client = self.setup_driver()
        volume = {'display_name': None,
                  'volume_type': None,
                  'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}
        mock_client.getVolume.side_effect = hpexceptions.HTTPNotFound('fake')
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            existing_ref = {'source-name': unm_matcher}
            self.assertRaises(exception.InvalidInput,
                              self.driver.manage_existing,
                              volume=volume,
                              existing_ref=existing_ref)
            expected = [mock.call.getVolume(existing_ref['source-name'])]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    def test_manage_existing_volume_type_exception(self):
        """An unresolvable volume type raises a type-mismatch error.

        volume_types.get_volume_type is not mocked here, so looking up the
        volume's type fails and manage_existing must raise
        ManageExistingVolumeTypeMismatch after the initial getVolume.
        """
        mock_client = self.setup_driver()
        comment = (
            '{"display_name": "Foo Volume"}')
        volume = {'display_name': None,
                  'volume_type': 'gold',
                  'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
                  'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}
        mock_client.getVolume.return_value = {'comment': comment}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            existing_ref = {'source-name': unm_matcher}
            self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                              self.driver.manage_existing,
                              volume=volume,
                              existing_ref=existing_ref)
            expected = [mock.call.getVolume(existing_ref['source-name'])]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_manage_existing_retype_exception(self, _mock_volume_types):
        """A failed retype rolls the rename back to the original name.

        getCPG returns a different domain on each call, which the retype
        step rejects (Invalid3PARDomain); the volume must then be renamed
        back to 'unm-' with its original comment restored.
        """
        mock_client = self.setup_driver()
        _mock_volume_types.return_value = {
            'name': 'gold',
            'id': 'gold-id',
            'extra_specs': {
                'cpg': HP3PAR_CPG,
                'snap_cpg': HP3PAR_CPG_SNAP,
                'vvs_name': self.VVS_NAME,
                'qos': self.QOS,
                'tpvv': True,
                'tdvv': False,
                'volume_type': self.volume_type}}
        volume = {'display_name': None,
                  'host': 'stack1@3pariscsi#POOL1',
                  'volume_type': 'gold',
                  'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
                  'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}
        mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
        mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
        mock_client.getTask.return_value = self.STATUS_DONE
        # Mismatched domains across the CPG lookups trigger the failure.
        mock_client.getCPG.side_effect = [
            {'domain': 'domain1'},
            {'domain': 'domain2'},
            {'domain': 'domain3'},
        ]
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            osv_matcher = common._get_3par_vol_name(volume['id'])
            existing_ref = {'source-name': unm_matcher}
            self.assertRaises(exception.Invalid3PARDomain,
                              self.driver.manage_existing,
                              volume=volume,
                              existing_ref=existing_ref)
            expected = [
                mock.call.getVolume(unm_matcher),
                mock.call.modifyVolume(
                    unm_matcher, {
                        'newName': osv_matcher,
                        'comment': mock.ANY}),
                mock.call.getCPG('POOL1'),
                mock.call.getVolume(osv_matcher),
                mock.call.getCPG('testUserCpg0'),
                mock.call.getCPG('POOL1'),
                # Rollback: rename back and restore the original comment.
                mock.call.modifyVolume(
                    osv_matcher, {'newName': unm_matcher,
                                  'comment': self.MANAGE_VOLUME_INFO
                                  ['comment']})
            ]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    def test_manage_existing_get_size(self):
        """manage_existing_get_size converts the backend MiB size to GiB.

        2048 MiB reported by getVolume becomes 2 (GiB).
        """
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'sizeMiB': 2048}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            volume = {}
            existing_ref = {'source-name': unm_matcher}
            size = self.driver.manage_existing_get_size(volume, existing_ref)
            expected_size = 2
            expected = [mock.call.getVolume(existing_ref['source-name'])]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertEqual(expected_size, size)
    def test_manage_existing_get_size_invalid_reference(self):
        """Bad references raise ManageExistingInvalidReference.

        Both an already-managed ('osv-') name and an empty reference are
        rejected before any getVolume call is made.
        """
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            volume = {}
            existing_ref = {'source-name': self.VOLUME_3PAR_NAME}
            self.assertRaises(exception.ManageExistingInvalidReference,
                              self.driver.manage_existing_get_size,
                              volume=volume,
                              existing_ref=existing_ref)
            mock_client.assert_has_calls(
                self.standard_login +
                self.standard_logout)
            existing_ref = {}
            self.assertRaises(exception.ManageExistingInvalidReference,
                              self.driver.manage_existing_get_size,
                              volume=volume,
                              existing_ref=existing_ref)
            mock_client.assert_has_calls(
                self.standard_login +
                self.standard_logout)
    def test_manage_existing_get_size_invalid_input(self):
        """A nonexistent source volume makes get_size raise InvalidInput.

        getVolume raising HTTPNotFound is converted to InvalidInput.
        """
        mock_client = self.setup_driver()
        mock_client.getVolume.side_effect = hpexceptions.HTTPNotFound('fake')
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            volume = {}
            existing_ref = {'source-name': unm_matcher}
            self.assertRaises(exception.InvalidInput,
                              self.driver.manage_existing_get_size,
                              volume=volume,
                              existing_ref=existing_ref)
            expected = [mock.call.getVolume(existing_ref['source-name'])]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
def test_unmanage(self):
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
self.driver.unmanage(self.volume)
osv_matcher = common._get_3par_vol_name(self.volume['id'])
unm_matcher = common._get_3par_unm_name(self.volume['id'])
expected = [
mock.call.modifyVolume(osv_matcher, {'newName': unm_matcher})
]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
def test__safe_hostname(self):
long_hostname = "abc123abc123abc123abc123abc123abc123"
fixed_hostname = "abc123abc123abc123abc123abc123a"
common = hpcommon.HP3PARCommon(None)
safe_host = common._safe_hostname(long_hostname)
self.assertEqual(fixed_hostname, safe_host)
class TestHP3PARFCDriver(HP3PARBaseDriver, test.TestCase):
properties = {
'driver_volume_type': 'fibre_channel',
'data': {
'encrypted': False,
'target_lun': 90,
'target_wwn': ['0987654321234', '123456789000987'],
'target_discovered': True,
'initiator_target_map': {'123456789012345':
['0987654321234', '123456789000987'],
'123456789054321':
['0987654321234', '123456789000987'],
}}}
    def setup_driver(self, config=None, mock_conf=None, wsapi_version=None):
        """Configure the FC driver against a mocked HTTP 3PAR client.

        Verifies the setup-time CPG validation calls, then resets the mock
        so each test asserts only its own activity.  Returns the mock
        client.
        """
        self.ctxt = context.get_admin_context()
        mock_client = self.setup_mock_client(
            conf=config,
            m_conf=mock_conf,
            driver=hpfcdriver.HP3PARFCDriver)
        if wsapi_version:
            mock_client.getWsApiVersion.return_value = (
                wsapi_version)
        else:
            mock_client.getWsApiVersion.return_value = (
                self.wsapi_version_latest)
        expected = [
            mock.call.getCPG(HP3PAR_CPG),
            mock.call.getCPG(HP3PAR_CPG2)]
        mock_client.assert_has_calls(
            self.standard_login +
            expected +
            self.standard_logout)
        # Drop the setup-time call history before handing the mock to tests.
        mock_client.reset_mock()
        return mock_client
    def test_initialize_connection(self):
        """FC attach creates the host (not found at first) and a VLUN.

        getHost raises HTTPNotFound on the first call, so the driver must
        look the host up via queryHost, then create an auto VLUN and
        return connection properties matching self.properties.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # First lookup misses; second returns a host with two FC paths.
        mock_client.getHost.side_effect = [
            hpexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST,
                'FCPaths': [{'driverVersion': None,
                             'firmwareVersion': None,
                             'hostSpeed': 0,
                             'model': None,
                             'portPos': {'cardPort': 1, 'node': 1,
                                         'slot': 2},
                             'vendor': None,
                             'wwn': self.wwn[0]},
                            {'driverVersion': None,
                             'firmwareVersion': None,
                             'hostSpeed': 0,
                             'model': None,
                             'portPos': {'cardPort': 1, 'node': 0,
                                         'slot': 2},
                             'vendor': None,
                             'wwn': self.wwn[1]}]}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        mock_client.getVLUN.side_effect = [
            hpexceptions.HTTPNotFound('fake')]
        mock_client.getHostVLUNs.return_value = [
            {'active': True,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': 90, 'type': 0}]
        location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
                    {'volume_name': self.VOLUME_3PAR_NAME,
                     'lun_id': 90,
                     'host': self.FAKE_HOST,
                     'nsp': 'something'})
        mock_client.createVLUN.return_value = location
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            result = self.driver.initialize_connection(
                self.volume,
                self.connector)
            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(wwns=['123456789012345',
                                          '123456789054321']),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.getPorts(),
                mock.call.getVLUN(self.VOLUME_3PAR_NAME),
                mock.call.createVLUN(
                    self.VOLUME_3PAR_NAME,
                    auto=True,
                    hostname=self.FAKE_HOST),
                mock.call.getHostVLUNs(self.FAKE_HOST)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertDictMatch(result, self.properties)
    @mock.patch('cinder.zonemanager.utils.create_lookup_service')
    def test_initialize_connection_with_lookup_single_nsp(self, mock_lookup):
        """Attach over FC using a SAN lookup service with a single fabric.

        The fake lookup maps one initiator WWN to one target port, so the
        returned connection properties carry a single target WWN and an
        initiator_target_map restricted to that fabric.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        class fake_lookup_object(object):
            # Minimal stand-in for the zone manager lookup service.
            def get_device_mapping_from_network(self, connector, target_wwns):
                fake_map = {
                    'FAB_1': {
                        'target_port_wwn_list': ['0987654321234'],
                        'initiator_port_wwn_list': ['123456789012345']
                    }
                }
                return fake_map
        mock_lookup.return_value = fake_lookup_object()
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # First getHost lookup misses (host gets created); the second
        # returns the host with a single FC path.
        mock_client.getHost.side_effect = [
            hpexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST,
             'FCPaths': [{'driverVersion': None,
                          'firmwareVersion': None,
                          'hostSpeed': 0,
                          'model': None,
                          'portPos': {'cardPort': 1, 'node': 1,
                                      'slot': 2},
                          'vendor': None,
                          'wwn': self.wwn[0]}]}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        # No existing VLUN for the volume: a new one must be created.
        mock_client.getVLUN.side_effect = [
            hpexceptions.HTTPNotFound('fake')]
        mock_client.getHostVLUNs.return_value = [
            {'active': True,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': 90, 'type': 0}]
        location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
                    {'volume_name': self.VOLUME_3PAR_NAME,
                     'lun_id': 90,
                     'host': self.FAKE_HOST,
                     'nsp': 'something'})
        mock_client.createVLUN.return_value = location
        # Connector with only the first WWN, matching the fabric map above.
        connector = {'ip': '10.0.0.2',
                     'initiator': 'iqn.1993-08.org.debian:01:222',
                     'wwpns': [self.wwn[0]],
                     'wwnns': ["223456789012345"],
                     'host': self.FAKE_HOST}
        expected_properties = {
            'driver_volume_type': 'fibre_channel',
            'data': {
                'encrypted': False,
                'target_lun': 90,
                'target_wwn': ['0987654321234'],
                'target_discovered': True,
                'initiator_target_map': {'123456789012345':
                                         ['0987654321234']
                                         }}}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            result = self.driver.initialize_connection(self.volume, connector)
            # With a lookup service the driver picks a specific port
            # position, hence the extra getPorts() and the portPos kwarg.
            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.ANY,
                mock.call.getHost(self.FAKE_HOST),
                mock.call.getPorts(),
                mock.call.getVLUN(self.VOLUME_3PAR_NAME),
                mock.call.getPorts(),
                mock.call.createVLUN(
                    self.VOLUME_3PAR_NAME,
                    auto=True,
                    hostname=self.FAKE_HOST,
                    portPos={'node': 7, 'slot': 1, 'cardPort': 1}),
                mock.call.getHostVLUNs(self.FAKE_HOST)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertDictMatch(result, expected_properties)
def test_initialize_connection_encrypted(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('fake'),
{'name': self.FAKE_HOST,
'FCPaths': [{'driverVersion': None,
'firmwareVersion': None,
'hostSpeed': 0,
'model': None,
'portPos': {'cardPort': 1, 'node': 1,
'slot': 2},
'vendor': None,
'wwn': self.wwn[0]},
{'driverVersion': None,
'firmwareVersion': None,
'hostSpeed': 0,
'model': None,
'portPos': {'cardPort': 1, 'node': 0,
'slot': 2},
'vendor': None,
'wwn': self.wwn[1]}]}]
mock_client.queryHost.return_value = {
'members': [{
'name': self.FAKE_HOST
}]
}
mock_client.getVLUN.side_effect = [
hpexceptions.HTTPNotFound('fake')]
mock_client.getHostVLUNs.return_value = [
{'active': True,
'volumeName': self.VOLUME_3PAR_NAME,
'lun': 90, 'type': 0}]
location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
{'volume_name': self.VOLUME_3PAR_NAME,
'lun_id': 90,
'host': self.FAKE_HOST,
'nsp': 'something'})
mock_client.createVLUN.return_value = location
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
result = self.driver.initialize_connection(
self.volume_encrypted,
self.connector)
expected = [
mock.call.getVolume(self.VOLUME_3PAR_NAME),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost(self.FAKE_HOST),
mock.call.queryHost(wwns=['123456789012345',
'123456789054321']),
mock.call.getHost(self.FAKE_HOST),
mock.call.getPorts(),
mock.call.getVLUN(self.VOLUME_3PAR_NAME),
mock.call.createVLUN(
self.VOLUME_3PAR_NAME,
auto=True,
hostname=self.FAKE_HOST),
mock.call.getHostVLUNs(self.FAKE_HOST)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
expected_properties = self.properties
expected_properties['data']['encrypted'] = True
self.assertDictMatch(result, expected_properties)
    def test_terminate_connection(self):
        """Detach: delete the VLUN, remove the now-empty host, return zoning.

        terminate_connection is exercised three times: once on the plain
        path, then twice more with deleteHost raising HTTPConflict variants
        that the driver is expected to handle ("has exported VLUN" and
        "host is a member of a set").
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        # One active VLUN, then two misses: after the delete the host has
        # nothing left, so the driver also deletes the host.
        effects = [
            [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME,
              'lun': None, 'type': 0}],
            hpexceptions.HTTPNotFound,
            hpexceptions.HTTPNotFound]
        mock_client.getHostVLUNs.side_effect = effects
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        expected = [
            mock.call.queryHost(wwns=['123456789012345', '123456789054321']),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.deleteVLUN(
                self.VOLUME_3PAR_NAME,
                None,
                self.FAKE_HOST),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.deleteHost(self.FAKE_HOST),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.getPorts()]
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            # FC detach must hand the zone manager an initiator_target_map.
            self.assertIn('data', conn_info)
            self.assertIn('initiator_target_map', conn_info['data'])
            mock_client.reset_mock()
            # side_effect must be re-armed after reset_mock for the re-run.
            mock_client.getHostVLUNs.side_effect = effects
            # mock some deleteHost exceptions that are handled
            delete_with_vlun = hpexceptions.HTTPConflict(
                error={'message': "has exported VLUN"})
            delete_with_hostset = hpexceptions.HTTPConflict(
                error={'message': "host is a member of a set"})
            mock_client.deleteHost = mock.Mock(
                side_effect=[delete_with_vlun, delete_with_hostset])
            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            mock_client.reset_mock()
            mock_client.getHostVLUNs.side_effect = effects
            # Third run consumes the second queued deleteHost conflict.
            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    @mock.patch('cinder.zonemanager.utils.create_lookup_service')
    def test_terminate_connection_with_lookup(self, mock_lookup):
        """Detach with a SAN lookup service in place.

        Same call sequence and handled deleteHost conflicts as the plain
        terminate test, but the zone map comes from the fake lookup object.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        class fake_lookup_object(object):
            # Minimal stand-in for the zone manager lookup service.
            def get_device_mapping_from_network(self, connector, target_wwns):
                fake_map = {
                    'FAB_1': {
                        'target_port_wwn_list': ['0987654321234'],
                        'initiator_port_wwn_list': ['123456789012345']
                    }
                }
                return fake_map
        mock_lookup.return_value = fake_lookup_object()
        mock_client = self.setup_driver()
        # One active VLUN, then two misses: after the delete the host has
        # nothing left, so the driver also deletes the host.
        effects = [
            [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME,
              'lun': None, 'type': 0}],
            hpexceptions.HTTPNotFound,
            hpexceptions.HTTPNotFound]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        mock_client.getHostVLUNs.side_effect = effects
        expected = [
            mock.call.queryHost(wwns=['123456789012345', '123456789054321']),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.deleteVLUN(
                self.VOLUME_3PAR_NAME,
                None,
                self.FAKE_HOST),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.deleteHost(self.FAKE_HOST),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.getPorts()]
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertIn('data', conn_info)
            self.assertIn('initiator_target_map', conn_info['data'])
            mock_client.reset_mock()
            # side_effect must be re-armed after reset_mock for the re-run.
            mock_client.getHostVLUNs.side_effect = effects
            # mock some deleteHost exceptions that are handled
            delete_with_vlun = hpexceptions.HTTPConflict(
                error={'message': "has exported VLUN"})
            delete_with_hostset = hpexceptions.HTTPConflict(
                error={'message': "host is a member of a set"})
            mock_client.deleteHost = mock.Mock(
                side_effect=[delete_with_vlun, delete_with_hostset])
            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            mock_client.reset_mock()
            mock_client.getHostVLUNs.side_effect = effects
            # Third run consumes the second queued deleteHost conflict.
            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
def test_terminate_connection_more_vols(self):
mock_client = self.setup_driver()
# mock more than one vlun on the host (don't even try to remove host)
mock_client.getHostVLUNs.return_value = \
[
{'active': True,
'volumeName': self.VOLUME_3PAR_NAME,
'lun': None, 'type': 0},
{'active': True,
'volumeName': 'there-is-another-volume',
'lun': None, 'type': 0},
]
mock_client.queryHost.return_value = {
'members': [{
'name': self.FAKE_HOST
}]
}
expect_less = [
mock.call.queryHost(wwns=['123456789012345', '123456789054321']),
mock.call.getHostVLUNs(self.FAKE_HOST),
mock.call.deleteVLUN(
self.VOLUME_3PAR_NAME,
None,
self.FAKE_HOST),
mock.call.getHostVLUNs(self.FAKE_HOST),
mock.call.getHostVLUNs(self.FAKE_HOST)]
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
conn_info = self.driver.terminate_connection(self.volume,
self.connector)
mock_client.assert_has_calls(
self.standard_login +
expect_less +
self.standard_logout)
self.assertNotIn('initiator_target_map', conn_info['data'])
    def test_get_volume_stats(self):
        """FC stats: unlimited CPG first, then a CPG with an SDGrowth limit.

        Checks protocol, pool capacities, capacity_utilization, volume
        count, and that the configured filter/goodness functions are
        passed through in the pool stats.
        """
        # setup_mock_client drive with the configuration
        # and return the mock HTTP 3PAR client
        config = self.setup_configuration()
        config.filter_function = FILTER_FUNCTION
        config.goodness_function = GOODNESS_FUNCTION
        mock_client = self.setup_driver(config=config)
        mock_client.getCPG.return_value = self.cpgs[0]
        mock_client.getStorageSystemInfo.return_value = {
            'serialNumber': '1234'
        }
        # cpg has no limit
        mock_client.getCPGAvailableSpace.return_value = {
            "capacityEfficiency": {u'compaction': 594.4},
            "rawFreeMiB": 1024.0 * 6,
            "usableFreeMiB": 1024.0 * 3
        }
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            stats = self.driver.get_volume_stats(True)
            # const converts MiB to GiB (1/1024).
            const = 0.0009765625
            self.assertEqual(stats['storage_protocol'], 'FC')
            # Top-level capacities are zero; real numbers live per-pool.
            self.assertEqual(stats['total_capacity_gb'], 0)
            self.assertEqual(stats['free_capacity_gb'], 0)
            self.assertEqual(stats['pools'][0]['total_capacity_gb'], 24.0)
            self.assertEqual(stats['pools'][0]['free_capacity_gb'], 3.0)
            self.assertEqual(stats['pools'][0]['capacity_utilization'], 87.5)
            self.assertEqual(stats['pools'][0]['total_volumes'], 3)
            self.assertEqual(stats['pools'][0]['goodness_function'],
                             GOODNESS_FUNCTION)
            self.assertEqual(stats['pools'][0]['filter_function'],
                             FILTER_FUNCTION)
            expected = [
                mock.call.getStorageSystemInfo(),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getCPGAvailableSpace(HP3PAR_CPG),
                mock.call.getCPG(HP3PAR_CPG2),
                mock.call.getCPGAvailableSpace(HP3PAR_CPG2)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            # A second refresh must report the same values.
            stats = self.driver.get_volume_stats(True)
            self.assertEqual(stats['storage_protocol'], 'FC')
            self.assertEqual(stats['total_capacity_gb'], 0)
            self.assertEqual(stats['free_capacity_gb'], 0)
            self.assertEqual(stats['pools'][0]['total_capacity_gb'], 24.0)
            self.assertEqual(stats['pools'][0]['free_capacity_gb'], 3.0)
            self.assertEqual(stats['pools'][0]['capacity_utilization'], 87.5)
            self.assertEqual(stats['pools'][0]['total_volumes'], 3)
            self.assertEqual(stats['pools'][0]['goodness_function'],
                             GOODNESS_FUNCTION)
            self.assertEqual(stats['pools'][0]['filter_function'],
                             FILTER_FUNCTION)
            # Now give the CPG an 8 GiB growth limit and recheck the math.
            cpg2 = self.cpgs[0].copy()
            cpg2.update({'SDGrowth': {'limitMiB': 8192}})
            mock_client.getCPG.return_value = cpg2
            stats = self.driver.get_volume_stats(True)
            self.assertEqual(stats['storage_protocol'], 'FC')
            total_capacity_gb = 8192 * const
            self.assertEqual(stats['total_capacity_gb'], 0)
            self.assertEqual(stats['pools'][0]['total_capacity_gb'],
                             total_capacity_gb)
            # Free space = limit minus user + snapshot usage, in GiB.
            free_capacity_gb = int(
                (8192 - (self.cpgs[0]['UsrUsage']['usedMiB'] +
                         self.cpgs[0]['SDUsage']['usedMiB'])) * const)
            self.assertEqual(stats['free_capacity_gb'], 0)
            self.assertEqual(stats['pools'][0]['free_capacity_gb'],
                             free_capacity_gb)
            cap_util = (float(total_capacity_gb - free_capacity_gb) /
                        float(total_capacity_gb)) * 100
            self.assertEqual(stats['pools'][0]['capacity_utilization'],
                             cap_util)
            self.assertEqual(stats['pools'][0]['total_volumes'], 3)
            self.assertEqual(stats['pools'][0]['goodness_function'],
                             GOODNESS_FUNCTION)
            self.assertEqual(stats['pools'][0]['filter_function'],
                             FILTER_FUNCTION)
            # Cleanup calls issued against the mocked client.
            common.client.deleteCPG(HP3PAR_CPG)
            common.client.createCPG(HP3PAR_CPG, {})
    def test_create_host(self):
        """FC host creation when neither name nor WWNs exist on the array.

        getHost misses and queryHost returns None, so _create_host must
        issue createHost with both connector WWNs and then re-read the
        host.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # First getHost lookup misses; after creation the host is returned
        # with both FC paths.
        mock_client.getHost.side_effect = [
            hpexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST,
             'FCPaths': [{'driverVersion': None,
                          'firmwareVersion': None,
                          'hostSpeed': 0,
                          'model': None,
                          'portPos': {'cardPort': 1, 'node': 1,
                                      'slot': 2},
                          'vendor': None,
                          'wwn': self.wwn[0]},
                         {'driverVersion': None,
                          'firmwareVersion': None,
                          'hostSpeed': 0,
                          'model': None,
                          'portPos': {'cardPort': 1, 'node': 0,
                                      'slot': 2},
                          'vendor': None,
                          'wwn': self.wwn[1]}]}]
        # WWNs are not registered under any existing host either.
        mock_client.queryHost.return_value = None
        mock_client.getVLUN.return_value = {'lun': 186}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host = self.driver._create_host(
                common,
                self.volume,
                self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(wwns=['123456789012345',
                                          '123456789054321']),
                mock.call.createHost(
                    self.FAKE_HOST,
                    FCWwns=['123456789012345', '123456789054321'],
                    optional={'domain': None, 'persona': 2}),
                mock.call.getHost(self.FAKE_HOST)]
            mock_client.assert_has_calls(expected)
            self.assertEqual(host['name'], self.FAKE_HOST)
def test_create_invalid_host(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('Host not found.'), {
'name': 'fakehost.foo',
'FCPaths': [{'wwn': '123456789012345'}, {
'wwn': '123456789054321'}]}]
mock_client.queryHost.return_value = {
'members': [{
'name': 'fakehost.foo'
}]
}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
host = self.driver._create_host(
common,
self.volume,
self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost('fakehost'),
mock.call.queryHost(wwns=['123456789012345',
'123456789054321']),
mock.call.getHost('fakehost.foo')]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], 'fakehost.foo')
def test_create_modify_host(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [{
'name': self.FAKE_HOST, 'FCPaths': []},
{'name': self.FAKE_HOST,
'FCPaths': [{'wwn': '123456789012345'}, {
'wwn': '123456789054321'}]}]
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
host = self.driver._create_host(
common,
self.volume,
self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost('fakehost'),
mock.call.modifyHost(
'fakehost', {
'FCWWNs': ['123456789012345', '123456789054321'],
'pathOperation': 1}),
mock.call.getHost('fakehost')]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], self.FAKE_HOST)
self.assertEqual(len(host['FCPaths']), 2)
    def test_modify_host_with_new_wwn(self):
        """Host already has one WWN; only the missing one gets added.

        The modifyHost request must list just '123456789012345', since
        '123456789054321' is already on the host.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # Before: one of the connector's two WWNs present.
        getHost_ret1 = {
            'name': self.FAKE_HOST,
            'FCPaths': [{'wwn': '123456789054321'}]}
        # After: both WWNs present.
        getHost_ret2 = {
            'name': self.FAKE_HOST,
            'FCPaths': [{'wwn': '123456789012345'},
                        {'wwn': '123456789054321'}]}
        mock_client.getHost.side_effect = [getHost_ret1, getHost_ret2]
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host = self.driver._create_host(
                common,
                self.volume,
                self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost('fakehost'),
                mock.call.modifyHost(
                    'fakehost', {
                        'FCWWNs': ['123456789012345'], 'pathOperation': 1}),
                mock.call.getHost('fakehost')]
            mock_client.assert_has_calls(expected)
            self.assertEqual(host['name'], self.FAKE_HOST)
            self.assertEqual(len(host['FCPaths']), 2)
    def test_modify_host_with_unknown_wwn_and_new_wwn(self):
        """A WWN foreign to the connector must be left untouched.

        The host starts with one known WWN plus an unknown one
        ('xxxxxxxxxxxxxxx'); only the missing connector WWN is added and
        the unknown path survives.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # Before: one connector WWN plus an unrelated WWN.
        getHost_ret1 = {
            'name': self.FAKE_HOST,
            'FCPaths': [{'wwn': '123456789054321'},
                        {'wwn': 'xxxxxxxxxxxxxxx'}]}
        # After: both connector WWNs plus the unrelated one.
        getHost_ret2 = {
            'name': self.FAKE_HOST,
            'FCPaths': [{'wwn': '123456789012345'},
                        {'wwn': '123456789054321'},
                        {'wwn': 'xxxxxxxxxxxxxxx'}]}
        mock_client.getHost.side_effect = [getHost_ret1, getHost_ret2]
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host = self.driver._create_host(
                common,
                self.volume,
                self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost('fakehost'),
                mock.call.modifyHost(
                    'fakehost', {
                        'FCWWNs': ['123456789012345'], 'pathOperation': 1}),
                mock.call.getHost('fakehost')]
            mock_client.assert_has_calls(expected)
            self.assertEqual(host['name'], self.FAKE_HOST)
            self.assertEqual(len(host['FCPaths']), 3)
class TestHP3PARISCSIDriver(HP3PARBaseDriver, test.TestCase):
    """Unit tests for the HP 3PAR iSCSI volume driver."""
    # iSCSI target advertised by the mocked 3PAR array.
    TARGET_IQN = 'iqn.2000-05.com.3pardata:21810002ac00383d'
    TARGET_LUN = 186
    # Connection properties expected back from initialize_connection.
    # NOTE: shared class-level dict — tests should not mutate it in place.
    properties = {
        'driver_volume_type': 'iscsi',
        'data':
        {'encrypted': False,
         'target_discovered': True,
         'target_iqn': TARGET_IQN,
         'target_lun': TARGET_LUN,
         'target_portal': '1.1.1.2:1234'}}
    def setup_driver(self, config=None, mock_conf=None, wsapi_version=None):
        """Create the iSCSI driver wired to a mocked 3PAR client.

        Verifies the CPG and port queries performed during driver setup,
        then resets the mock so each test starts from a clean call history.

        :param config: optional driver configuration override
        :param mock_conf: optional mock configuration for the client
        :param wsapi_version: optional WSAPI version dict; defaults to the
                              latest supported version
        :returns: the configured mock 3PAR client
        """
        self.ctxt = context.get_admin_context()
        mock_client = self.setup_mock_client(
            conf=config,
            m_conf=mock_conf,
            driver=hpdriver.HP3PARISCSIDriver)
        if wsapi_version:
            mock_client.getWsApiVersion.return_value = (
                wsapi_version)
        else:
            mock_client.getWsApiVersion.return_value = (
                self.wsapi_version_latest)
        expected_get_cpgs = [
            mock.call.getCPG(HP3PAR_CPG),
            mock.call.getCPG(HP3PAR_CPG2)]
        expected_get_ports = [mock.call.getPorts()]
        # Setup performs two login/logout cycles: CPG checks, then ports.
        mock_client.assert_has_calls(
            self.standard_login +
            expected_get_cpgs +
            self.standard_logout +
            self.standard_login +
            expected_get_ports +
            self.standard_logout)
        mock_client.reset_mock()
        return mock_client
    def test_initialize_connection(self):
        """iSCSI attach: result must match the class-level properties dict."""
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # First getHost lookup misses (host gets created), second succeeds.
        mock_client.getHost.side_effect = [
            hpexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        mock_client.getHostVLUNs.return_value = [
            {'active': True,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': self.TARGET_LUN, 'type': 0}]
        # Existing VLUN found, so no createVLUN call appears in 'expected'.
        mock_client.getVLUN.return_value = {
            'hostname': self.FAKE_HOST,
            'lun': self.TARGET_LUN,
            'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}
        location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
                    {'volume_name': self.VOLUME_3PAR_NAME,
                     'lun_id': self.TARGET_LUN,
                     'host': self.FAKE_HOST,
                     'nsp': 'something'})
        mock_client.createVLUN.return_value = location
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            result = self.driver.initialize_connection(
                self.volume,
                self.connector)
            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.getVLUN(self.VOLUME_3PAR_NAME)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertDictMatch(result, self.properties)
def test_initialize_connection_encrypted(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('fake'),
{'name': self.FAKE_HOST}]
mock_client.queryHost.return_value = {
'members': [{
'name': self.FAKE_HOST
}]
}
mock_client.getHostVLUNs.return_value = [
{'active': True,
'volumeName': self.VOLUME_3PAR_NAME,
'lun': self.TARGET_LUN, 'type': 0}]
mock_client.getVLUN.return_value = {
'hostname': self.FAKE_HOST,
'lun': self.TARGET_LUN,
'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}
location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
{'volume_name': self.VOLUME_3PAR_NAME,
'lun_id': self.TARGET_LUN,
'host': self.FAKE_HOST,
'nsp': 'something'})
mock_client.createVLUN.return_value = location
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
result = self.driver.initialize_connection(
self.volume_encrypted,
self.connector)
expected = [
mock.call.getVolume(self.VOLUME_3PAR_NAME),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost(self.FAKE_HOST),
mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
mock.call.getHost(self.FAKE_HOST),
mock.call.getVLUN(self.VOLUME_3PAR_NAME)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
expected_properties = self.properties
expected_properties['data']['encrypted'] = True
self.assertDictMatch(result, self.properties)
    def test_get_volume_stats(self):
        """iSCSI stats: unlimited CPG first, then a CPG with SDGrowth limit.

        Mirrors the FC stats test but expects storage_protocol 'iSCSI'.
        """
        # setup_mock_client drive with the configuration
        # and return the mock HTTP 3PAR client
        config = self.setup_configuration()
        config.filter_function = FILTER_FUNCTION
        config.goodness_function = GOODNESS_FUNCTION
        mock_client = self.setup_driver(config=config)
        mock_client.getCPG.return_value = self.cpgs[0]
        mock_client.getStorageSystemInfo.return_value = {
            'serialNumber': '1234'
        }
        # cpg has no limit
        mock_client.getCPGAvailableSpace.return_value = {
            "capacityEfficiency": {u'compaction': 594.4},
            "rawFreeMiB": 1024.0 * 6,
            "usableFreeMiB": 1024.0 * 3
        }
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            stats = self.driver.get_volume_stats(True)
            # const converts MiB to GiB (1/1024).
            const = 0.0009765625
            self.assertEqual(stats['storage_protocol'], 'iSCSI')
            # Top-level capacities are zero; real numbers live per-pool.
            self.assertEqual(stats['total_capacity_gb'], 0)
            self.assertEqual(stats['free_capacity_gb'], 0)
            self.assertEqual(stats['pools'][0]['total_capacity_gb'], 24.0)
            self.assertEqual(stats['pools'][0]['free_capacity_gb'], 3.0)
            self.assertEqual(stats['pools'][0]['capacity_utilization'], 87.5)
            self.assertEqual(stats['pools'][0]['total_volumes'], 3)
            self.assertEqual(stats['pools'][0]['goodness_function'],
                             GOODNESS_FUNCTION)
            self.assertEqual(stats['pools'][0]['filter_function'],
                             FILTER_FUNCTION)
            expected = [
                mock.call.getStorageSystemInfo(),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getCPGAvailableSpace(HP3PAR_CPG),
                mock.call.getCPG(HP3PAR_CPG2),
                mock.call.getCPGAvailableSpace(HP3PAR_CPG2)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            # Now give the CPG an 8 GiB growth limit and recheck the math.
            cpg2 = self.cpgs[0].copy()
            cpg2.update({'SDGrowth': {'limitMiB': 8192}})
            mock_client.getCPG.return_value = cpg2
            stats = self.driver.get_volume_stats(True)
            self.assertEqual(stats['storage_protocol'], 'iSCSI')
            total_capacity_gb = 8192 * const
            self.assertEqual(stats['total_capacity_gb'], 0)
            self.assertEqual(stats['pools'][0]['total_capacity_gb'],
                             total_capacity_gb)
            # Free space = limit minus user + snapshot usage, in GiB.
            free_capacity_gb = int(
                (8192 - (self.cpgs[0]['UsrUsage']['usedMiB'] +
                         self.cpgs[0]['SDUsage']['usedMiB'])) * const)
            self.assertEqual(stats['free_capacity_gb'], 0)
            self.assertEqual(stats['pools'][0]['free_capacity_gb'],
                             free_capacity_gb)
            cap_util = (float(total_capacity_gb - free_capacity_gb) /
                        float(total_capacity_gb)) * 100
            self.assertEqual(stats['pools'][0]['capacity_utilization'],
                             cap_util)
            self.assertEqual(stats['pools'][0]['total_volumes'], 3)
            self.assertEqual(stats['pools'][0]['goodness_function'],
                             GOODNESS_FUNCTION)
            self.assertEqual(stats['pools'][0]['filter_function'],
                             FILTER_FUNCTION)
    def test_create_host(self):
        """iSCSI host creation when neither name nor IQN exists on the array.

        getHost misses and queryHost returns None, so _create_host issues
        createHost with the connector IQN; without CHAP, the returned
        credentials are both None.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # First getHost lookup misses; after creation the host is found.
        mock_client.getHost.side_effect = [
            hpexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST}]
        mock_client.queryHost.return_value = None
        mock_client.getVLUN.return_value = {'lun': self.TARGET_LUN}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host, auth_username, auth_password = self.driver._create_host(
                common, self.volume, self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
                mock.call.createHost(
                    self.FAKE_HOST,
                    optional={'domain': None, 'persona': 2},
                    iscsiNames=['iqn.1993-08.org.debian:01:222']),
                mock.call.getHost(self.FAKE_HOST)]
            mock_client.assert_has_calls(expected)
            self.assertEqual(host['name'], self.FAKE_HOST)
            self.assertEqual(auth_username, None)
            self.assertEqual(auth_password, None)
    def test_create_host_chap_enabled(self):
        """Host creation with CHAP: credentials are read from volume metadata.

        With hp3par_iscsi_chap_enabled, _create_host must fetch the CHAP
        user/secret from volume metadata, create the host, then modifyHost
        to install the CHAP credentials, and return them to the caller.
        """
        # setup_mock_client drive with CHAP enabled configuration
        # and return the mock HTTP 3PAR client
        config = self.setup_configuration()
        config.hp3par_iscsi_chap_enabled = True
        mock_client = self.setup_driver(config=config)
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # First getHost lookup misses; after creation the host is found.
        mock_client.getHost.side_effect = [
            hpexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST}]
        mock_client.queryHost.return_value = None
        mock_client.getVLUN.return_value = {'lun': self.TARGET_LUN}
        expected_mod_request = {
            'chapOperation': mock_client.HOST_EDIT_ADD,
            'chapOperationMode': mock_client.CHAP_INITIATOR,
            'chapName': 'test-user',
            'chapSecret': 'test-pass'
        }
        def get_side_effect(*args):
            # Serve the CHAP user/secret metadata keyed on the second arg.
            data = {'value': None}
            if args[1] == CHAP_USER_KEY:
                data['value'] = 'test-user'
            elif args[1] == CHAP_PASS_KEY:
                data['value'] = 'test-pass'
            return data
        mock_client.getVolumeMetaData.side_effect = get_side_effect
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host, auth_username, auth_password = self.driver._create_host(
                common, self.volume, self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getVolumeMetaData(
                    'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY),
                mock.call.getVolumeMetaData(
                    'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
                mock.call.createHost(
                    self.FAKE_HOST,
                    optional={'domain': None, 'persona': 2},
                    iscsiNames=['iqn.1993-08.org.debian:01:222']),
                mock.call.modifyHost(
                    'fakehost',
                    expected_mod_request),
                mock.call.getHost(self.FAKE_HOST)
            ]
            mock_client.assert_has_calls(expected)
            self.assertEqual(host['name'], self.FAKE_HOST)
            self.assertEqual(auth_username, 'test-user')
            self.assertEqual(auth_password, 'test-pass')
def test_create_invalid_host(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('Host not found.'),
{'name': 'fakehost.foo'}]
mock_client.queryHost.return_value = {
'members': [{
'name': 'fakehost.foo'
}]
}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
host, auth_username, auth_password = self.driver._create_host(
common, self.volume, self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost(self.FAKE_HOST),
mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
mock.call.getHost('fakehost.foo')]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], 'fakehost.foo')
self.assertEqual(auth_username, None)
self.assertEqual(auth_password, None)
    def test_create_invalid_host_chap_enabled(self):
        """Reuse a host found by IQN and install CHAP credentials on it.

        Like test_create_invalid_host, but with CHAP enabled the driver
        must also fetch the credentials from volume metadata and issue a
        modifyHost against the pre-existing 'fakehost.foo'.
        """
        # setup_mock_client drive with CHAP enabled configuration
        # and return the mock HTTP 3PAR client
        config = self.setup_configuration()
        config.hp3par_iscsi_chap_enabled = True
        mock_client = self.setup_driver(config=config)
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # Expected name misses; the IQN's real owner is 'fakehost.foo'.
        mock_client.getHost.side_effect = [
            hpexceptions.HTTPNotFound('Host not found.'),
            {'name': 'fakehost.foo'}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': 'fakehost.foo'
            }]
        }
        def get_side_effect(*args):
            # Serve the CHAP user/secret metadata keyed on the second arg.
            data = {'value': None}
            if args[1] == CHAP_USER_KEY:
                data['value'] = 'test-user'
            elif args[1] == CHAP_PASS_KEY:
                data['value'] = 'test-pass'
            return data
        mock_client.getVolumeMetaData.side_effect = get_side_effect
        expected_mod_request = {
            'chapOperation': mock_client.HOST_EDIT_ADD,
            'chapOperationMode': mock_client.CHAP_INITIATOR,
            'chapName': 'test-user',
            'chapSecret': 'test-pass'
        }
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host, auth_username, auth_password = self.driver._create_host(
                common, self.volume, self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getVolumeMetaData(
                    'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY),
                mock.call.getVolumeMetaData(
                    'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
                mock.call.modifyHost(
                    'fakehost.foo',
                    expected_mod_request),
                mock.call.getHost('fakehost.foo')
            ]
            mock_client.assert_has_calls(expected)
            self.assertEqual(host['name'], 'fakehost.foo')
            self.assertEqual(auth_username, 'test-user')
            self.assertEqual(auth_password, 'test-pass')
def test_create_modify_host(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
{'name': self.FAKE_HOST, 'FCPaths': []},
{'name': self.FAKE_HOST,
'FCPaths': [{'wwn': '123456789012345'},
{'wwn': '123456789054321'}]}]
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
host, auth_username, auth_password = self.driver._create_host(
common, self.volume, self.connector)
expected = [
mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost(self.FAKE_HOST),
mock.call.modifyHost(
self.FAKE_HOST,
{'pathOperation': 1,
'iSCSINames': ['iqn.1993-08.org.debian:01:222']}),
mock.call.getHost(self.FAKE_HOST)]
mock_client.assert_has_calls(expected)
self.assertEqual(host['name'], self.FAKE_HOST)
self.assertEqual(auth_username, None)
self.assertEqual(auth_password, None)
self.assertEqual(len(host['FCPaths']), 2)
    def test_create_modify_host_chap_enabled(self):
        """Existing host gets both the iSCSI path and CHAP creds added.

        With CHAP enabled, _create_host() must issue two modifyHost calls:
        one to add the initiator IQN and one to set the CHAP credentials
        read from the volume metadata.
        """
        # setup_mock_client drive with CHAP enabled configuration
        # and return the mock HTTP 3PAR client
        config = self.setup_configuration()
        config.hp3par_iscsi_chap_enabled = True
        mock_client = self.setup_driver(config=config)
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # First lookup: host without iSCSI paths; second: updated host.
        mock_client.getHost.side_effect = [
            {'name': self.FAKE_HOST, 'FCPaths': []},
            {'name': self.FAKE_HOST,
             'FCPaths': [{'wwn': '123456789012345'},
                         {'wwn': '123456789054321'}]}]

        def get_side_effect(*args):
            # Serve the stored CHAP user/secret for the metadata key asked.
            data = {'value': None}
            if args[1] == CHAP_USER_KEY:
                data['value'] = 'test-user'
            elif args[1] == CHAP_PASS_KEY:
                data['value'] = 'test-pass'
            return data

        mock_client.getVolumeMetaData.side_effect = get_side_effect

        # Second modifyHost call must carry these CHAP initiator settings.
        expected_mod_request = {
            'chapOperation': mock_client.HOST_EDIT_ADD,
            'chapOperationMode': mock_client.CHAP_INITIATOR,
            'chapName': 'test-user',
            'chapSecret': 'test-pass'
        }

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host, auth_username, auth_password = self.driver._create_host(
                common, self.volume, self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getVolumeMetaData(
                    'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY),
                mock.call.getVolumeMetaData(
                    'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.modifyHost(
                    self.FAKE_HOST,
                    {'pathOperation': 1,
                     'iSCSINames': ['iqn.1993-08.org.debian:01:222']}),
                mock.call.modifyHost(
                    self.FAKE_HOST,
                    expected_mod_request
                ),
                mock.call.getHost(self.FAKE_HOST)]

            mock_client.assert_has_calls(expected)

            self.assertEqual(host['name'], self.FAKE_HOST)
            self.assertEqual(auth_username, 'test-user')
            self.assertEqual(auth_password, 'test-pass')
            self.assertEqual(len(host['FCPaths']), 2)
def test_get_least_used_nsp_for_host_single(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getPorts.return_value = PORTS_RET
mock_client.getVLUNs.return_value = VLUNS1_RET
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
# Setup a single ISCSI IP
iscsi_ips = ["10.10.220.253"]
self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
self.driver.initialize_iscsi_ports(common)
nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost')
self.assertEqual(nsp, "1:8:1")
def test_get_least_used_nsp_for_host_new(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getPorts.return_value = PORTS_RET
mock_client.getVLUNs.return_value = VLUNS1_RET
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
# Setup two ISCSI IPs
iscsi_ips = ["10.10.220.252", "10.10.220.253"]
self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
self.driver.initialize_iscsi_ports(common)
# Host 'newhost' does not yet have any iscsi paths,
# so the 'least used' is returned
nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost')
self.assertEqual(nsp, "1:8:2")
def test_get_least_used_nsp_for_host_reuse(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
mock_client.getPorts.return_value = PORTS_RET
mock_client.getVLUNs.return_value = VLUNS1_RET
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
# Setup two ISCSI IPs
iscsi_ips = ["10.10.220.252", "10.10.220.253"]
self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
self.driver.initialize_iscsi_ports(common)
# hosts 'foo' and 'bar' already have active iscsi paths
# the same one should be used
nsp = self.driver._get_least_used_nsp_for_host(common, 'foo')
self.assertEqual(nsp, "1:8:2")
nsp = self.driver._get_least_used_nsp_for_host(common, 'bar')
self.assertEqual(nsp, "1:8:1")
    # NOTE(review): "nps" in the method name looks like a typo for "nsp";
    # left as-is since test names are part of the discoverable interface.
    def test_get_least_used_nps_for_host_fc(self):
        """FC-only ports must never be selected as an iSCSI NSP.

        PORTS1_RET contains an FC port at 0:6:3 (protocol 1); the chosen
        NSP must be the iSCSI port 1:8:1 instead.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getPorts.return_value = PORTS1_RET
        mock_client.getVLUNs.return_value = VLUNS5_RET

        # Setup two ISCSI IPs
        iscsi_ips = ["10.10.220.252", "10.10.220.253"]
        self.driver.configuration.hp3par_iscsi_ips = iscsi_ips

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            self.driver.initialize_iscsi_ports(common)

            nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost')
            self.assertNotEqual(nsp, "0:6:3")
            self.assertEqual(nsp, "1:8:1")
    def test_invalid_iscsi_ip(self):
        """Driver setup fails when no configured IP matches an array port.

        The configuration lists 10.10.220.250/.251 (and iscsi_ip_address
        10.10.10.10) while the array only exposes ports on
        10.10.220.252/.253, so setup must raise InvalidInput.
        """
        config = self.setup_configuration()
        config.hp3par_iscsi_ips = ['10.10.220.250', '10.10.220.251']
        config.iscsi_ip_address = '10.10.10.10'
        mock_conf = {
            'getPorts.return_value': {
                'members': [
                    {'portPos': {'node': 1, 'slot': 8, 'cardPort': 2},
                     'protocol': 2,
                     'IPAddr': '10.10.220.252',
                     'linkState': 4,
                     'device': [],
                     'iSCSIName': self.TARGET_IQN,
                     'mode': 2,
                     'HWAddr': '2C27D75375D2',
                     'type': 8},
                    {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
                     'protocol': 2,
                     'IPAddr': '10.10.220.253',
                     'linkState': 4,
                     'device': [],
                     'iSCSIName': self.TARGET_IQN,
                     'mode': 2,
                     'HWAddr': '2C27D75375D6',
                     'type': 8}]}}

        # no valid ip addr should be configured.
        self.assertRaises(exception.InvalidInput,
                          self.setup_driver,
                          config=config,
                          mock_conf=mock_conf)
    def test_get_least_used_nsp(self):
        """_get_least_used_nsp picks the candidate with the fewest VLUNs.

        Three scenarios are run against the same mock client, swapping the
        getVLUNs payload between them:
        1. 0:2:1 carries five VLUNs vs three on 1:8:x -> 1:8:1 wins.
        2. 0:2:1 has five vs four on 1:2:1 -> 1:2:1 wins.
        3. 1:1:1 has zero VLUNs at all -> 1:1:1 wins.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        ports = [
            {'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'active': True},
            {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'active': True},
            {'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'active': True},
            {'portPos': {'node': 0, 'slot': 2, 'cardPort': 2}, 'active': True},
            {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
            {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
            {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
            {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}]
        mock_client.getVLUNs.return_value = {'members': ports}

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            # in use count
            vluns = common.client.getVLUNs()
            nsp = self.driver._get_least_used_nsp(common, vluns['members'],
                                                  ['0:2:1', '1:8:1'])
            self.assertEqual(nsp, '1:8:1')

            # Scenario 2: 1:2:1 (4 VLUNs) vs 0:2:1 (5 VLUNs).
            ports = [
                {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
                 'active': True},
                {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
                 'active': True},
                {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
                 'active': True},
                {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
                 'active': True},
                {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
                 'active': True},
                {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
                 'active': True},
                {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
                 'active': True},
                {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
                 'active': True},
                {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
                 'active': True}]

            mock_client.getVLUNs.return_value = {'members': ports}

            # in use count
            common = self.driver._login()
            vluns = common.client.getVLUNs()
            nsp = self.driver._get_least_used_nsp(common, vluns['members'],
                                                  ['0:2:1', '1:2:1'])
            self.assertEqual(nsp, '1:2:1')

            # Scenario 3: candidate 1:1:1 has no VLUNs at all.
            ports = [
                {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
                 'active': True},
                {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
                 'active': True},
                {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
                 'active': True},
                {'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
                 'active': True},
                {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
                 'active': True},
                {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
                 'active': True},
                {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
                 'active': True},
                {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
                 'active': True},
                {'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
                 'active': True}]

            mock_client.getVLUNs.return_value = {'members': ports}

            # in use count
            common = self.driver._login()
            vluns = common.client.getVLUNs()
            nsp = self.driver._get_least_used_nsp(common, vluns['members'],
                                                  ['1:1:1', '1:2:1'])
            self.assertEqual(nsp, '1:1:1')
    def test_set_3par_chaps(self):
        """_set_3par_chaps is a no-op unless CHAP is enabled in config.

        First pass (CHAP disabled): no client calls expected. Second pass
        (CHAP enabled): a single modifyHost call with the CHAP initiator
        settings.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            expected = []
            self.driver._set_3par_chaps(
                common, 'test-host', 'test-vol', 'test-host', 'pass')
            mock_client.assert_has_calls(expected)

        # setup_mock_client drive with CHAP enabled configuration
        # and return the mock HTTP 3PAR client
        config = self.setup_configuration()
        config.hp3par_iscsi_chap_enabled = True
        mock_client = self.setup_driver(config=config)
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            expected_mod_request = {
                'chapOperation': mock_client.HOST_EDIT_ADD,
                'chapOperationMode': mock_client.CHAP_INITIATOR,
                'chapName': 'test-host',
                'chapSecret': 'fake'
            }

            expected = [
                mock.call.modifyHost('test-host', expected_mod_request)
            ]
            self.driver._set_3par_chaps(
                common, 'test-host', 'test-vol', 'test-host', 'fake')
            mock_client.assert_has_calls(expected)
    @mock.patch('cinder.volume.utils.generate_password')
    def test_do_export(self, mock_utils):
        """_do_export behavior with CHAP disabled vs enabled.

        CHAP disabled: no client calls, provider_auth is None.
        CHAP enabled: credentials are generated, stored as volume metadata
        and returned as 'CHAP <user> <pass>'.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        volume = {'host': 'test-host@3pariscsi',
                  'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
        mock_utils.return_value = 'random-pass'
        mock_client.getHostVLUNs.return_value = [
            {'active': True,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': None, 'type': 0,
             'remoteName': 'iqn.1993-08.org.debian:01:222'}
        ]
        mock_client.getHost.return_value = {
            'name': 'osv-0DM4qZEVSKON-DXN-NwVpw',
            'initiatorChapEnabled': True
        }
        mock_client.getVolumeMetaData.return_value = {
            'value': 'random-pass'
        }

        # CHAP disabled: nothing should be touched on the array.
        expected = []
        expected_model = {'provider_auth': None}

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            model = self.driver._do_export(common, volume)

            mock_client.assert_has_calls(expected)
            self.assertEqual(expected_model, model)

        mock_client.reset_mock()

        # setup_mock_client drive with CHAP enabled configuration
        # and return the mock HTTP 3PAR client
        config = self.setup_configuration()
        config.hp3par_iscsi_chap_enabled = True
        mock_client = self.setup_driver(config=config)
        volume = {'host': 'test-host@3pariscsi',
                  'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
        mock_utils.return_value = 'random-pass'
        mock_client.getHostVLUNs.return_value = [
            {'active': True,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': None, 'type': 0,
             'remoteName': 'iqn.1993-08.org.debian:01:222'}
        ]
        mock_client.getHost.return_value = {
            'name': 'osv-0DM4qZEVSKON-DXN-NwVpw',
            'initiatorChapEnabled': True
        }
        mock_client.getVolumeMetaData.return_value = {
            'value': 'random-pass'
        }

        # CHAP enabled: credentials stored in the volume metadata.
        expected = [
            mock.call.getHostVLUNs('test-host'),
            mock.call.getHost('test-host'),
            mock.call.getVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY),
            mock.call.setVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'),
            mock.call.setVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass')
        ]
        expected_model = {'provider_auth': 'CHAP test-host random-pass'}

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            model = self.driver._do_export(common, volume)

            mock_client.assert_has_calls(expected)
            self.assertEqual(expected_model, model)
    @mock.patch('cinder.volume.utils.generate_password')
    def test_do_export_host_not_found(self, mock_utils):
        """_do_export still stores CHAP metadata when the host is missing.

        getHostVLUNs raises 404; the driver skips the host inspection,
        generates credentials and persists them on the volume anyway.
        """
        # setup_mock_client drive with CHAP enabled configuration
        # and return the mock HTTP 3PAR client
        config = self.setup_configuration()
        config.hp3par_iscsi_chap_enabled = True
        mock_client = self.setup_driver(config=config)
        volume = {'host': 'test-host@3pariscsi',
                  'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
        mock_utils.return_value = "random-pass"
        mock_client.getHostVLUNs.side_effect = hpexceptions.HTTPNotFound(
            'fake')
        mock_client.getVolumeMetaData.return_value = {
            'value': 'random-pass'
        }
        expected = [
            mock.call.getHostVLUNs('test-host'),
            mock.call.setVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'),
            mock.call.setVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass')
        ]
        expected_model = {'provider_auth': 'CHAP test-host random-pass'}

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            model = self.driver._do_export(common, volume)
            mock_client.assert_has_calls(expected)
            self.assertEqual(expected_model, model)
    @mock.patch('cinder.volume.utils.generate_password')
    def test_do_export_host_chap_disabled(self, mock_utils):
        """CHAP metadata is (re)written even if the host has CHAP off.

        The driver's config enables CHAP, the array host reports
        initiatorChapEnabled False; _do_export still reads the stored
        secret and re-sets both metadata keys.
        """
        # setup_mock_client drive with CHAP enabled configuration
        # and return the mock HTTP 3PAR client
        config = self.setup_configuration()
        config.hp3par_iscsi_chap_enabled = True
        mock_client = self.setup_driver(config=config)
        volume = {'host': 'test-host@3pariscsi',
                  'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
        mock_utils.return_value = 'random-pass'
        mock_client.getHostVLUNs.return_value = [
            {'active': True,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': None, 'type': 0,
             'remoteName': 'iqn.1993-08.org.debian:01:222'}
        ]
        mock_client.getHost.return_value = {
            'name': 'fake-host',
            'initiatorChapEnabled': False
        }
        mock_client.getVolumeMetaData.return_value = {
            'value': 'random-pass'
        }

        expected = [
            mock.call.getHostVLUNs('test-host'),
            mock.call.getHost('test-host'),
            mock.call.getVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY),
            mock.call.setVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'),
            mock.call.setVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass')
        ]
        expected_model = {'provider_auth': 'CHAP test-host random-pass'}

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            model = self.driver._do_export(common, volume)
            mock_client.assert_has_calls(expected)
            self.assertEqual(expected_model, model)
@mock.patch('cinder.volume.utils.generate_password')
def test_do_export_no_active_vluns(self, mock_utils):
# setup_mock_client drive with CHAP enabled configuration
# and return the mock HTTP 3PAR client
config = self.setup_configuration()
config.hp3par_iscsi_chap_enabled = True
mock_client = self.setup_driver(config=config)
volume = {'host': 'test-host@3pariscsi',
'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
mock_utils.return_value = "random-pass"
mock_client.getHostVLUNs.return_value = [
{'active': False,
'volumeName': self.VOLUME_3PAR_NAME,
'lun': None, 'type': 0,
'remoteName': 'iqn.1993-08.org.debian:01:222'}
]
mock_client.getHost.return_value = {
'name': 'fake-host',
'initiatorChapEnabled': True
}
mock_client.getVolumeMetaData.return_value = {
'value': 'random-pass'
}
expected = [
mock.call.getHostVLUNs('test-host'),
mock.call.getHost('test-host'),
mock.call.setVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'),
mock.call.setVolumeMetaData(
'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass')
]
expected_model = {'provider_auth': 'CHAP test-host random-pass'}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
model = self.driver._do_export(common, volume)
mock_client.assert_has_calls(expected)
self.assertEqual(model, expected_model)
    def test_ensure_export(self):
        """ensure_export rebuilds provider_auth from volume metadata.

        With no stored metadata provider_auth is None; once both CHAP
        metadata keys exist, provider_auth is reconstructed from them.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        volume = {'host': 'test-host@3pariscsi',
                  'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}

        mock_client.getAllVolumeMetaData.return_value = {
            'total': 0,
            'members': []
        }

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            model = self.driver.ensure_export(None, volume)

            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getAllVolumeMetaData('osv-0DM4qZEVSKON-DXN-NwVpw')
            ]

            expected_model = {'provider_auth': None}

            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertEqual(model, expected_model)

            # Second pass: both CHAP metadata entries are present now.
            mock_client.getAllVolumeMetaData.return_value = {
                'total': 2,
                'members': [
                    {
                        'creationTimeSec': 1406074222,
                        'value': 'fake-host',
                        'key': CHAP_USER_KEY,
                        'creationTime8601': '2014-07-22T17:10:22-07:00'
                    },
                    {
                        'creationTimeSec': 1406074222,
                        'value': 'random-pass',
                        'key': CHAP_PASS_KEY,
                        'creationTime8601': '2014-07-22T17:10:22-07:00'
                    }
                ]
            }

            model = self.driver.ensure_export(None, volume)

            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getAllVolumeMetaData('osv-0DM4qZEVSKON-DXN-NwVpw')
            ]

            expected_model = {'provider_auth': "CHAP fake-host random-pass"}

            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertEqual(model, expected_model)
def test_ensure_export_missing_volume(self):
# setup_mock_client drive with default configuration
# and return the mock HTTP 3PAR client
mock_client = self.setup_driver()
volume = {'host': 'test-host@3pariscsi',
'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
mock_client.getVolume.side_effect = hpexceptions.HTTPNotFound(
'fake')
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
model = self.driver.ensure_export(None, volume)
expected = [mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw')]
expected_model = None
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(model, expected_model)
@mock.patch.object(volume_types, 'get_volume_type')
def test_get_volume_settings_default_pool(self, _mock_volume_types):
_mock_volume_types.return_value = {
'name': 'gold',
'id': 'gold-id',
'extra_specs': {}}
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
volume = {'host': 'test-host@3pariscsi#pool_foo',
'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
pool = volume_utils.extract_host(volume['host'], 'pool')
model = common.get_volume_settings_from_type_id('gold-id', pool)
self.assertEqual(model['cpg'], 'pool_foo')
def test_get_model_update(self):
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
model_update = common._get_model_update('xxx@yyy#zzz', 'CPG')
self.assertEqual(model_update, {'host': 'xxx@yyy#CPG'})
# Fake getVLUNs payload: two active VLUNs on ports 0:8:2 and 1:8:1.
VLUNS5_RET = ({'members':
               [{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2},
                 'active': True},
                {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
                 'active': True}]})
# Fake getPorts payload: two ready iSCSI target ports (protocol 2) on
# 10.10.220.252 (1:8:2) and 10.10.220.253 (1:8:1).
PORTS_RET = ({'members':
              [{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2},
                'protocol': 2,
                'IPAddr': '10.10.220.252',
                'linkState': 4,
                'device': [],
                'iSCSIName': 'iqn.2000-05.com.3pardata:21820002ac00383d',
                'mode': 2,
                'HWAddr': '2C27D75375D2',
                'type': 8},
               {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
                'protocol': 2,
                'IPAddr': '10.10.220.253',
                'linkState': 4,
                'device': [],
                'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d',
                'mode': 2,
                'HWAddr': '2C27D75375D6',
                'type': 8}]})
# Fake getVLUNs payload: host 'foo' has one active VLUN on 1:8:2 and host
# 'bar' has three on 1:8:1 (used by the least-used-NSP tests).
VLUNS1_RET = ({'members':
               [{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2},
                 'hostname': 'foo', 'active': True},
                {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
                 'hostname': 'bar', 'active': True},
                {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
                 'hostname': 'bar', 'active': True},
                {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
                 'hostname': 'bar', 'active': True}]})
# Fake getPorts payload mixing protocols: two iSCSI ports (protocol 2) and
# one FC port (protocol 1, portPos 0:6:3) that must never be picked as an
# iSCSI NSP.
PORTS1_RET = ({'members':
               [{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2},
                 'protocol': 2,
                 'IPAddr': '10.10.120.252',
                 'linkState': 4,
                 'device': [],
                 'iSCSIName': 'iqn.2000-05.com.3pardata:21820002ac00383d',
                 'mode': 2,
                 'HWAddr': '2C27D75375D2',
                 'type': 8},
                {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
                 'protocol': 2,
                 'IPAddr': '10.10.220.253',
                 'linkState': 4,
                 'device': [],
                 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d',
                 'mode': 2,
                 'HWAddr': '2C27D75375D6',
                 'type': 8},
                {'portWWN': '20210002AC00383D',
                 'protocol': 1,
                 'linkState': 4,
                 'mode': 2,
                 'device': ['cage2'],
                 'nodeWWN': '20210002AC00383D',
                 'type': 2,
                 'portPos': {'node': 0, 'slot': 6, 'cardPort': 3}}]})
| 41.652425 | 79 | 0.539518 |
import mock
import ast
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
from cinder import context
from cinder import exception
from cinder import test
from cinder.tests import fake_hp_3par_client as hp3parclient
from cinder.volume.drivers.san.hp import hp_3par_common as hpcommon
from cinder.volume.drivers.san.hp import hp_3par_fc as hpfcdriver
from cinder.volume.drivers.san.hp import hp_3par_iscsi as hpdriver
from cinder.volume import qos_specs
from cinder.volume import utils as volume_utils
from cinder.volume import volume_types
# Re-export the client exception namespace under the short name used
# throughout the tests.
hpexceptions = hp3parclient.hpexceptions

LOG = logging.getLogger(__name__)

CONF = cfg.CONF

# Canned 3PAR array and credential values shared by all test cases below.
HP3PAR_CPG = 'OpenStackCPG'
HP3PAR_CPG2 = 'fakepool'
HP3PAR_CPG_QOS = 'qospool'
HP3PAR_CPG_SNAP = 'OpenStackCPGSnap'
HP3PAR_USER_NAME = 'testUser'
HP3PAR_USER_PASS = 'testPassword'
HP3PAR_SAN_IP = '2.2.2.2'
HP3PAR_SAN_SSH_PORT = 999
HP3PAR_SAN_SSH_CON_TIMEOUT = 44
HP3PAR_SAN_SSH_PRIVATE = 'foobar'

# Scheduler goodness/filter function strings fed to the configuration.
GOODNESS_FUNCTION = \
    "stats.capacity_utilization < 0.6? 100:25"
FILTER_FUNCTION = \
    "stats.total_volumes < 400 && stats.capacity_utilization < 0.8"

# Volume-metadata keys under which CHAP credentials are stored on the array.
CHAP_USER_KEY = "HPQ-cinder-CHAP-name"
CHAP_PASS_KEY = "HPQ-cinder-CHAP-secret"

FLASH_CACHE_ENABLED = 1
FLASH_CACHE_DISABLED = 2
class HP3PARBaseDriver(object):
    """Shared fixtures and helpers for the HP 3PAR driver unit tests."""

    class CommentMatcher(object):
        """Equality matcher for 3PAR volume comment strings.

        3PAR stores the volume comment as the repr of a dict; this matcher
        parses the actual comment and asserts it equals the expected dict,
        so it can be used inside mock.call expectations.
        """

        def __init__(self, f, expect):
            # f is the test case's assertEqual, bound here so failures
            # surface through the normal unittest machinery.
            self.assertEqual = f
            self.expect = expect

        def __eq__(self, actual):
            actual_as_dict = dict(ast.literal_eval(actual))
            self.assertEqual(self.expect, actual_as_dict)
            return True
VOLUME_ID = 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'
CLONE_ID = 'd03338a9-9115-48a3-8dfc-000000000000'
VOLUME_TYPE_ID_DEDUP = 'd03338a9-9115-48a3-8dfc-11111111111'
VOLUME_TYPE_ID_FLASH_CACHE = 'd03338a9-9115-48a3-8dfc-22222222222'
VOLUME_NAME = 'volume-' + VOLUME_ID
VOLUME_NAME_3PAR = 'osv-0DM4qZEVSKON-DXN-NwVpw'
SNAPSHOT_ID = '2f823bdc-e36e-4dc8-bd15-de1c7a28ff31'
SNAPSHOT_NAME = 'snapshot-2f823bdc-e36e-4dc8-bd15-de1c7a28ff31'
VOLUME_3PAR_NAME = 'osv-0DM4qZEVSKON-DXN-NwVpw'
SNAPSHOT_3PAR_NAME = 'oss-L4I73ONuTci9Fd4ceij-MQ'
FAKE_HOST = 'fakehost'
FAKE_CINDER_HOST = 'fakehost@foo#' + HP3PAR_CPG
USER_ID = '2689d9a913974c008b1d859013f23607'
PROJECT_ID = 'fac88235b9d64685a3530f73e490348f'
VOLUME_ID_SNAP = '761fc5e5-5191-4ec7-aeba-33e36de44156'
FAKE_DESC = 'test description name'
FAKE_FC_PORTS = [{'portPos': {'node': 7, 'slot': 1, 'cardPort': 1},
'portWWN': '0987654321234',
'protocol': 1,
'mode': 2,
'linkState': 4},
{'portPos': {'node': 6, 'slot': 1, 'cardPort': 1},
'portWWN': '123456789000987',
'protocol': 1,
'mode': 2,
'linkState': 4}]
QOS = {'qos:maxIOPS': '1000', 'qos:maxBWS': '50',
'qos:minIOPS': '100', 'qos:minBWS': '25',
'qos:latency': '25', 'qos:priority': 'low'}
QOS_SPECS = {'maxIOPS': '1000', 'maxBWS': '50',
'minIOPS': '100', 'minBWS': '25',
'latency': '25', 'priority': 'low'}
VVS_NAME = "myvvs"
FAKE_ISCSI_PORT = {'portPos': {'node': 8, 'slot': 1, 'cardPort': 1},
'protocol': 2,
'mode': 2,
'IPAddr': '1.1.1.2',
'iSCSIName': ('iqn.2000-05.com.3pardata:'
'21810002ac00383d'),
'linkState': 4}
volume = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'size': 2,
'host': FAKE_CINDER_HOST,
'volume_type': None,
'volume_type_id': None}
volume_encrypted = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'size': 2,
'host': FAKE_CINDER_HOST,
'volume_type': None,
'volume_type_id': None,
'encryption_key_id': 'fake_key'}
volume_dedup = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'size': 2,
'host': FAKE_CINDER_HOST,
'volume_type': 'dedup',
'volume_type_id': VOLUME_TYPE_ID_DEDUP}
volume_pool = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'size': 2,
'host': volume_utils.append_host(FAKE_HOST, HP3PAR_CPG2),
'volume_type': None,
'volume_type_id': None}
volume_qos = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'size': 2,
'host': FAKE_CINDER_HOST,
'volume_type': None,
'volume_type_id': 'gold'}
volume_flash_cache = {'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Foo Volume',
'size': 2,
'host': FAKE_CINDER_HOST,
'volume_type': None,
'volume_type_id': VOLUME_TYPE_ID_FLASH_CACHE}
snapshot = {'name': SNAPSHOT_NAME,
'id': SNAPSHOT_ID,
'user_id': USER_ID,
'project_id': PROJECT_ID,
'volume_id': VOLUME_ID_SNAP,
'volume_name': VOLUME_NAME,
'status': 'creating',
'progress': '0%',
'volume_size': 2,
'display_name': 'fakesnap',
'display_description': FAKE_DESC}
wwn = ["123456789012345", "123456789054321"]
connector = {'ip': '10.0.0.2',
'initiator': 'iqn.1993-08.org.debian:01:222',
'wwpns': [wwn[0], wwn[1]],
'wwnns': ["223456789012345", "223456789054321"],
'host': FAKE_HOST}
volume_type = {'name': 'gold',
'deleted': False,
'updated_at': None,
'extra_specs': {'cpg': HP3PAR_CPG2,
'qos:maxIOPS': '1000',
'qos:maxBWS': '50',
'qos:minIOPS': '100',
'qos:minBWS': '25',
'qos:latency': '25',
'qos:priority': 'low'},
'deleted_at': None,
'id': 'gold'}
volume_type_dedup = {'name': 'dedup',
'deleted': False,
'updated_at': None,
'extra_specs': {'cpg': HP3PAR_CPG2,
'provisioning': 'dedup'},
'deleted_at': None,
'id': VOLUME_TYPE_ID_DEDUP}
volume_type_flash_cache = {'name': 'flash-cache-on',
'deleted': False,
'updated_at': None,
'extra_specs': {'cpg': HP3PAR_CPG2,
'hp3par:flash_cache': 'true'},
'deleted_at': None,
'id': VOLUME_TYPE_ID_FLASH_CACHE}
flash_cache_3par_keys = {'flash_cache': 'true'}
cpgs = [
{'SAGrowth': {'LDLayout': {'diskPatterns': [{'diskType': 2}]},
'incrementMiB': 8192},
'SAUsage': {'rawTotalMiB': 24576,
'rawUsedMiB': 768,
'totalMiB': 8192,
'usedMiB': 256},
'SDGrowth': {'LDLayout': {'RAIDType': 4,
'diskPatterns': [{'diskType': 2}]},
'incrementMiB': 32768},
'SDUsage': {'rawTotalMiB': 49152,
'rawUsedMiB': 1023,
'totalMiB': 36864,
'usedMiB': 1024 * 1},
'UsrUsage': {'rawTotalMiB': 57344,
'rawUsedMiB': 43349,
'totalMiB': 43008,
'usedMiB': 1024 * 20},
'additionalStates': [],
'degradedStates': [],
'failedStates': [],
'id': 5,
'name': HP3PAR_CPG,
'numFPVVs': 2,
'numTPVVs': 0,
'numTDVVs': 1,
'state': 1,
'uuid': '29c214aa-62b9-41c8-b198-543f6cf24edf'}]
TASK_DONE = 1
TASK_ACTIVE = 2
STATUS_DONE = {'status': 1}
STATUS_ACTIVE = {'status': 2}
mock_client_conf = {
'PORT_MODE_TARGET': 2,
'PORT_STATE_READY': 4,
'PORT_PROTO_ISCSI': 2,
'PORT_PROTO_FC': 1,
'TASK_DONE': TASK_DONE,
'TASK_ACTIVE': TASK_ACTIVE,
'HOST_EDIT_ADD': 1,
'CHAP_INITIATOR': 1,
'CHAP_TARGET': 2,
'getPorts.return_value': {
'members': FAKE_FC_PORTS + [FAKE_ISCSI_PORT]
}
}
RETYPE_VVS_NAME = "yourvvs"
RETYPE_HOST = {
u'host': u'mark-stack1@3parfc',
u'capabilities': {
'QoS_support': True,
u'location_info': u'HP3PARDriver:1234567:MARK_TEST_CPG',
u'timestamp': u'2014-06-04T19:03:32.485540',
u'allocated_capacity_gb': 0,
u'volume_backend_name': u'3parfc',
u'free_capacity_gb': u'infinite',
u'driver_version': u'2.0.3',
u'total_capacity_gb': u'infinite',
u'reserved_percentage': 0,
u'vendor_name': u'Hewlett-Packard',
u'storage_protocol': u'FC'
}
}
RETYPE_HOST_NOT3PAR = {
u'host': u'mark-stack1@3parfc',
u'capabilities': {
u'location_info': u'XXXDriverXXX:1610771:MARK_TEST_CPG',
}
}
RETYPE_QOS_SPECS = {'maxIOPS': '1000', 'maxBWS': '50',
'minIOPS': '100', 'minBWS': '25',
'latency': '25', 'priority': 'high'}
RETYPE_VOLUME_TYPE_ID = "FakeVolId"
RETYPE_VOLUME_TYPE_0 = {
'name': 'red',
'id': RETYPE_VOLUME_TYPE_ID,
'extra_specs': {
'cpg': HP3PAR_CPG,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs': RETYPE_VVS_NAME,
'qos': RETYPE_QOS_SPECS,
'tpvv': True,
'tdvv': False,
'volume_type': volume_type
}
}
RETYPE_VOLUME_TYPE_1 = {
'name': 'white',
'id': RETYPE_VOLUME_TYPE_ID,
'extra_specs': {
'cpg': HP3PAR_CPG,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs': VVS_NAME,
'qos': QOS,
'tpvv': True,
'tdvv': False,
'volume_type': volume_type
}
}
RETYPE_VOLUME_TYPE_2 = {
'name': 'blue',
'id': RETYPE_VOLUME_TYPE_ID,
'extra_specs': {
'cpg': HP3PAR_CPG_QOS,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs': RETYPE_VVS_NAME,
'qos': RETYPE_QOS_SPECS,
'tpvv': True,
'tdvv': False,
'volume_type': volume_type
}
}
RETYPE_VOLUME_TYPE_3 = {
'name': 'purple',
'id': RETYPE_VOLUME_TYPE_ID,
'extra_specs': {
'cpg': HP3PAR_CPG_QOS,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs': RETYPE_VVS_NAME,
'qos': RETYPE_QOS_SPECS,
'tpvv': False,
'tdvv': True,
'volume_type': volume_type
}
}
RETYPE_VOLUME_TYPE_BAD_PERSONA = {
'name': 'bad_persona',
'id': 'any_id',
'extra_specs': {
'hp3par:persona': '99 - invalid'
}
}
RETYPE_VOLUME_TYPE_BAD_CPG = {
'name': 'bad_cpg',
'id': 'any_id',
'extra_specs': {
'cpg': 'bogus',
'snap_cpg': 'bogus',
'hp3par:persona': '2 - Generic-ALUA'
}
}
MANAGE_VOLUME_INFO = {
'userCPG': 'testUserCpg0',
'snapCPG': 'testSnapCpg0',
'provisioningType': 1,
'comment': "{'display_name': 'Foo Volume'}"
}
MV_INFO_WITH_NO_SNAPCPG = {
'userCPG': 'testUserCpg0',
'provisioningType': 1,
'comment': "{'display_name': 'Foo Volume'}"
}
RETYPE_TEST_COMMENT = "{'retype_test': 'test comment'}"
RETYPE_VOLUME_INFO_0 = {
'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Retype Vol0',
'size': 1,
'host': RETYPE_HOST,
'userCPG': 'testUserCpg0',
'snapCPG': 'testSnapCpg0',
'provisioningType': 1,
'comment': RETYPE_TEST_COMMENT
}
RETYPE_TEST_COMMENT_1 = "{'retype_test': 'test comment 1'}"
RETYPE_VOLUME_INFO_1 = {
'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Retype Vol1',
'size': 1,
'host': RETYPE_HOST,
'userCPG': HP3PAR_CPG,
'snapCPG': HP3PAR_CPG_SNAP,
'provisioningType': 1,
'comment': RETYPE_TEST_COMMENT
}
RETYPE_TEST_COMMENT_2 = "{'retype_test': 'test comment 2'}"
RETYPE_VOLUME_INFO_2 = {
'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Retype Vol2',
'size': 1,
'host': RETYPE_HOST,
'userCPG': HP3PAR_CPG,
'snapCPG': HP3PAR_CPG_SNAP,
'provisioningType': 3,
'comment': RETYPE_TEST_COMMENT
}
RETYPE_VOLUME_INFO_NO_SNAP = {
'name': VOLUME_NAME,
'id': VOLUME_ID,
'display_name': 'Retype Vol2',
'size': 1,
'host': RETYPE_HOST,
'userCPG': 'testUserCpg2',
'provisioningType': 1,
'comment': '{}'
}
RETYPE_CONF = {
'TASK_ACTIVE': TASK_ACTIVE,
'TASK_DONE': TASK_DONE,
'getTask.return_value': STATUS_DONE,
'getStorageSystemInfo.return_value': {'serialNumber': '1234567'},
'getVolume.return_value': RETYPE_VOLUME_INFO_0,
'modifyVolume.return_value': ("anyResponse", {'taskid': 1})
}
# 3PAR retype currently doesn't use the diff. Existing code and fresh info
RETYPE_DIFF = None
wsapi_version_312 = {'major': 1,
'build': 30102422,
'minor': 3,
'revision': 1}
wsapi_version_for_dedup = {'major': 1,
'build': 30201120,
'minor': 4,
'revision': 1}
wsapi_version_for_flash_cache = {'major': 1,
'build': 30201200,
'minor': 4,
'revision': 2}
wsapi_version_latest = wsapi_version_for_flash_cache
standard_login = [
mock.call.login(HP3PAR_USER_NAME, HP3PAR_USER_PASS),
mock.call.setSSHOptions(
HP3PAR_SAN_IP,
HP3PAR_USER_NAME,
HP3PAR_USER_PASS,
missing_key_policy='AutoAddPolicy',
privatekey=HP3PAR_SAN_SSH_PRIVATE,
known_hosts_file=mock.ANY,
port=HP3PAR_SAN_SSH_PORT,
conn_timeout=HP3PAR_SAN_SSH_CON_TIMEOUT)]
standard_logout = [
mock.call.logout()]
def setup_configuration(self):
configuration = mock.Mock()
configuration.hp3par_debug = False
configuration.hp3par_username = HP3PAR_USER_NAME
configuration.hp3par_password = HP3PAR_USER_PASS
configuration.hp3par_api_url = 'https://1.1.1.1/api/v1'
configuration.hp3par_cpg = [HP3PAR_CPG, HP3PAR_CPG2]
configuration.hp3par_cpg_snap = HP3PAR_CPG_SNAP
configuration.iscsi_ip_address = '1.1.1.2'
configuration.iscsi_port = '1234'
configuration.san_ip = HP3PAR_SAN_IP
configuration.san_login = HP3PAR_USER_NAME
configuration.san_password = HP3PAR_USER_PASS
configuration.san_ssh_port = HP3PAR_SAN_SSH_PORT
configuration.ssh_conn_timeout = HP3PAR_SAN_SSH_CON_TIMEOUT
configuration.san_private_key = HP3PAR_SAN_SSH_PRIVATE
configuration.hp3par_snapshot_expiration = ""
configuration.hp3par_snapshot_retention = ""
configuration.hp3par_iscsi_ips = []
configuration.hp3par_iscsi_chap_enabled = False
configuration.goodness_function = GOODNESS_FUNCTION
configuration.filter_function = FILTER_FUNCTION
return configuration
@mock.patch(
'hp3parclient.client.HP3ParClient',
spec=True,
)
def setup_mock_client(self, _m_client, driver, conf=None, m_conf=None):
_m_client = _m_client.return_value
_m_client.configure_mock(**self.mock_client_conf)
if m_conf is not None:
_m_client.configure_mock(**m_conf)
if conf is None:
conf = self.setup_configuration()
self.driver = driver(configuration=conf)
self.driver.do_setup(None)
return _m_client
@mock.patch('hp3parclient.version', "3.0.9")
def test_unsupported_client_version(self):
self.assertRaises(exception.InvalidInput,
self.setup_driver)
@mock.patch('hp3parclient.version', "3.1.2")
def test_ssh_options(self):
expected_hosts_key_file = "test_hosts_key_file"
orig_ssh_hosts_key_file = CONF.ssh_hosts_key_file
orig_strict_ssh_host_key_policy = CONF.strict_ssh_host_key_policy
CONF.ssh_hosts_key_file = expected_hosts_key_file
CONF.strict_ssh_host_key_policy = False
self.ctxt = context.get_admin_context()
mock_client = self.setup_mock_client(driver=hpfcdriver.HP3PARFCDriver)
CONF.ssh_hosts_key_file = orig_ssh_hosts_key_file
CONF.strict_ssh_host_key_policy = orig_strict_ssh_host_key_policy
expected = [
mock.call.login(HP3PAR_USER_NAME, HP3PAR_USER_PASS),
mock.call.setSSHOptions(
HP3PAR_SAN_IP,
HP3PAR_USER_NAME,
HP3PAR_USER_PASS,
privatekey=HP3PAR_SAN_SSH_PRIVATE,
known_hosts_file=expected_hosts_key_file,
missing_key_policy="AutoAddPolicy",
port=HP3PAR_SAN_SSH_PORT,
conn_timeout=HP3PAR_SAN_SSH_CON_TIMEOUT),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getCPG(HP3PAR_CPG2)]
mock_client.assert_has_calls(
expected +
self.standard_logout)
@mock.patch('hp3parclient.version', "3.1.2")
def test_ssh_options_strict(self):
expected_hosts_key_file = "test_hosts_key_file"
orig_ssh_hosts_key_file = CONF.ssh_hosts_key_file
orig_strict_ssh_host_key_policy = CONF.strict_ssh_host_key_policy
CONF.ssh_hosts_key_file = expected_hosts_key_file
CONF.strict_ssh_host_key_policy = True
self.ctxt = context.get_admin_context()
mock_client = self.setup_mock_client(driver=hpfcdriver.HP3PARFCDriver)
CONF.ssh_hosts_key_file = orig_ssh_hosts_key_file
CONF.strict_ssh_host_key_policy = orig_strict_ssh_host_key_policy
expected = [
mock.call.login(HP3PAR_USER_NAME, HP3PAR_USER_PASS),
mock.call.setSSHOptions(
HP3PAR_SAN_IP,
HP3PAR_USER_NAME,
HP3PAR_USER_PASS,
privatekey=HP3PAR_SAN_SSH_PRIVATE,
known_hosts_file=expected_hosts_key_file,
missing_key_policy="RejectPolicy",
port=HP3PAR_SAN_SSH_PORT,
conn_timeout=HP3PAR_SAN_SSH_CON_TIMEOUT),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getCPG(HP3PAR_CPG2)]
mock_client.assert_has_calls(expected + self.standard_logout)
def test_task_waiter(self):
task_statuses = [self.STATUS_ACTIVE, self.STATUS_ACTIVE]
def side_effect(*args):
return task_statuses and task_statuses.pop(0) or self.STATUS_DONE
conf = {'getTask.side_effect': side_effect}
mock_client = self.setup_driver(mock_conf=conf)
task_id = 1234
interval = .001
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
waiter = common.TaskWaiter(mock_client, task_id, interval)
status = waiter.wait_for_task()
expected = [
mock.call.getTask(task_id),
mock.call.getTask(task_id),
mock.call.getTask(task_id)
]
mock_client.assert_has_calls(expected)
self.assertEqual(status, self.STATUS_DONE)
def test_create_volume(self):
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.driver.create_volume(self.volume)
comment = (
'{"display_name": "Foo Volume", "type": "OpenStack",'
' "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7",'
' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
expected = [
mock.call.createVolume(
self.VOLUME_3PAR_NAME,
HP3PAR_CPG,
1907, {
'comment': comment,
'tpvv': True,
'tdvv': False,
'snapCPG': HP3PAR_CPG_SNAP})]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
def test_create_volume_in_pool(self):
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
return_model = self.driver.create_volume(self.volume_pool)
comment = (
'{"display_name": "Foo Volume", "type": "OpenStack",'
' "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7",'
' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
expected = [
mock.call.createVolume(
self.VOLUME_3PAR_NAME,
HP3PAR_CPG2,
1907, {
'comment': comment,
'tpvv': True,
'tdvv': False,
'snapCPG': HP3PAR_CPG_SNAP})]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(return_model, None)
@mock.patch.object(volume_types, 'get_volume_type')
def test_unsupported_dedup_volume_type(self, _mock_volume_types):
mock_client = self.setup_driver(wsapi_version=self.wsapi_version_312)
_mock_volume_types.return_value = {
'name': 'dedup',
'extra_specs': {
'cpg': HP3PAR_CPG_QOS,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs_name': self.VVS_NAME,
'qos': self.QOS,
'provisioning': 'dedup',
'volume_type': self.volume_type_dedup}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
self.assertRaises(exception.InvalidInput,
common.get_volume_settings_from_type_id,
self.VOLUME_TYPE_ID_DEDUP,
"mock")
@mock.patch.object(volume_types, 'get_volume_type')
def test_get_snap_cpg_from_volume_type(self, _mock_volume_types):
mock_client = self.setup_driver()
expected_type_snap_cpg = "type_snap_cpg"
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'cpg': HP3PAR_CPG,
'snap_cpg': expected_type_snap_cpg,
'volume_type': self.volume_type}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
result = common.get_volume_settings_from_type_id(
"mock", self.driver.configuration.hp3par_cpg)
self.assertEqual(expected_type_snap_cpg, result['snap_cpg'])
@mock.patch.object(volume_types, 'get_volume_type')
def test_get_snap_cpg_from_volume_type_cpg(self, _mock_volume_types):
mock_client = self.setup_driver()
expected_cpg = 'use_extra_specs_cpg'
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'cpg': expected_cpg,
'volume_type': self.volume_type}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
result = common.get_volume_settings_from_type_id(
"mock", self.driver.configuration.hp3par_cpg)
self.assertEqual(self.driver.configuration.hp3par_cpg_snap,
result['snap_cpg'])
@mock.patch.object(volume_types, 'get_volume_type')
def test_get_snap_cpg_from_volume_type_conf_snap_cpg(
self, _mock_volume_types):
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'volume_type': self.volume_type}}
conf = self.setup_configuration()
expected_snap_cpg = conf.hp3par_cpg_snap
mock_client = self.setup_driver(config=conf)
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
result = common.get_volume_settings_from_type_id(
"mock", self.driver.configuration.hp3par_cpg)
self.assertEqual(expected_snap_cpg, result['snap_cpg'])
@mock.patch.object(volume_types, 'get_volume_type')
def test_get_snap_cpg_from_volume_type_conf_cpg(
self, _mock_volume_types):
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'volume_type': self.volume_type}}
conf = self.setup_configuration()
conf.hp3par_cpg_snap = None
expected_cpg = conf.hp3par_cpg
mock_client = self.setup_driver(config=conf)
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
result = common.get_volume_settings_from_type_id(
"mock", self.driver.configuration.hp3par_cpg)
self.assertEqual(expected_cpg, result['snap_cpg'])
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_qos(self, _mock_volume_types):
mock_client = self.setup_driver()
_mock_volume_types.return_value = {
'name': 'gold',
'extra_specs': {
'cpg': HP3PAR_CPG_QOS,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs_name': self.VVS_NAME,
'qos': self.QOS,
'tpvv': True,
'tdvv': False,
'volume_type': self.volume_type}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
return_model = self.driver.create_volume(self.volume_qos)
comment = (
'{"volume_type_name": "gold", "display_name": "Foo Volume"'
', "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7'
'", "volume_type_id": "gold", "volume_id": "d03338a9-91'
'15-48a3-8dfc-35cdfcdc15a7", "qos": {}, "type": "OpenStack"}')
expected = [
mock.call.getCPG(HP3PAR_CPG),
mock.call.createVolume(
self.VOLUME_3PAR_NAME,
HP3PAR_CPG,
1907, {
'comment': comment,
'tpvv': True,
'tdvv': False,
'snapCPG': HP3PAR_CPG_SNAP})]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(return_model, None)
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_dedup(self, _mock_volume_types):
mock_client = self.setup_driver()
_mock_volume_types.return_value = {
'name': 'dedup',
'extra_specs': {
'cpg': HP3PAR_CPG_QOS,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs_name': self.VVS_NAME,
'qos': self.QOS,
'provisioning': 'dedup',
'volume_type': self.volume_type_dedup}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
return_model = self.driver.create_volume(self.volume_dedup)
comment = (
'{"volume_type_name": "dedup", "display_name": "Foo Volume"'
', "name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7'
'", "volume_type_id": "d03338a9-9115-48a3-8dfc-11111111111"'
', "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"'
', "qos": {}, "type": "OpenStack"}')
expected = [
mock.call.getCPG(HP3PAR_CPG),
mock.call.createVolume(
self.VOLUME_3PAR_NAME,
HP3PAR_CPG,
1907, {
'comment': comment,
'tpvv': False,
'tdvv': True,
'snapCPG': HP3PAR_CPG_SNAP})]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(return_model, None)
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_flash_cache(self, _mock_volume_types):
mock_client = self.setup_driver()
_mock_volume_types.return_value = {
'name': 'flash-cache-on',
'extra_specs': {
'cpg': HP3PAR_CPG2,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs_name': self.VVS_NAME,
'qos': self.QOS,
'tpvv': True,
'tdvv': False,
'hp3par:flash_cache': 'true',
'volume_type': self.volume_type_flash_cache}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
mock_client.getCPG.return_value = {'domain': None}
mock_client.FLASH_CACHE_ENABLED = FLASH_CACHE_ENABLED
mock_client.FLASH_CACHE_DISABLED = FLASH_CACHE_DISABLED
return_model = self.driver.create_volume(self.volume_flash_cache)
comment = (
'{"volume_type_name": "flash-cache-on", '
'"display_name": "Foo Volume", '
'"name": "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7", '
'"volume_type_id": "d03338a9-9115-48a3-8dfc-22222222222", '
'"volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7", '
'"qos": {}, "type": "OpenStack"}')
expected = [
mock.call.getCPG(HP3PAR_CPG),
mock.call.createVolume(
self.VOLUME_3PAR_NAME,
HP3PAR_CPG,
1907, {
'comment': comment,
'tpvv': True,
'tdvv': False,
'snapCPG': HP3PAR_CPG_SNAP}),
mock.call.getCPG(HP3PAR_CPG),
mock.call.createVolumeSet('vvs-0DM4qZEVSKON-DXN-NwVpw', None),
mock.call.createQoSRules(
'vvs-0DM4qZEVSKON-DXN-NwVpw',
{'priority': 2}
),
mock.call.modifyVolumeSet(
'vvs-0DM4qZEVSKON-DXN-NwVpw', flashCachePolicy=1),
mock.call.addVolumeToVolumeSet(
'vvs-0DM4qZEVSKON-DXN-NwVpw',
'osv-0DM4qZEVSKON-DXN-NwVpw')]
mock_client.assert_has_calls(
[mock.call.getWsApiVersion()] +
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(return_model, None)
@mock.patch.object(volume_types, 'get_volume_type')
def test_unsupported_flash_cache_volume(self, _mock_volume_types):
mock_client = self.setup_driver(wsapi_version=self.wsapi_version_312)
_mock_volume_types.return_value = {
'name': 'flash-cache-on',
'extra_specs': {
'cpg': HP3PAR_CPG2,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs_name': self.VVS_NAME,
'qos': self.QOS,
'tpvv': True,
'tdvv': False,
'hp3par:flash_cache': 'true',
'volume_type': self.volume_type_flash_cache}}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
self.assertRaises(exception.InvalidInput,
common.get_flash_cache_policy,
self.flash_cache_3par_keys)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_not_3par(self, _mock_volume_types):
        """Retype raises InvalidHost when the target host isn't a 3PAR."""
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(exception.InvalidHost,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_1,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST_NOT3PAR)
            # Only the existence check should have happened before bailing.
            expected = [mock.call.getVolume(self.VOLUME_3PAR_NAME)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_volume_not_found(self, _mock_volume_types):
        """An HTTPNotFound from getVolume propagates out of retype."""
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        mock_client.getVolume.side_effect = hpexceptions.HTTPNotFound
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(hpexceptions.HTTPNotFound,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_1,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)
            expected = [mock.call.getVolume(self.VOLUME_3PAR_NAME)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_specs_error_reverts_snap_cpg(self, _mock_volume_types):
        """A failure after snapCPG/comment were modified must revert the
        volume to its original settings.
        """
        _mock_volume_types.side_effect = [
            self.RETYPE_VOLUME_TYPE_1, self.RETYPE_VOLUME_TYPE_0]
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        mock_client.getVolume.return_value = self.RETYPE_VOLUME_INFO_0

        # Fail the QoS setup step after the snap CPG has been updated.
        mock_client.addVolumeToVolumeSet.side_effect = \
            hpexceptions.HTTPForbidden

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(hpexceptions.HTTPForbidden,
                              self.driver.retype,
                              self.ctxt,
                              {'id': self.VOLUME_ID},
                              self.RETYPE_VOLUME_TYPE_0,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)

            old_settings = {
                'snapCPG': self.RETYPE_VOLUME_INFO_0['snapCPG'],
                'comment': self.RETYPE_VOLUME_INFO_0['comment']}
            new_settings = {
                'snapCPG': (
                    self.RETYPE_VOLUME_TYPE_1['extra_specs']['snap_cpg']),
                'comment': mock.ANY}

            # The forward modification must have been attempted...
            expected = [
                mock.call.modifyVolume(self.VOLUME_3PAR_NAME, new_settings)
            ]
            mock_client.assert_has_calls(expected)
            # ...and the revert to the original settings must follow.
            expected = [
                mock.call.modifyVolume(self.VOLUME_3PAR_NAME, old_settings)
            ]
            mock_client.assert_has_calls(expected + self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_revert_comment(self, _mock_volume_types):
        """A failure while deleting the old volume set must revert the
        volume's comment and snapCPG to their original values.
        """
        _mock_volume_types.side_effect = [
            self.RETYPE_VOLUME_TYPE_2, self.RETYPE_VOLUME_TYPE_1]
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        mock_client.getVolume.return_value = self.RETYPE_VOLUME_INFO_1

        # Fail the volume set deletion step mid-retype.
        mock_client.deleteVolumeSet.side_effect = hpexceptions.HTTPForbidden

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(hpexceptions.HTTPForbidden,
                              self.driver.retype,
                              self.ctxt,
                              {'id': self.VOLUME_ID},
                              self.RETYPE_VOLUME_TYPE_2,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)

            original = {
                'snapCPG': self.RETYPE_VOLUME_INFO_1['snapCPG'],
                'comment': self.RETYPE_VOLUME_INFO_1['comment']}
            expected = [
                mock.call.modifyVolume('osv-0DM4qZEVSKON-DXN-NwVpw', original)]
            mock_client.assert_has_calls(expected + self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_different_array(self, _mock_volume_types):
        """InvalidHost is raised when the host's location_info serial does
        not match the array's reported serial number.
        """
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        # Serial differs from the '1234567' encoded in RETYPE_HOST.
        mock_client.getStorageSystemInfo.return_value = {
            'serialNumber': 'XXXXXXX'}

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(exception.InvalidHost,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_1,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)

            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getStorageSystemInfo()]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_across_cpg_domains(self, _mock_volume_types):
        """Invalid3PARDomain is raised when the current and target user CPGs
        live in different 3PAR domains.
        """
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        # First getCPG: current CPG's domain; second: target CPG's domain.
        mock_client.getCPG.side_effect = [
            {'domain': 'domain1'},
            {'domain': 'domain2'},
        ]

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(exception.Invalid3PARDomain,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_1,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)

            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getStorageSystemInfo(),
                mock.call.getCPG(self.RETYPE_VOLUME_INFO_0['userCPG']),
                mock.call.getCPG(
                    self.RETYPE_VOLUME_TYPE_1['extra_specs']['cpg'])
            ]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_across_snap_cpg_domains(self, _mock_volume_types):
        """Invalid3PARDomain is raised when the target snap CPG's domain
        differs from the user CPGs' common domain.
        """
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        # User CPGs agree on 'cpg_domain'; the snap CPG does not.
        mock_client.getCPG.side_effect = [
            {'domain': 'cpg_domain'},
            {'domain': 'cpg_domain'},
            {'domain': 'snap_cpg_domain_1'},
        ]

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(exception.Invalid3PARDomain,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_1,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)

            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getStorageSystemInfo(),
                mock.call.getCPG(self.RETYPE_VOLUME_INFO_0['userCPG']),
                mock.call.getCPG(
                    self.RETYPE_VOLUME_TYPE_1['extra_specs']['cpg']),
                mock.call.getCPG(
                    self.RETYPE_VOLUME_TYPE_1['extra_specs']['snap_cpg'])
            ]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_to_bad_persona(self, _mock_volume_types):
        """Retype to a type with an invalid persona raises InvalidInput."""
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_BAD_PERSONA
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.assertRaises(exception.InvalidInput,
                              self.driver.retype,
                              self.ctxt,
                              self.RETYPE_VOLUME_INFO_0,
                              self.RETYPE_VOLUME_TYPE_BAD_PERSONA,
                              self.RETYPE_DIFF,
                              self.RETYPE_HOST)
            expected = [mock.call.getVolume(self.VOLUME_3PAR_NAME)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_tune(self, _mock_volume_types):
        """A successful retype updates comment/snapCPG, swaps volume-set
        membership, and tunes the volume to the new user CPG.
        """
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)

        # Build a real volume type with associated QoS specs in the fake DB.
        qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', self.QOS)
        type_ref = volume_types.create(self.ctxt,
                                       "type1", {"qos:maxIOPS": "100",
                                                 "qos:maxBWS": "50",
                                                 "qos:minIOPS": "10",
                                                 "qos:minBWS": "20",
                                                 "qos:latency": "5",
                                                 "qos:priority": "high"})
        qos_specs.associate_qos_with_type(self.ctxt,
                                          qos_ref['id'],
                                          type_ref['id'])

        type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id'])

        volume = {'id': HP3PARBaseDriver.CLONE_ID}

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            retyped = self.driver.retype(
                self.ctxt, volume, type_ref, None, self.RETYPE_HOST)
            self.assertTrue(retyped)

            expected = [
                mock.call.modifyVolume('osv-0DM4qZEVSKON-AAAAAAAAA',
                                       {'comment': mock.ANY,
                                        'snapCPG': 'OpenStackCPGSnap'}),
                mock.call.deleteVolumeSet('vvs-0DM4qZEVSKON-AAAAAAAAA'),
                mock.call.addVolumeToVolumeSet('myvvs',
                                               'osv-0DM4qZEVSKON-AAAAAAAAA'),
                # action 6 == GROW_VOLUME tune; conversionOperation 1 == TPVV.
                mock.call.modifyVolume('osv-0DM4qZEVSKON-AAAAAAAAA',
                                       {'action': 6,
                                        'userCPG': 'OpenStackCPG',
                                        'conversionOperation': 1,
                                        'tuneOperation': 1}),
                mock.call.getTask(1)
            ]
            mock_client.assert_has_calls(expected + self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_qos_spec(self, _mock_volume_types):
        """_retype with new QoS specs creates a volume set with translated
        QoS rules (IOPS/BWS min-max, latency, priority) and adds the volume.
        """
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)

        cpg = "any_cpg"
        snap_cpg = "any_cpg"

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            # Drive _retype directly to isolate the QoS handling.
            common._retype(self.volume,
                           HP3PARBaseDriver.VOLUME_3PAR_NAME,
                           "old_type", "old_type_id",
                           HP3PARBaseDriver.RETYPE_HOST,
                           None, cpg, cpg, snap_cpg, snap_cpg,
                           True, False, False, True, None, None,
                           self.QOS_SPECS, self.RETYPE_QOS_SPECS,
                           None, None,
                           "{}")

            expected = [
                mock.call.createVolumeSet('vvs-0DM4qZEVSKON-DXN-NwVpw', None),
                mock.call.createQoSRules(
                    'vvs-0DM4qZEVSKON-DXN-NwVpw',
                    {'ioMinGoal': 100, 'ioMaxLimit': 1000,
                     'bwMinGoalKB': 25600, 'bwMaxLimitKB': 51200,
                     'priority': 3,
                     'latencyGoal': 25}
                ),
                mock.call.addVolumeToVolumeSet(
                    'vvs-0DM4qZEVSKON-DXN-NwVpw',
                    'osv-0DM4qZEVSKON-DXN-NwVpw')]
            mock_client.assert_has_calls(expected)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_retype_dedup(self, _mock_volume_types):
        """_retype to a dedup type tunes the volume with conversionOperation
        3 (thin-dedup) and waits on the resulting task.
        """
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_3
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)

        cpg = "any_cpg"
        snap_cpg = "any_cpg"
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            common._retype(self.volume,
                           HP3PARBaseDriver.VOLUME_3PAR_NAME,
                           "old_type", "old_type_id",
                           HP3PARBaseDriver.RETYPE_HOST,
                           None, cpg, cpg, snap_cpg, snap_cpg,
                           True, False, False, True, None, None,
                           self.QOS_SPECS, self.RETYPE_QOS_SPECS,
                           None, None,
                           "{}")

            expected = [
                mock.call.modifyVolume('osv-0DM4qZEVSKON-DXN-NwVpw',
                                       {'action': 6,
                                        'userCPG': 'any_cpg',
                                        'conversionOperation': 3,
                                        'tuneOperation': 1}),
                mock.call.getTask(1)]
            mock_client.assert_has_calls(expected)
def test_delete_volume(self):
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
self.driver.delete_volume(self.volume)
expected = [mock.call.deleteVolume(self.VOLUME_3PAR_NAME)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
    def test_create_cloned_volume(self):
        """Cloning uses an online copyVolume into the pool's CPG and returns
        no model update.
        """
        mock_client = self.setup_driver()
        mock_client.copyVolume.return_value = {'taskid': 1}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client

            volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
                      'id': HP3PARBaseDriver.CLONE_ID,
                      'display_name': 'Foo Volume',
                      'size': 2,
                      'host': volume_utils.append_host(self.FAKE_HOST,
                                                       HP3PAR_CPG2),
                      'source_volid': HP3PARBaseDriver.VOLUME_ID}
            src_vref = {}
            model_update = self.driver.create_cloned_volume(volume, src_vref)
            self.assertIsNone(model_update)

            expected = [
                mock.call.copyVolume(
                    self.VOLUME_3PAR_NAME,
                    'osv-0DM4qZEVSKON-AAAAAAAAA',
                    HP3PAR_CPG2,
                    {'snapCPG': 'OpenStackCPGSnap', 'tpvv': True,
                     'tdvv': False, 'online': True})]

            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_cloned_qos_volume(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_2
mock_client = self.setup_driver()
mock_client.copyVolume.return_value = {'taskid': 1}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
src_vref = {}
volume = self.volume_qos.copy()
host = "TEST_HOST"
pool = "TEST_POOL"
volume_host = volume_utils.append_host(host, pool)
expected_cpg = pool
volume['id'] = HP3PARBaseDriver.CLONE_ID
volume['host'] = volume_host
volume['source_volid'] = HP3PARBaseDriver.VOLUME_ID
model_update = self.driver.create_cloned_volume(volume, src_vref)
self.assertEqual(model_update, None)
expected = [
mock.call.getCPG(expected_cpg),
mock.call.copyVolume(
self.VOLUME_3PAR_NAME,
'osv-0DM4qZEVSKON-AAAAAAAAA',
expected_cpg,
{'snapCPG': 'OpenStackCPGSnap', 'tpvv': True,
'tdvv': False, 'online': True})]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
    def test_migrate_volume(self):
        """Migration within the same array (matching serial in
        location_info) is done as an in-place retype/tune and succeeds.
        """
        conf = {
            'getStorageSystemInfo.return_value': {
                'serialNumber': '1234'},
            'getTask.return_value': {
                'status': 1},
            'getCPG.return_value': {},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': self.RETYPE_VOLUME_INFO_1
        }

        mock_client = self.setup_driver(mock_conf=conf)
        mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
        mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
        mock_client.getTask.return_value = self.STATUS_DONE

        volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
                  'id': HP3PARBaseDriver.CLONE_ID,
                  'display_name': 'Foo Volume',
                  'volume_type_id': None,
                  'size': 2,
                  'status': 'available',
                  'host': HP3PARBaseDriver.FAKE_HOST,
                  'source_volid': HP3PARBaseDriver.VOLUME_ID}

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()

            volume_name_3par = common._encode_name(volume['id'])

            # Serial '1234' matches the mocked array above.
            loc_info = 'HP3PARDriver:1234:CPG-FC1'
            host = {'host': 'stack@3parfc1#CPG-FC1',
                    'capabilities': {'location_info': loc_info}}

            result = self.driver.migrate_volume(context.get_admin_context(),
                                                volume, host)
            self.assertIsNotNone(result)
            self.assertEqual((True, None), result)

            osv_matcher = 'osv-' + volume_name_3par

            expected = [
                mock.call.modifyVolume(
                    osv_matcher,
                    {'comment': '{"qos": {}, "display_name": "Foo Volume"}',
                     'snapCPG': HP3PAR_CPG_SNAP}),
                mock.call.modifyVolume(osv_matcher,
                                       {'action': 6,
                                        'userCPG': 'CPG-FC1',
                                        'conversionOperation': 1,
                                        'tuneOperation': 1}),
                mock.call.getTask(mock.ANY)
            ]

            mock_client.assert_has_calls(expected + self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_migrate_volume_with_type(self, _mock_volume_types):
        """Same-array migration of a typed volume preserves the type's
        metadata (name, vvs) in the rewritten comment and uses its snap CPG.
        """
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_2

        conf = {
            'getStorageSystemInfo.return_value': {
                'serialNumber': '1234'},
            'getTask.return_value': {
                'status': 1},
            'getCPG.return_value': {},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': self.RETYPE_VOLUME_INFO_1
        }

        mock_client = self.setup_driver(mock_conf=conf)
        mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
        mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
        mock_client.getTask.return_value = self.STATUS_DONE

        display_name = 'Foo Volume'

        volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
                  'id': HP3PARBaseDriver.CLONE_ID,
                  'display_name': display_name,
                  "volume_type_id": self.RETYPE_VOLUME_TYPE_2['id'],
                  'size': 2,
                  'status': 'available',
                  'host': HP3PARBaseDriver.FAKE_HOST,
                  'source_volid': HP3PARBaseDriver.VOLUME_ID}

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()

            volume_name_3par = common._encode_name(volume['id'])

            loc_info = 'HP3PARDriver:1234:CPG-FC1'
            instance_host = 'stack@3parfc1#CPG-FC1'
            host = {'host': instance_host,
                    'capabilities': {'location_info': loc_info}}

            result = self.driver.migrate_volume(context.get_admin_context(),
                                                volume, host)
            self.assertIsNotNone(result)
            # Migration is expected to succeed without a model update.
            self.assertEqual((True, None), result)

            osv_matcher = 'osv-' + volume_name_3par

            expected_comment = {
                "display_name": display_name,
                "volume_type_id": self.RETYPE_VOLUME_TYPE_2['id'],
                "volume_type_name": self.RETYPE_VOLUME_TYPE_2['name'],
                "vvs": self.RETYPE_VOLUME_TYPE_2['extra_specs']['vvs']
            }

            expected = [
                mock.call.modifyVolume(
                    osv_matcher,
                    {'comment': self.CommentMatcher(self.assertEqual,
                                                    expected_comment),
                     'snapCPG': self.RETYPE_VOLUME_TYPE_2
                     ['extra_specs']['snap_cpg']}),
                mock.call.modifyVolume(
                    osv_matcher,
                    {'action': 6,
                     'userCPG': 'CPG-FC1',
                     'conversionOperation': 1,
                     'tuneOperation': 1}),
                mock.call.getTask(mock.ANY)
            ]

            mock_client.assert_has_calls(
                expected +
                self.standard_logout)
    def test_migrate_volume_diff_host(self):
        """Migration is declined (False, None) when the destination's serial
        number does not match this array.
        """
        conf = {
            'getStorageSystemInfo.return_value': {
                'serialNumber': 'different'},
        }

        mock_client = self.setup_driver(mock_conf=conf)

        volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
                  'id': HP3PARBaseDriver.CLONE_ID,
                  'display_name': 'Foo Volume',
                  'volume_type_id': None,
                  'size': 2,
                  'status': 'available',
                  'host': HP3PARBaseDriver.FAKE_HOST,
                  'source_volid': HP3PARBaseDriver.VOLUME_ID}

        # location_info serial '1234' != 'different' reported by the array.
        loc_info = 'HP3PARDriver:1234:CPG-FC1'
        host = {'host': 'stack@3parfc1',
                'capabilities': {'location_info': loc_info}}

        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            result = self.driver.migrate_volume(context.get_admin_context(),
                                                volume, host)
            self.assertIsNotNone(result)
            self.assertEqual((False, None), result)
@mock.patch.object(volume_types, 'get_volume_type')
def test_migrate_volume_diff_domain(self, _mock_volume_types):
_mock_volume_types.return_value = self.volume_type
conf = {
'getStorageSystemInfo.return_value': {
'serialNumber': '1234'},
'getTask.return_value': {
'status': 1},
'getCPG.return_value': {},
'copyVolume.return_value': {'taskid': 1},
'getVolume.return_value': self.RETYPE_VOLUME_INFO_1
}
mock_client = self.setup_driver(mock_conf=conf)
mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
mock_client.getTask.return_value = self.STATUS_DONE
volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
'id': HP3PARBaseDriver.CLONE_ID,
'display_name': 'Foo Volume',
'volume_type_id': None,
'size': 2,
'status': 'available',
'host': HP3PARBaseDriver.FAKE_HOST,
'source_volid': HP3PARBaseDriver.VOLUME_ID}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
volume_name_3par = common._encode_name(volume['id'])
loc_info = 'HP3PARDriver:1234:CPG-FC1'
host = {'host': 'stack@3parfc1
'capabilities': {'location_info': loc_info}}
result = self.driver.migrate_volume(context.get_admin_context(),
volume, host)
self.assertIsNotNone(result)
self.assertEqual((True, None), result)
osv_matcher = 'osv-' + volume_name_3par
expected = [
mock.call.modifyVolume(
osv_matcher,
{'comment': '{"qos": {}, "display_name": "Foo Volume"}',
'snapCPG': HP3PAR_CPG_SNAP}),
mock.call.modifyVolume(osv_matcher,
{'action': 6,
'userCPG': 'CPG-FC1',
'conversionOperation': 1,
'tuneOperation': 1}),
mock.call.getTask(mock.ANY),
]
mock_client.assert_has_calls(expected + self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_migrate_volume_attached(self, _mock_volume_types):
_mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
'volume_type_id': None,
'id': HP3PARBaseDriver.CLONE_ID,
'display_name': 'Foo Volume',
'size': 2,
'status': 'in-use',
'host': HP3PARBaseDriver.FAKE_HOST,
'source_volid': HP3PARBaseDriver.VOLUME_ID}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
volume_name_3par = common._encode_name(volume['id'])
osv_matcher = 'osv-' + volume_name_3par
loc_info = 'HP3PARDriver:1234567:CPG-FC1'
protocol = "FC"
if self.properties['driver_volume_type'] == "iscsi":
protocol = "iSCSI"
host = {'host': 'stack@3parfc1',
'capabilities': {'location_info': loc_info,
'storage_protocol': protocol}}
result = self.driver.migrate_volume(context.get_admin_context(),
volume, host)
new_comment = {"qos": {},
"retype_test": "test comment"}
expected = [
mock.call.modifyVolume(osv_matcher,
{'comment': self.CommentMatcher(
self.assertEqual, new_comment),
'snapCPG': 'OpenStackCPGSnap'}),
mock.call.modifyVolume(osv_matcher,
{'action': 6,
'userCPG': 'OpenStackCPG',
'conversionOperation': 1,
'tuneOperation': 1}),
mock.call.getTask(1),
mock.call.logout()
]
mock_client.assert_has_calls(expected)
self.assertIsNotNone(result)
self.assertEqual((True, {'host': 'stack@3parfc1
result)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_migrate_volume_attached_diff_protocol(self, _mock_volume_types):
        """Migrating an in-use volume to a different protocol is refused."""
        _mock_volume_types.return_value = self.RETYPE_VOLUME_TYPE_1
        mock_client = self.setup_driver(mock_conf=self.RETYPE_CONF)
        protocol = "OTHER"
        volume = {'name': HP3PARBaseDriver.VOLUME_NAME,
                  'volume_type_id': None,
                  'id': HP3PARBaseDriver.CLONE_ID,
                  'display_name': 'Foo Volume',
                  'size': 2,
                  'status': 'in-use',
                  'host': HP3PARBaseDriver.FAKE_HOST,
                  'source_volid': HP3PARBaseDriver.VOLUME_ID}
        loc_info = 'HP3PARDriver:1234567:CPG-FC1'
        host = {'host': 'stack@3parfc1',
                'capabilities': {'location_info': loc_info,
                                 'storage_protocol': protocol}}
        result = self.driver.migrate_volume(context.get_admin_context(),
                                            volume, host)
        self.assertIsNotNone(result)
        self.assertEqual((False, None), result)
        # No 3PAR REST calls at all are expected on a protocol mismatch.
        expected = []
        mock_client.assert_has_calls(expected)
    def test_attach_volume(self):
        """attach_volume writes the instance uuid as 3PAR volume metadata.

        A failure in setVolumeMetaData must surface as CinderException.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.attach_volume(context.get_admin_context(),
                                      self.volume,
                                      'abcdef',
                                      'newhost',
                                      '/dev/vdb')
            expected = [
                mock.call.setVolumeMetaData(
                    self.VOLUME_3PAR_NAME,
                    'HPQ-CS-instance_uuid',
                    'abcdef')]
            mock_client.assert_has_calls(expected)
            # test the exception
            mock_client.setVolumeMetaData.side_effect = Exception('Custom ex')
            self.assertRaises(exception.CinderException,
                              self.driver.attach_volume,
                              context.get_admin_context(),
                              self.volume,
                              'abcdef',
                              'newhost',
                              '/dev/vdb')
    def test_detach_volume(self):
        """detach_volume removes the instance-uuid metadata key.

        A failure in removeVolumeMetaData must surface as CinderException.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.detach_volume(context.get_admin_context(), self.volume,
                                      None)
            expected = [
                mock.call.removeVolumeMetaData(
                    self.VOLUME_3PAR_NAME,
                    'HPQ-CS-instance_uuid')]
            mock_client.assert_has_calls(expected)
            # test the exception
            mock_client.removeVolumeMetaData.side_effect = Exception(
                'Custom ex')
            self.assertRaises(exception.CinderException,
                              self.driver.detach_volume,
                              context.get_admin_context(),
                              self.volume, None)
    def test_create_snapshot(self):
        """create_snapshot issues a read-only 3PAR createSnapshot.

        The JSON comment embeds the Cinder snapshot/volume identity and
        must match byte-for-byte.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.create_snapshot(self.snapshot)
            comment = (
                '{"volume_id": "761fc5e5-5191-4ec7-aeba-33e36de44156",'
                ' "display_name": "fakesnap",'
                ' "description": "test description name",'
                ' "volume_name":'
                ' "volume-d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
            expected = [
                mock.call.createSnapshot(
                    'oss-L4I73ONuTci9Fd4ceij-MQ',
                    'osv-dh-F5VGRTseuujPjbeRBVg',
                    {
                        'comment': comment,
                        'readOnly': True})]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    def test_delete_snapshot(self):
        """delete_snapshot deletes the backing 'oss-' snapshot volume."""
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.delete_snapshot(self.snapshot)
            expected = [
                mock.call.deleteVolume('oss-L4I73ONuTci9Fd4ceij-MQ')]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    def test_delete_snapshot_in_use(self):
        """HTTPConflict from the array maps to SnapshotIsBusy."""
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.create_snapshot(self.snapshot)
            self.driver.create_volume_from_snapshot(self.volume, self.snapshot)
            ex = hpexceptions.HTTPConflict("In use")
            mock_client.deleteVolume = mock.Mock(side_effect=ex)
            # Deleting the snapshot that a volume is dependent on should fail
            self.assertRaises(exception.SnapshotIsBusy,
                              self.driver.delete_snapshot,
                              self.snapshot)
    def test_delete_snapshot_not_found(self):
        """Deleting an already-missing snapshot is treated as success."""
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.create_snapshot(self.snapshot)
            try:
                ex = hpexceptions.HTTPNotFound("not found")
                mock_client.deleteVolume = mock.Mock(side_effect=ex)
                self.driver.delete_snapshot(self.snapshot)
            except Exception:
                self.fail("Deleting a snapshot that is missing should act "
                          "as if it worked.")
    def test_create_volume_from_snapshot(self):
        """Same-size clone from a snapshot is a writable 3PAR snapshot.

        Also verifies that asking for a *smaller* volume than the
        snapshot raises InvalidInput.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            model_update = self.driver.create_volume_from_snapshot(
                self.volume,
                self.snapshot)
            self.assertIsNone(model_update)
            comment = (
                '{"snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31",'
                ' "display_name": "Foo Volume",'
                ' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
            expected = [
                mock.call.createSnapshot(
                    self.VOLUME_3PAR_NAME,
                    'oss-L4I73ONuTci9Fd4ceij-MQ',
                    {
                        'comment': comment,
                        'readOnly': False})]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            volume = self.volume.copy()
            volume['size'] = 1
            self.assertRaises(exception.InvalidInput,
                              self.driver.create_volume_from_snapshot,
                              volume, self.snapshot)
    def test_create_volume_from_snapshot_and_extend(self):
        """A larger clone copies the snapshot to a new volume, then grows it.

        Expected flow: createSnapshot -> copyVolume (osv -> omv) ->
        delete the temporary osv -> rename omv back to osv -> growVolume
        by the extra 10 GiB (in MiB).
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        conf = {
            'getTask.return_value': {
                'status': 1},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': {}
        }
        mock_client = self.setup_driver(mock_conf=conf)
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            volume = self.volume.copy()
            volume['size'] = self.volume['size'] + 10
            model_update = self.driver.create_volume_from_snapshot(
                volume,
                self.snapshot)
            self.assertEqual(model_update, None)
            comment = (
                '{"snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31",'
                ' "display_name": "Foo Volume",'
                ' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
            volume_name_3par = common._encode_name(volume['id'])
            osv_matcher = 'osv-' + volume_name_3par
            omv_matcher = 'omv-' + volume_name_3par
            expected = [
                mock.call.createSnapshot(
                    self.VOLUME_3PAR_NAME,
                    'oss-L4I73ONuTci9Fd4ceij-MQ',
                    {
                        'comment': comment,
                        'readOnly': False}),
                mock.call.copyVolume(
                    osv_matcher, omv_matcher, HP3PAR_CPG, mock.ANY),
                mock.call.getTask(mock.ANY),
                mock.call.getVolume(osv_matcher),
                mock.call.deleteVolume(osv_matcher),
                mock.call.modifyVolume(omv_matcher, {'newName': osv_matcher}),
                mock.call.growVolume(osv_matcher, 10 * 1024)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_create_volume_from_snapshot_and_extend_with_qos(
            self, _mock_volume_types):
        """Larger clone with a QoS volume type adds a getCPG lookup.

        Same copy/rename/grow flow as the non-QoS variant, but the
        type's CPG is resolved (getCPG) before the copy.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        conf = {
            'getTask.return_value': {
                'status': 1},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': {}
        }
        mock_client = self.setup_driver(mock_conf=conf)
        _mock_volume_types.return_value = {
            'name': 'gold',
            'extra_specs': {
                'cpg': HP3PAR_CPG_QOS,
                'snap_cpg': HP3PAR_CPG_SNAP,
                'vvs_name': self.VVS_NAME,
                'qos': self.QOS,
                'tpvv': True,
                'tdvv': False,
                'volume_type': self.volume_type}}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            volume = self.volume_qos.copy()
            volume['size'] = self.volume['size'] + 10
            model_update = self.driver.create_volume_from_snapshot(
                volume,
                self.snapshot)
            self.assertEqual(model_update, None)
            comment = (
                '{"snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31",'
                ' "display_name": "Foo Volume",'
                ' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
            volume_name_3par = common._encode_name(volume['id'])
            osv_matcher = 'osv-' + volume_name_3par
            omv_matcher = 'omv-' + volume_name_3par
            expected = [
                mock.call.createSnapshot(
                    self.VOLUME_3PAR_NAME,
                    'oss-L4I73ONuTci9Fd4ceij-MQ',
                    {
                        'comment': comment,
                        'readOnly': False}),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.copyVolume(
                    osv_matcher, omv_matcher, HP3PAR_CPG, mock.ANY),
                mock.call.getTask(mock.ANY),
                mock.call.getVolume(osv_matcher),
                mock.call.deleteVolume(osv_matcher),
                mock.call.modifyVolume(omv_matcher, {'newName': osv_matcher}),
                mock.call.growVolume(osv_matcher, 10 * 1024)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    def test_create_volume_from_snapshot_and_extend_copy_fail(self):
        """A failed copy task (status 4) raises CinderException."""
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        conf = {
            'getTask.return_value': {
                'status': 4,
                'failure message': 'out of disk space'},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': {}
        }
        mock_client = self.setup_driver(mock_conf=conf)
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            volume = self.volume.copy()
            volume['size'] = self.volume['size'] + 10
            self.assertRaises(exception.CinderException,
                              self.driver.create_volume_from_snapshot,
                              volume, self.snapshot)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_create_volume_from_snapshot_qos(self, _mock_volume_types):
        """Same-size clone with a QoS type still uses a writable snapshot.

        Also verifies the shrink case raises InvalidInput.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            _mock_volume_types.return_value = {
                'name': 'gold',
                'extra_specs': {
                    'cpg': HP3PAR_CPG,
                    'snap_cpg': HP3PAR_CPG_SNAP,
                    'vvs_name': self.VVS_NAME,
                    'qos': self.QOS,
                    'tpvv': True,
                    'tdvv': False,
                    'volume_type': self.volume_type}}
            self.driver.create_volume_from_snapshot(
                self.volume_qos,
                self.snapshot)
            comment = (
                '{"snapshot_id": "2f823bdc-e36e-4dc8-bd15-de1c7a28ff31",'
                ' "display_name": "Foo Volume",'
                ' "volume_id": "d03338a9-9115-48a3-8dfc-35cdfcdc15a7"}')
            expected = [
                mock.call.createSnapshot(
                    self.VOLUME_3PAR_NAME,
                    'oss-L4I73ONuTci9Fd4ceij-MQ', {
                        'comment': comment,
                        'readOnly': False})]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            volume = self.volume.copy()
            volume['size'] = 1
            self.assertRaises(exception.InvalidInput,
                              self.driver.create_volume_from_snapshot,
                              volume, self.snapshot)
    def test_terminate_connection(self):
        """terminate_connection deletes the VLUN, host and CHAP metadata."""
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getHostVLUNs.return_value = [
            {'active': True,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': None, 'type': 0}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            self.driver.terminate_connection(
                self.volume,
                self.connector,
                force=True)
            expected = [
                mock.call.queryHost(iqns=[self.connector['initiator']]),
                mock.call.getHostVLUNs(self.FAKE_HOST),
                mock.call.deleteVLUN(
                    self.VOLUME_3PAR_NAME,
                    None,
                    self.FAKE_HOST),
                mock.call.getHostVLUNs(self.FAKE_HOST),
                mock.call.deleteHost(self.FAKE_HOST),
                mock.call.removeVolumeMetaData(
                    self.VOLUME_3PAR_NAME, CHAP_USER_KEY),
                mock.call.removeVolumeMetaData(
                    self.VOLUME_3PAR_NAME, CHAP_PASS_KEY)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    def test_update_volume_key_value_pair(self):
        """update_volume_key_value_pair maps to setVolumeMetaData.

        An array failure is wrapped in VolumeBackendAPIException.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        key = 'a'
        value = 'b'
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            common.update_volume_key_value_pair(
                self.volume,
                key,
                value)
            expected = [
                mock.call.setVolumeMetaData(self.VOLUME_3PAR_NAME, key, value)]
            mock_client.assert_has_calls(expected)
            # check exception
            mock_client.setVolumeMetaData.side_effect = Exception('fake')
            self.assertRaises(exception.VolumeBackendAPIException,
                              common.update_volume_key_value_pair,
                              self.volume,
                              None,
                              'b')
    def test_clear_volume_key_value_pair(self):
        """clear_volume_key_value_pair maps to removeVolumeMetaData.

        An array failure is wrapped in VolumeBackendAPIException.
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            key = 'a'
            common = self.driver._login()
            common.clear_volume_key_value_pair(self.volume, key)
            expected = [
                mock.call.removeVolumeMetaData(self.VOLUME_3PAR_NAME, key)]
            mock_client.assert_has_calls(expected)
            # check the exception
            mock_client.removeVolumeMetaData.side_effect = Exception('fake')
            self.assertRaises(exception.VolumeBackendAPIException,
                              common.clear_volume_key_value_pair,
                              self.volume,
                              None)
    def test_extend_volume(self):
        """extend_volume grows by the size delta, converted GiB -> MiB."""
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            grow_size = 3
            old_size = self.volume['size']
            new_size = old_size + grow_size
            self.driver.extend_volume(self.volume, str(new_size))
            growth_size_mib = grow_size * units.Ki
            expected = [
                mock.call.growVolume(self.VOLUME_3PAR_NAME, growth_size_mib)]
            mock_client.assert_has_calls(expected)
    def test_extend_volume_non_base(self):
        """HTTPForbidden code 150 (non-base volume) triggers one retry.

        The driver is expected to convert the volume and call growVolume
        a second time, hence the call count of 2.
        """
        extend_ex = hpexceptions.HTTPForbidden(error={'code': 150})
        conf = {
            'getTask.return_value': {
                'status': 1},
            'getCPG.return_value': {},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': {},
            # Throw an exception first time only
            'growVolume.side_effect': [extend_ex,
                                       None],
        }
        mock_client = self.setup_driver(mock_conf=conf)
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            grow_size = 3
            old_size = self.volume['size']
            new_size = old_size + grow_size
            self.driver.extend_volume(self.volume, str(new_size))
            self.assertEqual(2, mock_client.growVolume.call_count)
    def test_extend_volume_non_base_failure(self):
        """If growVolume keeps failing, the HTTPForbidden propagates."""
        extend_ex = hpexceptions.HTTPForbidden(error={'code': 150})
        conf = {
            'getTask.return_value': {
                'status': 1},
            'getCPG.return_value': {},
            'copyVolume.return_value': {'taskid': 1},
            'getVolume.return_value': {},
            # Always fail
            'growVolume.side_effect': extend_ex
        }
        mock_client = self.setup_driver(mock_conf=conf)
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            grow_size = 3
            old_size = self.volume['size']
            new_size = old_size + grow_size
            self.assertRaises(hpexceptions.HTTPForbidden,
                              self.driver.extend_volume,
                              self.volume,
                              str(new_size))
    def test_get_ports(self):
        """get_ports returns every member reported by the array."""
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getPorts.return_value = {
            'members': [
                {'portPos': {'node': 0, 'slot': 8, 'cardPort': 2},
                 'protocol': 2,
                 'IPAddr': '10.10.120.252',
                 'linkState': 4,
                 'device': [],
                 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d',
                 'mode': 2,
                 'HWAddr': '2C27D75375D2',
                 'type': 8},
                {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
                 'protocol': 2,
                 'IPAddr': '10.10.220.253',
                 'linkState': 4,
                 'device': [],
                 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d',
                 'mode': 2,
                 'HWAddr': '2C27D75375D6',
                 'type': 8},
                {'portWWN': '20210002AC00383D',
                 'protocol': 1,
                 'linkState': 4,
                 'mode': 2,
                 'device': ['cage2'],
                 'nodeWWN': '20210002AC00383D',
                 'type': 2,
                 'portPos': {'node': 0, 'slot': 6, 'cardPort': 3}}]}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            ports = common.get_ports()['members']
            self.assertEqual(len(ports), 3)
    def test_get_by_qos_spec_with_scoping(self):
        """Associated qos_specs take precedence over scoped extra_specs."""
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            qos_ref = qos_specs.create(self.ctxt, 'qos-specs-1', self.QOS)
            type_ref = volume_types.create(self.ctxt,
                                           "type1", {"qos:maxIOPS": "100",
                                                     "qos:maxBWS": "50",
                                                     "qos:minIOPS": "10",
                                                     "qos:minBWS": "20",
                                                     "qos:latency": "5",
                                                     "qos:priority": "high"})
            qos_specs.associate_qos_with_type(self.ctxt,
                                              qos_ref['id'],
                                              type_ref['id'])
            type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id'])
            qos = common._get_qos_by_volume_type(type_ref)
            # Values come from self.QOS, not from the scoped extra specs.
            self.assertEqual(qos, {'maxIOPS': '1000', 'maxBWS': '50',
                                   'minIOPS': '100', 'minBWS': '25',
                                   'latency': '25', 'priority': 'low'})
    def test_get_by_qos_spec(self):
        """Associated qos_specs win even without scoped extra_specs keys."""
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            qos_ref = qos_specs.create(
                self.ctxt,
                'qos-specs-1',
                self.QOS_SPECS)
            type_ref = volume_types.create(self.ctxt,
                                           "type1", {"qos:maxIOPS": "100",
                                                     "qos:maxBWS": "50",
                                                     "qos:minIOPS": "10",
                                                     "qos:minBWS": "20",
                                                     "qos:latency": "5",
                                                     "qos:priority": "high"})
            qos_specs.associate_qos_with_type(self.ctxt,
                                              qos_ref['id'],
                                              type_ref['id'])
            type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id'])
            qos = common._get_qos_by_volume_type(type_ref)
            # Values come from self.QOS_SPECS, not the type's extra specs.
            self.assertEqual(qos, {'maxIOPS': '1000', 'maxBWS': '50',
                                   'minIOPS': '100', 'minBWS': '25',
                                   'latency': '25', 'priority': 'low'})
    def test_get_by_qos_by_type_only(self):
        """With no qos_specs association, scoped extra_specs are used."""
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            type_ref = volume_types.create(self.ctxt,
                                           "type1", {"qos:maxIOPS": "100",
                                                     "qos:maxBWS": "50",
                                                     "qos:minIOPS": "10",
                                                     "qos:minBWS": "20",
                                                     "qos:latency": "5",
                                                     "qos:priority": "high"})
            type_ref = volume_types.get_volume_type(self.ctxt, type_ref['id'])
            qos = common._get_qos_by_volume_type(type_ref)
            self.assertEqual(qos, {'maxIOPS': '100', 'maxBWS': '50',
                                   'minIOPS': '10', 'minBWS': '20',
                                   'latency': '5', 'priority': 'high'})
    def test_create_vlun(self):
        """_create_3par_vlun parses the createVLUN location string.

        Covers both forms: with an NSP ('name,lun,host,nsp') and without
        ('name,lun,host').
        """
        host = 'fake-host'
        lun_id = 11
        nsp = '1:2:3'
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            location = ("%(name)s,%(lunid)s,%(host)s,%(nsp)s" %
                        {'name': self.VOLUME_NAME,
                         'lunid': lun_id,
                         'host': host,
                         'nsp': nsp})
            mock_client.createVLUN.return_value = location
            expected_info = {'volume_name': self.VOLUME_NAME,
                             'lun_id': lun_id,
                             'host_name': host,
                             'nsp': nsp}
            common = self.driver._login()
            vlun_info = common._create_3par_vlun(
                self.VOLUME_NAME,
                host,
                nsp)
            self.assertEqual(expected_info, vlun_info)
            location = ("%(name)s,%(lunid)s,%(host)s" %
                        {'name': self.VOLUME_NAME,
                         'lunid': lun_id,
                         'host': host})
            mock_client.createVLUN.return_value = location
            expected_info = {'volume_name': self.VOLUME_NAME,
                             'lun_id': lun_id,
                             'host_name': host}
            vlun_info = common._create_3par_vlun(
                self.VOLUME_NAME,
                host,
                None)
            self.assertEqual(expected_info, vlun_info)
    def test__get_existing_volume_ref_name(self):
        """existing_ref resolves by source-name or source-id; bad keys raise."""
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            existing_ref = {'source-name': unm_matcher}
            result = common._get_existing_volume_ref_name(existing_ref)
            self.assertEqual(unm_matcher, result)
            existing_ref = {'source-id': self.volume['id']}
            result = common._get_existing_volume_ref_name(existing_ref)
            self.assertEqual(unm_matcher, result)
            existing_ref = {'bad-key': 'foo'}
            self.assertRaises(
                exception.ManageExistingInvalidReference,
                common._get_existing_volume_ref_name,
                existing_ref)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_manage_existing(self, _mock_volume_types):
        """manage_existing renames the volume to 'osv-...' and retypes it.

        Verifies the rename + comment rewrite, then the retype sequence:
        comment/snapCPG update, VVS rebuild with QoS rules, and the
        action-6 tune to the target CPG.
        """
        _mock_volume_types.return_value = self.volume_type
        mock_client = self.setup_driver()
        new_comment = {"display_name": "Foo Volume",
                       "name": "volume-007dbfce-7579-40bc-8f90-a20b3902283e",
                       "volume_id": "007dbfce-7579-40bc-8f90-a20b3902283e",
                       "type": "OpenStack"}
        volume = {'display_name': None,
                  'host': self.FAKE_CINDER_HOST,
                  'volume_type': 'gold',
                  'volume_type_id': 'acfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
                  'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}
        mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
        mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
        mock_client.getTask.return_value = self.STATUS_DONE
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            osv_matcher = common._get_3par_vol_name(volume['id'])
            vvs_matcher = common._get_3par_vvs_name(volume['id'])
            existing_ref = {'source-name': unm_matcher}
            expected_obj = {'display_name': 'Foo Volume'}
            obj = self.driver.manage_existing(volume, existing_ref)
            expected_manage = [
                mock.call.getVolume(existing_ref['source-name']),
                mock.call.modifyVolume(existing_ref['source-name'],
                                       {'newName': osv_matcher,
                                        'comment': self.CommentMatcher(
                                            self.assertEqual, new_comment)}),
            ]
            retype_comment_qos = {
                "display_name": "Foo Volume",
                "volume_type_name": self.volume_type['name'],
                "volume_type_id": self.volume_type['id'],
                "qos": {
                    'maxIOPS': '1000',
                    'maxBWS': '50',
                    'minIOPS': '100',
                    'minBWS': '25',
                    'latency': '25',
                    'priority': 'low'
                }
            }
            expected_snap_cpg = HP3PAR_CPG_SNAP
            expected_retype_modify = [
                mock.call.modifyVolume(osv_matcher,
                                       {'comment': self.CommentMatcher(
                                           self.assertEqual,
                                           retype_comment_qos),
                                        'snapCPG': expected_snap_cpg}),
                mock.call.deleteVolumeSet(vvs_matcher),
            ]
            expected_retype_specs = [
                mock.call.createVolumeSet(vvs_matcher, None),
                mock.call.createQoSRules(
                    vvs_matcher,
                    {'ioMinGoal': 100, 'ioMaxLimit': 1000,
                     'bwMinGoalKB': 25600, 'priority': 1, 'latencyGoal': 25,
                     'bwMaxLimitKB': 51200}),
                mock.call.addVolumeToVolumeSet(vvs_matcher, osv_matcher),
                mock.call.modifyVolume(
                    osv_matcher,
                    {'action': 6,
                     'userCPG': HP3PAR_CPG,
                     'conversionOperation': 1, 'tuneOperation': 1}),
                mock.call.getTask(1)
            ]
            mock_client.assert_has_calls(self.standard_login + expected_manage)
            mock_client.assert_has_calls(expected_retype_modify)
            mock_client.assert_has_calls(
                expected_retype_specs +
                self.standard_logout)
            self.assertEqual(expected_obj, obj)
@mock.patch.object(volume_types, 'get_volume_type')
def test_manage_existing_with_no_snap_cpg(self, _mock_volume_types):
_mock_volume_types.return_value = self.volume_type
mock_client = self.setup_driver()
new_comment = {"display_name": "Foo Volume",
"name": "volume-007dbfce-7579-40bc-8f90-a20b3902283e",
"volume_id": "007dbfce-7579-40bc-8f90-a20b3902283e",
"type": "OpenStack"}
volume = {'display_name': None,
'host': 'my-stack1@3parxxx
'volume_type': 'gold',
'volume_type_id': 'acfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}
mock_client.getVolume.return_value = self.MV_INFO_WITH_NO_SNAPCPG
mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
mock_client.getTask.return_value = self.STATUS_DONE
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
unm_matcher = common._get_3par_unm_name(self.volume['id'])
osv_matcher = common._get_3par_vol_name(volume['id'])
existing_ref = {'source-name': unm_matcher}
expected_obj = {'display_name': 'Foo Volume'}
obj = self.driver.manage_existing(volume, existing_ref)
expected_manage = [
mock.call.getVolume(existing_ref['source-name']),
mock.call.modifyVolume(
existing_ref['source-name'],
{'newName': osv_matcher,
'comment': self.CommentMatcher(self.assertEqual,
new_comment),
# manage_existing() should be setting
# blank snapCPG to the userCPG
'snapCPG': 'testUserCpg0'})
]
mock_client.assert_has_calls(self.standard_login + expected_manage)
self.assertEqual(expected_obj, obj)
@mock.patch.object(volume_types, 'get_volume_type')
def test_manage_existing_vvs(self, _mock_volume_types):
test_volume_type = self.RETYPE_VOLUME_TYPE_2
vvs = test_volume_type['extra_specs']['vvs']
_mock_volume_types.return_value = test_volume_type
mock_client = self.setup_driver()
mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
mock_client.getTask.return_value = self.STATUS_DONE
id = '007abcde-7579-40bc-8f90-a20b3902283e'
new_comment = {"display_name": "Test Volume",
"name": ("volume-%s" % id),
"volume_id": id,
"type": "OpenStack"}
volume = {'display_name': 'Test Volume',
'host': 'my-stack1@3parxxx
'volume_type': 'gold',
'volume_type_id': 'acfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
'id': id}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
unm_matcher = common._get_3par_unm_name(self.volume['id'])
osv_matcher = common._get_3par_vol_name(volume['id'])
vvs_matcher = common._get_3par_vvs_name(volume['id'])
existing_ref = {'source-name': unm_matcher}
obj = self.driver.manage_existing(volume, existing_ref)
expected_obj = {'display_name': 'Test Volume'}
expected_manage = [
mock.call.getVolume(existing_ref['source-name']),
mock.call.modifyVolume(existing_ref['source-name'],
{'newName': osv_matcher,
'comment': self.CommentMatcher(
self.assertEqual, new_comment)})
]
retype_comment_vvs = {
"display_name": "Foo Volume",
"volume_type_name": test_volume_type['name'],
"volume_type_id": test_volume_type['id'],
"vvs": vvs
}
expected_retype = [
mock.call.modifyVolume(osv_matcher,
{'comment': self.CommentMatcher(
self.assertEqual,
retype_comment_vvs),
'snapCPG': 'OpenStackCPGSnap'}),
mock.call.deleteVolumeSet(vvs_matcher),
mock.call.addVolumeToVolumeSet(vvs, osv_matcher),
mock.call.modifyVolume(osv_matcher,
{'action': 6,
'userCPG': 'CPGNOTUSED',
'conversionOperation': 1,
'tuneOperation': 1}),
mock.call.getTask(1)
]
mock_client.assert_has_calls(self.standard_login + expected_manage)
mock_client.assert_has_calls(
expected_retype +
self.standard_logout)
self.assertEqual(expected_obj, obj)
    def test_manage_existing_no_volume_type(self):
        """Without a volume type, manage only renames and rewrites comment.

        Exercises three display_name cases: taken from the 3PAR comment,
        taken from the Cinder volume, and absent entirely.
        """
        mock_client = self.setup_driver()
        comment = (
            '{"display_name": "Foo Volume"}')
        new_comment = (
            '{"type": "OpenStack",'
            ' "display_name": "Foo Volume",'
            ' "name": "volume-007dbfce-7579-40bc-8f90-a20b3902283e",'
            ' "volume_id": "007dbfce-7579-40bc-8f90-a20b3902283e"}')
        volume = {'display_name': None,
                  'volume_type': None,
                  'volume_type_id': None,
                  'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}
        mock_client.getVolume.return_value = {'comment': comment,
                                              'userCPG': 'testUserCpg0'}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            osv_matcher = common._get_3par_vol_name(volume['id'])
            existing_ref = {'source-name': unm_matcher}
            obj = self.driver.manage_existing(volume, existing_ref)
            expected_obj = {'display_name': 'Foo Volume'}
            expected = [
                mock.call.getVolume(existing_ref['source-name']),
                mock.call.modifyVolume(existing_ref['source-name'],
                                       {'newName': osv_matcher,
                                        'comment': new_comment,
                                        # manage_existing() should be setting
                                        # blank snapCPG to the userCPG
                                        'snapCPG': 'testUserCpg0'})
            ]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertEqual(expected_obj, obj)
            volume['display_name'] = 'Test Volume'
            obj = self.driver.manage_existing(volume, existing_ref)
            expected_obj = {'display_name': 'Test Volume'}
            expected = [
                mock.call.getVolume(existing_ref['source-name']),
                mock.call.modifyVolume(existing_ref['source-name'],
                                       {'newName': osv_matcher,
                                        'comment': new_comment,
                                        # manage_existing() should be setting
                                        # blank snapCPG to the userCPG
                                        'snapCPG': 'testUserCpg0'})
            ]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertEqual(expected_obj, obj)
            mock_client.getVolume.return_value = {'userCPG': 'testUserCpg0'}
            volume['display_name'] = None
            common = self.driver._login()
            obj = self.driver.manage_existing(volume, existing_ref)
            expected_obj = {'display_name': None}
            expected = [
                mock.call.getVolume(existing_ref['source-name']),
                mock.call.modifyVolume(existing_ref['source-name'],
                                       {'newName': osv_matcher,
                                        'comment': new_comment,
                                        # manage_existing() should be setting
                                        # blank snapCPG to the userCPG
                                        'snapCPG': 'testUserCpg0'})
            ]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertEqual(expected_obj, obj)
    def test_manage_existing_invalid_input(self):
        """A non-existent source volume (HTTPNotFound) raises InvalidInput."""
        mock_client = self.setup_driver()
        volume = {'display_name': None,
                  'volume_type': None,
                  'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}
        mock_client.getVolume.side_effect = hpexceptions.HTTPNotFound('fake')
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            existing_ref = {'source-name': unm_matcher}
            self.assertRaises(exception.InvalidInput,
                              self.driver.manage_existing,
                              volume=volume,
                              existing_ref=existing_ref)
            expected = [mock.call.getVolume(existing_ref['source-name'])]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    def test_manage_existing_volume_type_exception(self):
        """manage_existing raises ManageExistingVolumeTypeMismatch when the
        Cinder volume carries a volume type the candidate cannot satisfy."""
        mock_client = self.setup_driver()
        comment = (
            '{"display_name": "Foo Volume"}')
        volume = {'display_name': None,
                  'volume_type': 'gold',
                  'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
                  'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}
        mock_client.getVolume.return_value = {'comment': comment}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            existing_ref = {'source-name': unm_matcher}
            self.assertRaises(exception.ManageExistingVolumeTypeMismatch,
                              self.driver.manage_existing,
                              volume=volume,
                              existing_ref=existing_ref)
            # The driver must bail out right after inspecting the volume.
            expected = [mock.call.getVolume(existing_ref['source-name'])]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
@mock.patch.object(volume_types, 'get_volume_type')
def test_manage_existing_retype_exception(self, _mock_volume_types):
mock_client = self.setup_driver()
_mock_volume_types.return_value = {
'name': 'gold',
'id': 'gold-id',
'extra_specs': {
'cpg': HP3PAR_CPG,
'snap_cpg': HP3PAR_CPG_SNAP,
'vvs_name': self.VVS_NAME,
'qos': self.QOS,
'tpvv': True,
'tdvv': False,
'volume_type': self.volume_type}}
volume = {'display_name': None,
'host': 'stack1@3pariscsi
'volume_type': 'gold',
'volume_type_id': 'bcfa9fa4-54a0-4340-a3d8-bfcf19aea65e',
'id': '007dbfce-7579-40bc-8f90-a20b3902283e'}
mock_client.getVolume.return_value = self.MANAGE_VOLUME_INFO
mock_client.modifyVolume.return_value = ("anyResponse", {'taskid': 1})
mock_client.getTask.return_value = self.STATUS_DONE
mock_client.getCPG.side_effect = [
{'domain': 'domain1'},
{'domain': 'domain2'},
{'domain': 'domain3'},
]
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
unm_matcher = common._get_3par_unm_name(self.volume['id'])
osv_matcher = common._get_3par_vol_name(volume['id'])
existing_ref = {'source-name': unm_matcher}
self.assertRaises(exception.Invalid3PARDomain,
self.driver.manage_existing,
volume=volume,
existing_ref=existing_ref)
expected = [
mock.call.getVolume(unm_matcher),
mock.call.modifyVolume(
unm_matcher, {
'newName': osv_matcher,
'comment': mock.ANY}),
mock.call.getCPG('POOL1'),
mock.call.getVolume(osv_matcher),
mock.call.getCPG('testUserCpg0'),
mock.call.getCPG('POOL1'),
mock.call.modifyVolume(
osv_matcher, {'newName': unm_matcher,
'comment': self.MANAGE_VOLUME_INFO
['comment']})
]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
    def test_manage_existing_get_size(self):
        """manage_existing_get_size reports the 3PAR sizeMiB in whole GiB."""
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'sizeMiB': 2048}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            volume = {}
            existing_ref = {'source-name': unm_matcher}
            size = self.driver.manage_existing_get_size(volume, existing_ref)
            expected_size = 2  # 2048 MiB == 2 GiB
            expected = [mock.call.getVolume(existing_ref['source-name'])]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertEqual(expected_size, size)
    def test_manage_existing_get_size_invalid_reference(self):
        """ManageExistingInvalidReference for a bad or missing source-name."""
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            volume = {}
            # An 'osv-' (already-managed) name is not a valid reference.
            existing_ref = {'source-name': self.VOLUME_3PAR_NAME}
            self.assertRaises(exception.ManageExistingInvalidReference,
                              self.driver.manage_existing_get_size,
                              volume=volume,
                              existing_ref=existing_ref)
            mock_client.assert_has_calls(
                self.standard_login +
                self.standard_logout)
            # A reference with no 'source-name' key is also invalid.
            existing_ref = {}
            self.assertRaises(exception.ManageExistingInvalidReference,
                              self.driver.manage_existing_get_size,
                              volume=volume,
                              existing_ref=existing_ref)
            mock_client.assert_has_calls(
                self.standard_login +
                self.standard_logout)
    def test_manage_existing_get_size_invalid_input(self):
        """InvalidInput when the referenced 3PAR volume does not exist."""
        mock_client = self.setup_driver()
        mock_client.getVolume.side_effect = hpexceptions.HTTPNotFound('fake')
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            volume = {}
            existing_ref = {'source-name': unm_matcher}
            self.assertRaises(exception.InvalidInput,
                              self.driver.manage_existing_get_size,
                              volume=volume,
                              existing_ref=existing_ref)
            # Only the failed lookup should have hit the array.
            expected = [mock.call.getVolume(existing_ref['source-name'])]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    def test_unmanage(self):
        """unmanage renames the volume from its 'osv-' to its 'unm-' name."""
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            self.driver.unmanage(self.volume)
            osv_matcher = common._get_3par_vol_name(self.volume['id'])
            unm_matcher = common._get_3par_unm_name(self.volume['id'])
            expected = [
                mock.call.modifyVolume(osv_matcher, {'newName': unm_matcher})
            ]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
def test__safe_hostname(self):
long_hostname = "abc123abc123abc123abc123abc123abc123"
fixed_hostname = "abc123abc123abc123abc123abc123a"
common = hpcommon.HP3PARCommon(None)
safe_host = common._safe_hostname(long_hostname)
self.assertEqual(fixed_hostname, safe_host)
class TestHP3PARFCDriver(HP3PARBaseDriver, test.TestCase):
    """Unit tests for the HP 3PAR Fibre Channel Cinder driver."""
    # Connection properties expected from initialize_connection for the
    # default two-initiator connector.
    properties = {
        'driver_volume_type': 'fibre_channel',
        'data': {
            'encrypted': False,
            'target_lun': 90,
            'target_wwn': ['0987654321234', '123456789000987'],
            'target_discovered': True,
            'initiator_target_map': {'123456789012345':
                                     ['0987654321234', '123456789000987'],
                                     '123456789054321':
                                     ['0987654321234', '123456789000987'],
                                     }}}
    def setup_driver(self, config=None, mock_conf=None, wsapi_version=None):
        """Build the FC driver around a mocked 3PAR client.

        Verifies the startup CPG checks, then resets the mock so each test
        starts with a clean call history.
        """
        self.ctxt = context.get_admin_context()
        mock_client = self.setup_mock_client(
            conf=config,
            m_conf=mock_conf,
            driver=hpfcdriver.HP3PARFCDriver)
        if wsapi_version:
            mock_client.getWsApiVersion.return_value = (
                wsapi_version)
        else:
            # Default to the newest WSAPI version the tests know about.
            mock_client.getWsApiVersion.return_value = (
                self.wsapi_version_latest)
        expected = [
            mock.call.getCPG(HP3PAR_CPG),
            mock.call.getCPG(HP3PAR_CPG2)]
        mock_client.assert_has_calls(
            self.standard_login +
            expected +
            self.standard_logout)
        mock_client.reset_mock()
        return mock_client
    def test_initialize_connection(self):
        """FC attach: host is created, VLUN exported, FC props returned."""
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # First getHost misses (host not yet on the array); the second call
        # returns the host with both FC paths after creation.
        mock_client.getHost.side_effect = [
            hpexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST,
                'FCPaths': [{'driverVersion': None,
                             'firmwareVersion': None,
                             'hostSpeed': 0,
                             'model': None,
                             'portPos': {'cardPort': 1, 'node': 1,
                                         'slot': 2},
                             'vendor': None,
                             'wwn': self.wwn[0]},
                            {'driverVersion': None,
                             'firmwareVersion': None,
                             'hostSpeed': 0,
                             'model': None,
                             'portPos': {'cardPort': 1, 'node': 0,
                                         'slot': 2},
                             'vendor': None,
                             'wwn': self.wwn[1]}]}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        # No existing VLUN, so a new one must be created.
        mock_client.getVLUN.side_effect = [
            hpexceptions.HTTPNotFound('fake')]
        mock_client.getHostVLUNs.return_value = [
            {'active': True,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': 90, 'type': 0}]
        location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
                    {'volume_name': self.VOLUME_3PAR_NAME,
                     'lun_id': 90,
                     'host': self.FAKE_HOST,
                     'nsp': 'something'})
        mock_client.createVLUN.return_value = location
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            result = self.driver.initialize_connection(
                self.volume,
                self.connector)
            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(wwns=['123456789012345',
                                          '123456789054321']),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.getPorts(),
                mock.call.getVLUN(self.VOLUME_3PAR_NAME),
                mock.call.createVLUN(
                    self.VOLUME_3PAR_NAME,
                    auto=True,
                    hostname=self.FAKE_HOST),
                mock.call.getHostVLUNs(self.FAKE_HOST)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertDictMatch(result, self.properties)
    @mock.patch('cinder.zonemanager.utils.create_lookup_service')
    def test_initialize_connection_with_lookup_single_nsp(self, mock_lookup):
        """FC attach with a fabric lookup service restricting to one path."""
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        # Minimal stand-in for the zone manager's lookup service: maps the
        # single initiator WWN to a single target WWN.
        class fake_lookup_object(object):
            def get_device_mapping_from_network(self, connector, target_wwns):
                fake_map = {
                    'FAB_1': {
                        'target_port_wwn_list': ['0987654321234'],
                        'initiator_port_wwn_list': ['123456789012345']
                    }
                }
                return fake_map
        mock_lookup.return_value = fake_lookup_object()
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        mock_client.getHost.side_effect = [
            hpexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST,
                'FCPaths': [{'driverVersion': None,
                             'firmwareVersion': None,
                             'hostSpeed': 0,
                             'model': None,
                             'portPos': {'cardPort': 1, 'node': 1,
                                         'slot': 2},
                             'vendor': None,
                             'wwn': self.wwn[0]}]}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        mock_client.getVLUN.side_effect = [
            hpexceptions.HTTPNotFound('fake')]
        mock_client.getHostVLUNs.return_value = [
            {'active': True,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': 90, 'type': 0}]
        location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
                    {'volume_name': self.VOLUME_3PAR_NAME,
                     'lun_id': 90,
                     'host': self.FAKE_HOST,
                     'nsp': 'something'})
        mock_client.createVLUN.return_value = location
        # Single-initiator connector (only wwn[0]).
        connector = {'ip': '10.0.0.2',
                     'initiator': 'iqn.1993-08.org.debian:01:222',
                     'wwpns': [self.wwn[0]],
                     'wwnns': ["223456789012345"],
                     'host': self.FAKE_HOST}
        expected_properties = {
            'driver_volume_type': 'fibre_channel',
            'data': {
                'encrypted': False,
                'target_lun': 90,
                'target_wwn': ['0987654321234'],
                'target_discovered': True,
                'initiator_target_map': {'123456789012345':
                                         ['0987654321234']
                                         }}}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            result = self.driver.initialize_connection(self.volume, connector)
            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.ANY,
                mock.call.getHost(self.FAKE_HOST),
                mock.call.getPorts(),
                mock.call.getVLUN(self.VOLUME_3PAR_NAME),
                mock.call.getPorts(),
                # The lookup service pins the VLUN to a specific port.
                mock.call.createVLUN(
                    self.VOLUME_3PAR_NAME,
                    auto=True,
                    hostname=self.FAKE_HOST,
                    portPos={'node': 7, 'slot': 1, 'cardPort': 1}),
                mock.call.getHostVLUNs(self.FAKE_HOST)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertDictMatch(result, expected_properties)
    def test_initialize_connection_encrypted(self):
        """FC attach of an encrypted volume reports encrypted=True."""
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        mock_client.getHost.side_effect = [
            hpexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST,
                'FCPaths': [{'driverVersion': None,
                             'firmwareVersion': None,
                             'hostSpeed': 0,
                             'model': None,
                             'portPos': {'cardPort': 1, 'node': 1,
                                         'slot': 2},
                             'vendor': None,
                             'wwn': self.wwn[0]},
                            {'driverVersion': None,
                             'firmwareVersion': None,
                             'hostSpeed': 0,
                             'model': None,
                             'portPos': {'cardPort': 1, 'node': 0,
                                         'slot': 2},
                             'vendor': None,
                             'wwn': self.wwn[1]}]}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        mock_client.getVLUN.side_effect = [
            hpexceptions.HTTPNotFound('fake')]
        mock_client.getHostVLUNs.return_value = [
            {'active': True,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': 90, 'type': 0}]
        location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
                    {'volume_name': self.VOLUME_3PAR_NAME,
                     'lun_id': 90,
                     'host': self.FAKE_HOST,
                     'nsp': 'something'})
        mock_client.createVLUN.return_value = location
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            result = self.driver.initialize_connection(
                self.volume_encrypted,
                self.connector)
            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(wwns=['123456789012345',
                                          '123456789054321']),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.getPorts(),
                mock.call.getVLUN(self.VOLUME_3PAR_NAME),
                mock.call.createVLUN(
                    self.VOLUME_3PAR_NAME,
                    auto=True,
                    hostname=self.FAKE_HOST),
                mock.call.getHostVLUNs(self.FAKE_HOST)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            # NOTE: expected_properties aliases the class-level dict; the
            # 'encrypted' flag is flipped for this encrypted-volume case.
            expected_properties = self.properties
            expected_properties['data']['encrypted'] = True
            self.assertDictMatch(result, expected_properties)
    def test_terminate_connection(self):
        """FC detach: VLUN removed, host deleted, initiator map returned.

        Also exercises the handled deleteHost HTTPConflict cases (host still
        has exported VLUNs / host is a member of a host set).
        """
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        mock_client = self.setup_driver()
        # First call: one VLUN for our volume; later calls: none left.
        effects = [
            [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME,
              'lun': None, 'type': 0}],
            hpexceptions.HTTPNotFound,
            hpexceptions.HTTPNotFound]
        mock_client.getHostVLUNs.side_effect = effects
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        expected = [
            mock.call.queryHost(wwns=['123456789012345', '123456789054321']),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.deleteVLUN(
                self.VOLUME_3PAR_NAME,
                None,
                self.FAKE_HOST),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.deleteHost(self.FAKE_HOST),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.getPorts()]
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertIn('data', conn_info)
            self.assertIn('initiator_target_map', conn_info['data'])
            mock_client.reset_mock()
            mock_client.getHostVLUNs.side_effect = effects
            # mock some deleteHost exceptions that are handled
            delete_with_vlun = hpexceptions.HTTPConflict(
                error={'message': "has exported VLUN"})
            delete_with_hostset = hpexceptions.HTTPConflict(
                error={'message': "host is a member of a set"})
            mock_client.deleteHost = mock.Mock(
                side_effect=[delete_with_vlun, delete_with_hostset])
            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            mock_client.reset_mock()
            mock_client.getHostVLUNs.side_effect = effects
            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    @mock.patch('cinder.zonemanager.utils.create_lookup_service')
    def test_terminate_connection_with_lookup(self, mock_lookup):
        """FC detach with a fabric lookup service supplying the WWN map."""
        # setup_mock_client drive with default configuration
        # and return the mock HTTP 3PAR client
        # Minimal stand-in for the zone manager's lookup service.
        class fake_lookup_object(object):
            def get_device_mapping_from_network(self, connector, target_wwns):
                fake_map = {
                    'FAB_1': {
                        'target_port_wwn_list': ['0987654321234'],
                        'initiator_port_wwn_list': ['123456789012345']
                    }
                }
                return fake_map
        mock_lookup.return_value = fake_lookup_object()
        mock_client = self.setup_driver()
        # First call: one VLUN for our volume; later calls: none left.
        effects = [
            [{'active': True, 'volumeName': self.VOLUME_3PAR_NAME,
              'lun': None, 'type': 0}],
            hpexceptions.HTTPNotFound,
            hpexceptions.HTTPNotFound]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        mock_client.getHostVLUNs.side_effect = effects
        expected = [
            mock.call.queryHost(wwns=['123456789012345', '123456789054321']),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.deleteVLUN(
                self.VOLUME_3PAR_NAME,
                None,
                self.FAKE_HOST),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.deleteHost(self.FAKE_HOST),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.getPorts()]
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertIn('data', conn_info)
            self.assertIn('initiator_target_map', conn_info['data'])
            mock_client.reset_mock()
            mock_client.getHostVLUNs.side_effect = effects
            # mock some deleteHost exceptions that are handled
            delete_with_vlun = hpexceptions.HTTPConflict(
                error={'message': "has exported VLUN"})
            delete_with_hostset = hpexceptions.HTTPConflict(
                error={'message': "host is a member of a set"})
            mock_client.deleteHost = mock.Mock(
                side_effect=[delete_with_vlun, delete_with_hostset])
            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            mock_client.reset_mock()
            mock_client.getHostVLUNs.side_effect = effects
            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
    def test_terminate_connection_more_vols(self):
        """Host still has other VLUNs: host kept, no initiator map in info."""
        mock_client = self.setup_driver()
        # mock more than one vlun on the host (don't even try to remove host)
        mock_client.getHostVLUNs.return_value = \
            [
                {'active': True,
                 'volumeName': self.VOLUME_3PAR_NAME,
                 'lun': None, 'type': 0},
                {'active': True,
                 'volumeName': 'there-is-another-volume',
                 'lun': None, 'type': 0},
            ]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        # No deleteHost / getPorts calls expected in this scenario.
        expect_less = [
            mock.call.queryHost(wwns=['123456789012345', '123456789054321']),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.deleteVLUN(
                self.VOLUME_3PAR_NAME,
                None,
                self.FAKE_HOST),
            mock.call.getHostVLUNs(self.FAKE_HOST),
            mock.call.getHostVLUNs(self.FAKE_HOST)]
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            conn_info = self.driver.terminate_connection(self.volume,
                                                         self.connector)
            mock_client.assert_has_calls(
                self.standard_login +
                expect_less +
                self.standard_logout)
            self.assertNotIn('initiator_target_map', conn_info['data'])
    def test_get_volume_stats(self):
        """FC stats report per-pool capacity, utilization and the configured
        filter/goodness functions, including the SDGrowth-limited case."""
        config = self.setup_configuration()
        config.filter_function = FILTER_FUNCTION
        config.goodness_function = GOODNESS_FUNCTION
        mock_client = self.setup_driver(config=config)
        mock_client.getCPG.return_value = self.cpgs[0]
        mock_client.getStorageSystemInfo.return_value = {
            'serialNumber': '1234'
        }
        mock_client.getCPGAvailableSpace.return_value = {
            "capacityEfficiency": {u'compaction': 594.4},
            "rawFreeMiB": 1024.0 * 6,
            "usableFreeMiB": 1024.0 * 3
        }
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            stats = self.driver.get_volume_stats(True)
            const = 0.0009765625  # 1/1024: MiB -> GiB conversion factor
            self.assertEqual(stats['storage_protocol'], 'FC')
            self.assertEqual(stats['total_capacity_gb'], 0)
            self.assertEqual(stats['free_capacity_gb'], 0)
            self.assertEqual(stats['pools'][0]['total_capacity_gb'], 24.0)
            self.assertEqual(stats['pools'][0]['free_capacity_gb'], 3.0)
            self.assertEqual(stats['pools'][0]['capacity_utilization'], 87.5)
            self.assertEqual(stats['pools'][0]['total_volumes'], 3)
            self.assertEqual(stats['pools'][0]['goodness_function'],
                             GOODNESS_FUNCTION)
            self.assertEqual(stats['pools'][0]['filter_function'],
                             FILTER_FUNCTION)
            expected = [
                mock.call.getStorageSystemInfo(),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getCPGAvailableSpace(HP3PAR_CPG),
                mock.call.getCPG(HP3PAR_CPG2),
                mock.call.getCPGAvailableSpace(HP3PAR_CPG2)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            # A second stats call must report the same values.
            stats = self.driver.get_volume_stats(True)
            self.assertEqual(stats['storage_protocol'], 'FC')
            self.assertEqual(stats['total_capacity_gb'], 0)
            self.assertEqual(stats['free_capacity_gb'], 0)
            self.assertEqual(stats['pools'][0]['total_capacity_gb'], 24.0)
            self.assertEqual(stats['pools'][0]['free_capacity_gb'], 3.0)
            self.assertEqual(stats['pools'][0]['capacity_utilization'], 87.5)
            self.assertEqual(stats['pools'][0]['total_volumes'], 3)
            self.assertEqual(stats['pools'][0]['goodness_function'],
                             GOODNESS_FUNCTION)
            self.assertEqual(stats['pools'][0]['filter_function'],
                             FILTER_FUNCTION)
            # Re-check with a CPG that has an SDGrowth limit configured.
            cpg2 = self.cpgs[0].copy()
            cpg2.update({'SDGrowth': {'limitMiB': 8192}})
            mock_client.getCPG.return_value = cpg2
            stats = self.driver.get_volume_stats(True)
            self.assertEqual(stats['storage_protocol'], 'FC')
            total_capacity_gb = 8192 * const
            self.assertEqual(stats['total_capacity_gb'], 0)
            self.assertEqual(stats['pools'][0]['total_capacity_gb'],
                             total_capacity_gb)
            free_capacity_gb = int(
                (8192 - (self.cpgs[0]['UsrUsage']['usedMiB'] +
                         self.cpgs[0]['SDUsage']['usedMiB'])) * const)
            self.assertEqual(stats['free_capacity_gb'], 0)
            self.assertEqual(stats['pools'][0]['free_capacity_gb'],
                             free_capacity_gb)
            cap_util = (float(total_capacity_gb - free_capacity_gb) /
                        float(total_capacity_gb)) * 100
            self.assertEqual(stats['pools'][0]['capacity_utilization'],
                             cap_util)
            self.assertEqual(stats['pools'][0]['total_volumes'], 3)
            self.assertEqual(stats['pools'][0]['goodness_function'],
                             GOODNESS_FUNCTION)
            self.assertEqual(stats['pools'][0]['filter_function'],
                             FILTER_FUNCTION)
            # NOTE(review): these appear to reset the mocked CPG state at the
            # end of the test -- confirm intent; they are mock calls only.
            common.client.deleteCPG(HP3PAR_CPG)
            common.client.createCPG(HP3PAR_CPG, {})
    def test_create_host(self):
        """_create_host creates a new FC host when none exists on the array."""
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        mock_client.getHost.side_effect = [
            hpexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST,
                'FCPaths': [{'driverVersion': None,
                             'firmwareVersion': None,
                             'hostSpeed': 0,
                             'model': None,
                             'portPos': {'cardPort': 1, 'node': 1,
                                         'slot': 2},
                             'vendor': None,
                             'wwn': self.wwn[0]},
                            {'driverVersion': None,
                             'firmwareVersion': None,
                             'hostSpeed': 0,
                             'model': None,
                             'portPos': {'cardPort': 1, 'node': 0,
                                         'slot': 2},
                             'vendor': None,
                             'wwn': self.wwn[1]}]}]
        # No existing host matches the connector's WWNs.
        mock_client.queryHost.return_value = None
        mock_client.getVLUN.return_value = {'lun': 186}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host = self.driver._create_host(
                common,
                self.volume,
                self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(wwns=['123456789012345',
                                          '123456789054321']),
                mock.call.createHost(
                    self.FAKE_HOST,
                    FCWwns=['123456789012345', '123456789054321'],
                    optional={'domain': None, 'persona': 2}),
                mock.call.getHost(self.FAKE_HOST)]
            mock_client.assert_has_calls(expected)
            self.assertEqual(host['name'], self.FAKE_HOST)
    def test_create_invalid_host(self):
        """_create_host reuses an existing array host found by WWN query even
        when its name differs from the connector's host name."""
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        mock_client.getHost.side_effect = [
            hpexceptions.HTTPNotFound('Host not found.'), {
                'name': 'fakehost.foo',
                'FCPaths': [{'wwn': '123456789012345'}, {
                    'wwn': '123456789054321'}]}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': 'fakehost.foo'
            }]
        }
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host = self.driver._create_host(
                common,
                self.volume,
                self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost('fakehost'),
                mock.call.queryHost(wwns=['123456789012345',
                                          '123456789054321']),
                mock.call.getHost('fakehost.foo')]
            mock_client.assert_has_calls(expected)
            self.assertEqual(host['name'], 'fakehost.foo')
    def test_create_modify_host(self):
        """_create_host adds the missing FC paths to an existing host that
        has none."""
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        mock_client.getHost.side_effect = [{
            'name': self.FAKE_HOST, 'FCPaths': []},
            {'name': self.FAKE_HOST,
             'FCPaths': [{'wwn': '123456789012345'}, {
                 'wwn': '123456789054321'}]}]
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host = self.driver._create_host(
                common,
                self.volume,
                self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost('fakehost'),
                mock.call.modifyHost(
                    'fakehost', {
                        'FCWWNs': ['123456789012345', '123456789054321'],
                        'pathOperation': 1}),
                mock.call.getHost('fakehost')]
            mock_client.assert_has_calls(expected)
            self.assertEqual(host['name'], self.FAKE_HOST)
            self.assertEqual(len(host['FCPaths']), 2)
    def test_modify_host_with_new_wwn(self):
        """Only the WWN not already present on the host is added."""
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # Host initially has one of the connector's two WWNs.
        getHost_ret1 = {
            'name': self.FAKE_HOST,
            'FCPaths': [{'wwn': '123456789054321'}]}
        getHost_ret2 = {
            'name': self.FAKE_HOST,
            'FCPaths': [{'wwn': '123456789012345'},
                        {'wwn': '123456789054321'}]}
        mock_client.getHost.side_effect = [getHost_ret1, getHost_ret2]
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host = self.driver._create_host(
                common,
                self.volume,
                self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost('fakehost'),
                mock.call.modifyHost(
                    'fakehost', {
                        'FCWWNs': ['123456789012345'], 'pathOperation': 1}),
                mock.call.getHost('fakehost')]
            mock_client.assert_has_calls(expected)
            self.assertEqual(host['name'], self.FAKE_HOST)
            self.assertEqual(len(host['FCPaths']), 2)
    def test_modify_host_with_unknown_wwn_and_new_wwn(self):
        """Unrelated WWNs already on the host are left untouched while the
        connector's missing WWN is added."""
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # Host has one known WWN plus an unknown (foreign) one.
        getHost_ret1 = {
            'name': self.FAKE_HOST,
            'FCPaths': [{'wwn': '123456789054321'},
                        {'wwn': 'xxxxxxxxxxxxxxx'}]}
        getHost_ret2 = {
            'name': self.FAKE_HOST,
            'FCPaths': [{'wwn': '123456789012345'},
                        {'wwn': '123456789054321'},
                        {'wwn': 'xxxxxxxxxxxxxxx'}]}
        mock_client.getHost.side_effect = [getHost_ret1, getHost_ret2]
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host = self.driver._create_host(
                common,
                self.volume,
                self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost('fakehost'),
                mock.call.modifyHost(
                    'fakehost', {
                        'FCWWNs': ['123456789012345'], 'pathOperation': 1}),
                mock.call.getHost('fakehost')]
            mock_client.assert_has_calls(expected)
            self.assertEqual(host['name'], self.FAKE_HOST)
            self.assertEqual(len(host['FCPaths']), 3)
class TestHP3PARISCSIDriver(HP3PARBaseDriver, test.TestCase):
    """Unit tests for the HP 3PAR iSCSI Cinder driver."""
    # Target identity used by the mocked VLUN / portal fixtures.
    TARGET_IQN = 'iqn.2000-05.com.3pardata:21810002ac00383d'
    TARGET_LUN = 186
    # Connection properties expected from initialize_connection.
    properties = {
        'driver_volume_type': 'iscsi',
        'data':
        {'encrypted': False,
            'target_discovered': True,
            'target_iqn': TARGET_IQN,
            'target_lun': TARGET_LUN,
            'target_portal': '1.1.1.2:1234'}}
    def setup_driver(self, config=None, mock_conf=None, wsapi_version=None):
        """Build the iSCSI driver around a mocked 3PAR client.

        Verifies the startup CPG checks and the extra iSCSI port discovery
        pass, then resets the mock for a clean call history.
        """
        self.ctxt = context.get_admin_context()
        mock_client = self.setup_mock_client(
            conf=config,
            m_conf=mock_conf,
            driver=hpdriver.HP3PARISCSIDriver)
        if wsapi_version:
            mock_client.getWsApiVersion.return_value = (
                wsapi_version)
        else:
            # Default to the newest WSAPI version the tests know about.
            mock_client.getWsApiVersion.return_value = (
                self.wsapi_version_latest)
        expected_get_cpgs = [
            mock.call.getCPG(HP3PAR_CPG),
            mock.call.getCPG(HP3PAR_CPG2)]
        expected_get_ports = [mock.call.getPorts()]
        mock_client.assert_has_calls(
            self.standard_login +
            expected_get_cpgs +
            self.standard_logout +
            self.standard_login +
            expected_get_ports +
            self.standard_logout)
        mock_client.reset_mock()
        return mock_client
    def test_initialize_connection(self):
        """iSCSI attach: host created, existing VLUN found, props returned."""
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # First getHost misses; second returns the freshly-created host.
        mock_client.getHost.side_effect = [
            hpexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': self.FAKE_HOST
            }]
        }
        mock_client.getHostVLUNs.return_value = [
            {'active': True,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': self.TARGET_LUN, 'type': 0}]
        mock_client.getVLUN.return_value = {
            'hostname': self.FAKE_HOST,
            'lun': self.TARGET_LUN,
            'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}
        location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
                    {'volume_name': self.VOLUME_3PAR_NAME,
                     'lun_id': self.TARGET_LUN,
                     'host': self.FAKE_HOST,
                     'nsp': 'something'})
        mock_client.createVLUN.return_value = location
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            result = self.driver.initialize_connection(
                self.volume,
                self.connector)
            expected = [
                mock.call.getVolume(self.VOLUME_3PAR_NAME),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.getVLUN(self.VOLUME_3PAR_NAME)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertDictMatch(result, self.properties)
def test_initialize_connection_encrypted(self):
mock_client = self.setup_driver()
mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
mock_client.getCPG.return_value = {}
mock_client.getHost.side_effect = [
hpexceptions.HTTPNotFound('fake'),
{'name': self.FAKE_HOST}]
mock_client.queryHost.return_value = {
'members': [{
'name': self.FAKE_HOST
}]
}
mock_client.getHostVLUNs.return_value = [
{'active': True,
'volumeName': self.VOLUME_3PAR_NAME,
'lun': self.TARGET_LUN, 'type': 0}]
mock_client.getVLUN.return_value = {
'hostname': self.FAKE_HOST,
'lun': self.TARGET_LUN,
'portPos': {'node': 8, 'slot': 1, 'cardPort': 1}}
location = ("%(volume_name)s,%(lun_id)s,%(host)s,%(nsp)s" %
{'volume_name': self.VOLUME_3PAR_NAME,
'lun_id': self.TARGET_LUN,
'host': self.FAKE_HOST,
'nsp': 'something'})
mock_client.createVLUN.return_value = location
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
result = self.driver.initialize_connection(
self.volume_encrypted,
self.connector)
expected = [
mock.call.getVolume(self.VOLUME_3PAR_NAME),
mock.call.getCPG(HP3PAR_CPG),
mock.call.getHost(self.FAKE_HOST),
mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
mock.call.getHost(self.FAKE_HOST),
mock.call.getVLUN(self.VOLUME_3PAR_NAME)]
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
expected_properties = self.properties
expected_properties['data']['encrypted'] = True
self.assertDictMatch(result, self.properties)
    def test_get_volume_stats(self):
        """iSCSI stats report per-pool capacity, utilization and the
        configured filter/goodness functions, including the SDGrowth case."""
        config = self.setup_configuration()
        config.filter_function = FILTER_FUNCTION
        config.goodness_function = GOODNESS_FUNCTION
        mock_client = self.setup_driver(config=config)
        mock_client.getCPG.return_value = self.cpgs[0]
        mock_client.getStorageSystemInfo.return_value = {
            'serialNumber': '1234'
        }
        mock_client.getCPGAvailableSpace.return_value = {
            "capacityEfficiency": {u'compaction': 594.4},
            "rawFreeMiB": 1024.0 * 6,
            "usableFreeMiB": 1024.0 * 3
        }
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            stats = self.driver.get_volume_stats(True)
            const = 0.0009765625  # 1/1024: MiB -> GiB conversion factor
            self.assertEqual(stats['storage_protocol'], 'iSCSI')
            self.assertEqual(stats['total_capacity_gb'], 0)
            self.assertEqual(stats['free_capacity_gb'], 0)
            self.assertEqual(stats['pools'][0]['total_capacity_gb'], 24.0)
            self.assertEqual(stats['pools'][0]['free_capacity_gb'], 3.0)
            self.assertEqual(stats['pools'][0]['capacity_utilization'], 87.5)
            self.assertEqual(stats['pools'][0]['total_volumes'], 3)
            self.assertEqual(stats['pools'][0]['goodness_function'],
                             GOODNESS_FUNCTION)
            self.assertEqual(stats['pools'][0]['filter_function'],
                             FILTER_FUNCTION)
            expected = [
                mock.call.getStorageSystemInfo(),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getCPGAvailableSpace(HP3PAR_CPG),
                mock.call.getCPG(HP3PAR_CPG2),
                mock.call.getCPGAvailableSpace(HP3PAR_CPG2)]
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            # Re-check with a CPG that has an SDGrowth limit configured.
            cpg2 = self.cpgs[0].copy()
            cpg2.update({'SDGrowth': {'limitMiB': 8192}})
            mock_client.getCPG.return_value = cpg2
            stats = self.driver.get_volume_stats(True)
            self.assertEqual(stats['storage_protocol'], 'iSCSI')
            total_capacity_gb = 8192 * const
            self.assertEqual(stats['total_capacity_gb'], 0)
            self.assertEqual(stats['pools'][0]['total_capacity_gb'],
                             total_capacity_gb)
            free_capacity_gb = int(
                (8192 - (self.cpgs[0]['UsrUsage']['usedMiB'] +
                         self.cpgs[0]['SDUsage']['usedMiB'])) * const)
            self.assertEqual(stats['free_capacity_gb'], 0)
            self.assertEqual(stats['pools'][0]['free_capacity_gb'],
                             free_capacity_gb)
            cap_util = (float(total_capacity_gb - free_capacity_gb) /
                        float(total_capacity_gb)) * 100
            self.assertEqual(stats['pools'][0]['capacity_utilization'],
                             cap_util)
            self.assertEqual(stats['pools'][0]['total_volumes'], 3)
            self.assertEqual(stats['pools'][0]['goodness_function'],
                             GOODNESS_FUNCTION)
            self.assertEqual(stats['pools'][0]['filter_function'],
                             FILTER_FUNCTION)
    def test_create_host(self):
        """A missing host is created with the connector's iSCSI IQN."""
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # First getHost lookup misses, so the driver must create the host;
        # the second lookup returns the newly created entry.
        mock_client.getHost.side_effect = [
            hpexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST}]
        mock_client.queryHost.return_value = None
        mock_client.getVLUN.return_value = {'lun': self.TARGET_LUN}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host, auth_username, auth_password = self.driver._create_host(
                common, self.volume, self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
                mock.call.createHost(
                    self.FAKE_HOST,
                    optional={'domain': None, 'persona': 2},
                    iscsiNames=['iqn.1993-08.org.debian:01:222']),
                mock.call.getHost(self.FAKE_HOST)]
            mock_client.assert_has_calls(expected)
            self.assertEqual(host['name'], self.FAKE_HOST)
            # CHAP is not enabled in this configuration, so no credentials
            # are returned.
            self.assertEqual(auth_username, None)
            self.assertEqual(auth_password, None)
    def test_create_host_chap_enabled(self):
        """Creating a missing host also pushes CHAP credentials via modifyHost."""
        config = self.setup_configuration()
        config.hp3par_iscsi_chap_enabled = True
        mock_client = self.setup_driver(config=config)
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # First lookup misses so the host gets created; the second lookup
        # returns the new entry.
        mock_client.getHost.side_effect = [
            hpexceptions.HTTPNotFound('fake'),
            {'name': self.FAKE_HOST}]
        mock_client.queryHost.return_value = None
        mock_client.getVLUN.return_value = {'lun': self.TARGET_LUN}
        expected_mod_request = {
            'chapOperation': mock_client.HOST_EDIT_ADD,
            'chapOperationMode': mock_client.CHAP_INITIATOR,
            'chapName': 'test-user',
            'chapSecret': 'test-pass'
        }
        def get_side_effect(*args):
            # Serve the stored CHAP credentials from volume metadata,
            # keyed by the requested metadata name (args[1]).
            data = {'value': None}
            if args[1] == CHAP_USER_KEY:
                data['value'] = 'test-user'
            elif args[1] == CHAP_PASS_KEY:
                data['value'] = 'test-pass'
            return data
        mock_client.getVolumeMetaData.side_effect = get_side_effect
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host, auth_username, auth_password = self.driver._create_host(
                common, self.volume, self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getVolumeMetaData(
                    'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY),
                mock.call.getVolumeMetaData(
                    'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
                mock.call.createHost(
                    self.FAKE_HOST,
                    optional={'domain': None, 'persona': 2},
                    iscsiNames=['iqn.1993-08.org.debian:01:222']),
                mock.call.modifyHost(
                    'fakehost',
                    expected_mod_request),
                mock.call.getHost(self.FAKE_HOST)
            ]
            mock_client.assert_has_calls(expected)
            self.assertEqual(host['name'], self.FAKE_HOST)
            self.assertEqual(auth_username, 'test-user')
            self.assertEqual(auth_password, 'test-pass')
    def test_create_invalid_host(self):
        """If the IQN is already registered under another host name, reuse it."""
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # The expected host name is missing, but queryHost finds the IQN
        # already registered under 'fakehost.foo', so that host is used.
        mock_client.getHost.side_effect = [
            hpexceptions.HTTPNotFound('Host not found.'),
            {'name': 'fakehost.foo'}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': 'fakehost.foo'
            }]
        }
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host, auth_username, auth_password = self.driver._create_host(
                common, self.volume, self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
                mock.call.getHost('fakehost.foo')]
            mock_client.assert_has_calls(expected)
            self.assertEqual(host['name'], 'fakehost.foo')
            self.assertEqual(auth_username, None)
            self.assertEqual(auth_password, None)
    def test_create_invalid_host_chap_enabled(self):
        """Reusing a differently-named host still gets CHAP credentials set."""
        config = self.setup_configuration()
        config.hp3par_iscsi_chap_enabled = True
        mock_client = self.setup_driver(config=config)
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # The expected host name misses, but the IQN is already registered
        # under 'fakehost.foo'.
        mock_client.getHost.side_effect = [
            hpexceptions.HTTPNotFound('Host not found.'),
            {'name': 'fakehost.foo'}]
        mock_client.queryHost.return_value = {
            'members': [{
                'name': 'fakehost.foo'
            }]
        }
        def get_side_effect(*args):
            # Serve the stored CHAP credentials from volume metadata,
            # keyed by the requested metadata name (args[1]).
            data = {'value': None}
            if args[1] == CHAP_USER_KEY:
                data['value'] = 'test-user'
            elif args[1] == CHAP_PASS_KEY:
                data['value'] = 'test-pass'
            return data
        mock_client.getVolumeMetaData.side_effect = get_side_effect
        expected_mod_request = {
            'chapOperation': mock_client.HOST_EDIT_ADD,
            'chapOperationMode': mock_client.CHAP_INITIATOR,
            'chapName': 'test-user',
            'chapSecret': 'test-pass'
        }
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host, auth_username, auth_password = self.driver._create_host(
                common, self.volume, self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getVolumeMetaData(
                    'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY),
                mock.call.getVolumeMetaData(
                    'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.queryHost(iqns=['iqn.1993-08.org.debian:01:222']),
                mock.call.modifyHost(
                    'fakehost.foo',
                    expected_mod_request),
                mock.call.getHost('fakehost.foo')
            ]
            mock_client.assert_has_calls(expected)
            self.assertEqual(host['name'], 'fakehost.foo')
            self.assertEqual(auth_username, 'test-user')
            self.assertEqual(auth_password, 'test-pass')
    def test_create_modify_host(self):
        """An existing host without iSCSI paths gets the connector IQN added."""
        mock_client = self.setup_driver()
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # The host already exists with no FC paths; after modifyHost the
        # second lookup shows its paths.
        mock_client.getHost.side_effect = [
            {'name': self.FAKE_HOST, 'FCPaths': []},
            {'name': self.FAKE_HOST,
             'FCPaths': [{'wwn': '123456789012345'},
                         {'wwn': '123456789054321'}]}]
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host, auth_username, auth_password = self.driver._create_host(
                common, self.volume, self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.modifyHost(
                    self.FAKE_HOST,
                    {'pathOperation': 1,
                     'iSCSINames': ['iqn.1993-08.org.debian:01:222']}),
                mock.call.getHost(self.FAKE_HOST)]
            mock_client.assert_has_calls(expected)
            self.assertEqual(host['name'], self.FAKE_HOST)
            self.assertEqual(auth_username, None)
            self.assertEqual(auth_password, None)
            self.assertEqual(len(host['FCPaths']), 2)
    def test_create_modify_host_chap_enabled(self):
        """Adding the IQN to an existing host also pushes CHAP credentials."""
        config = self.setup_configuration()
        config.hp3par_iscsi_chap_enabled = True
        mock_client = self.setup_driver(config=config)
        mock_client.getVolume.return_value = {'userCPG': HP3PAR_CPG}
        mock_client.getCPG.return_value = {}
        # The host already exists with no FC paths; after modifyHost the
        # second lookup shows its paths.
        mock_client.getHost.side_effect = [
            {'name': self.FAKE_HOST, 'FCPaths': []},
            {'name': self.FAKE_HOST,
             'FCPaths': [{'wwn': '123456789012345'},
                         {'wwn': '123456789054321'}]}]
        def get_side_effect(*args):
            # Serve the stored CHAP credentials from volume metadata,
            # keyed by the requested metadata name (args[1]).
            data = {'value': None}
            if args[1] == CHAP_USER_KEY:
                data['value'] = 'test-user'
            elif args[1] == CHAP_PASS_KEY:
                data['value'] = 'test-pass'
            return data
        mock_client.getVolumeMetaData.side_effect = get_side_effect
        expected_mod_request = {
            'chapOperation': mock_client.HOST_EDIT_ADD,
            'chapOperationMode': mock_client.CHAP_INITIATOR,
            'chapName': 'test-user',
            'chapSecret': 'test-pass'
        }
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            host, auth_username, auth_password = self.driver._create_host(
                common, self.volume, self.connector)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getCPG(HP3PAR_CPG),
                mock.call.getVolumeMetaData(
                    'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY),
                mock.call.getVolumeMetaData(
                    'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY),
                mock.call.getHost(self.FAKE_HOST),
                mock.call.modifyHost(
                    self.FAKE_HOST,
                    {'pathOperation': 1,
                     'iSCSINames': ['iqn.1993-08.org.debian:01:222']}),
                mock.call.modifyHost(
                    self.FAKE_HOST,
                    expected_mod_request
                ),
                mock.call.getHost(self.FAKE_HOST)]
            mock_client.assert_has_calls(expected)
            self.assertEqual(host['name'], self.FAKE_HOST)
            self.assertEqual(auth_username, 'test-user')
            self.assertEqual(auth_password, 'test-pass')
            self.assertEqual(len(host['FCPaths']), 2)
def test_get_least_used_nsp_for_host_single(self):
mock_client = self.setup_driver()
mock_client.getPorts.return_value = PORTS_RET
mock_client.getVLUNs.return_value = VLUNS1_RET
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
iscsi_ips = ["10.10.220.253"]
self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
self.driver.initialize_iscsi_ports(common)
nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost')
self.assertEqual(nsp, "1:8:1")
def test_get_least_used_nsp_for_host_new(self):
mock_client = self.setup_driver()
mock_client.getPorts.return_value = PORTS_RET
mock_client.getVLUNs.return_value = VLUNS1_RET
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
iscsi_ips = ["10.10.220.252", "10.10.220.253"]
self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
self.driver.initialize_iscsi_ports(common)
nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost')
self.assertEqual(nsp, "1:8:2")
def test_get_least_used_nsp_for_host_reuse(self):
mock_client = self.setup_driver()
mock_client.getPorts.return_value = PORTS_RET
mock_client.getVLUNs.return_value = VLUNS1_RET
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
iscsi_ips = ["10.10.220.252", "10.10.220.253"]
self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
self.driver.initialize_iscsi_ports(common)
nsp = self.driver._get_least_used_nsp_for_host(common, 'foo')
self.assertEqual(nsp, "1:8:2")
nsp = self.driver._get_least_used_nsp_for_host(common, 'bar')
self.assertEqual(nsp, "1:8:1")
def test_get_least_used_nps_for_host_fc(self):
mock_client = self.setup_driver()
mock_client.getPorts.return_value = PORTS1_RET
mock_client.getVLUNs.return_value = VLUNS5_RET
iscsi_ips = ["10.10.220.252", "10.10.220.253"]
self.driver.configuration.hp3par_iscsi_ips = iscsi_ips
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
self.driver.initialize_iscsi_ports(common)
nsp = self.driver._get_least_used_nsp_for_host(common, 'newhost')
self.assertNotEqual(nsp, "0:6:3")
self.assertEqual(nsp, "1:8:1")
    def test_invalid_iscsi_ip(self):
        """Driver setup fails when no configured iSCSI IP matches a 3PAR port."""
        config = self.setup_configuration()
        # None of these addresses appears in the mocked getPorts() reply
        # below (which only offers .252 and .253).
        config.hp3par_iscsi_ips = ['10.10.220.250', '10.10.220.251']
        config.iscsi_ip_address = '10.10.10.10'
        mock_conf = {
            'getPorts.return_value': {
                'members': [
                    {'portPos': {'node': 1, 'slot': 8, 'cardPort': 2},
                     'protocol': 2,
                     'IPAddr': '10.10.220.252',
                     'linkState': 4,
                     'device': [],
                     'iSCSIName': self.TARGET_IQN,
                     'mode': 2,
                     'HWAddr': '2C27D75375D2',
                     'type': 8},
                    {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
                     'protocol': 2,
                     'IPAddr': '10.10.220.253',
                     'linkState': 4,
                     'device': [],
                     'iSCSIName': self.TARGET_IQN,
                     'mode': 2,
                     'HWAddr': '2C27D75375D6',
                     'type': 8}]}}
        # Initialization must reject the configuration outright.
        self.assertRaises(exception.InvalidInput,
                          self.setup_driver,
                          config=config,
                          mock_conf=mock_conf)
def test_get_least_used_nsp(self):
mock_client = self.setup_driver()
ports = [
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'active': True},
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 2}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1}, 'active': True}]
mock_client.getVLUNs.return_value = {'members': ports}
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
vluns = common.client.getVLUNs()
nsp = self.driver._get_least_used_nsp(common, vluns['members'],
['0:2:1', '1:8:1'])
self.assertEqual(nsp, '1:8:1')
ports = [
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True}]
mock_client.getVLUNs.return_value = {'members': ports}
common = self.driver._login()
vluns = common.client.getVLUNs()
nsp = self.driver._get_least_used_nsp(common, vluns['members'],
['0:2:1', '1:2:1'])
self.assertEqual(nsp, '1:2:1')
ports = [
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 1, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True},
{'portPos': {'node': 0, 'slot': 2, 'cardPort': 1},
'active': True}]
mock_client.getVLUNs.return_value = {'members': ports}
common = self.driver._login()
vluns = common.client.getVLUNs()
nsp = self.driver._get_least_used_nsp(common, vluns['members'],
['1:1:1', '1:2:1'])
self.assertEqual(nsp, '1:1:1')
    def test_set_3par_chaps(self):
        """_set_3par_chaps only issues modifyHost when CHAP is enabled."""
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            # CHAP disabled (default config): no client calls expected.
            expected = []
            self.driver._set_3par_chaps(
                common, 'test-host', 'test-vol', 'test-host', 'pass')
            mock_client.assert_has_calls(expected)
        # Re-run with CHAP enabled: modifyHost must receive the CHAP-add
        # request with the supplied name/secret.
        config = self.setup_configuration()
        config.hp3par_iscsi_chap_enabled = True
        mock_client = self.setup_driver(config=config)
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            expected_mod_request = {
                'chapOperation': mock_client.HOST_EDIT_ADD,
                'chapOperationMode': mock_client.CHAP_INITIATOR,
                'chapName': 'test-host',
                'chapSecret': 'fake'
            }
            expected = [
                mock.call.modifyHost('test-host', expected_mod_request)
            ]
            self.driver._set_3par_chaps(
                common, 'test-host', 'test-vol', 'test-host', 'fake')
            mock_client.assert_has_calls(expected)
    @mock.patch('cinder.volume.utils.generate_password')
    def test_do_export(self, mock_utils):
        """_do_export returns CHAP provider_auth only when CHAP is enabled."""
        mock_client = self.setup_driver()
        volume = {'host': 'test-host@3pariscsi',
                  'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
        mock_utils.return_value = 'random-pass'
        mock_client.getHostVLUNs.return_value = [
            {'active': True,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': None, 'type': 0,
             'remoteName': 'iqn.1993-08.org.debian:01:222'}
        ]
        mock_client.getHost.return_value = {
            'name': 'osv-0DM4qZEVSKON-DXN-NwVpw',
            'initiatorChapEnabled': True
        }
        mock_client.getVolumeMetaData.return_value = {
            'value': 'random-pass'
        }
        # CHAP disabled in the config: no client calls and no
        # provider_auth are expected.
        expected = []
        expected_model = {'provider_auth': None}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            model = self.driver._do_export(common, volume)
            mock_client.assert_has_calls(expected)
            self.assertEqual(expected_model, model)
        mock_client.reset_mock()
        # Second pass with CHAP enabled: credentials are stored as volume
        # metadata and surfaced via provider_auth.
        config = self.setup_configuration()
        config.hp3par_iscsi_chap_enabled = True
        mock_client = self.setup_driver(config=config)
        volume = {'host': 'test-host@3pariscsi',
                  'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
        mock_utils.return_value = 'random-pass'
        mock_client.getHostVLUNs.return_value = [
            {'active': True,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': None, 'type': 0,
             'remoteName': 'iqn.1993-08.org.debian:01:222'}
        ]
        mock_client.getHost.return_value = {
            'name': 'osv-0DM4qZEVSKON-DXN-NwVpw',
            'initiatorChapEnabled': True
        }
        mock_client.getVolumeMetaData.return_value = {
            'value': 'random-pass'
        }
        expected = [
            mock.call.getHostVLUNs('test-host'),
            mock.call.getHost('test-host'),
            mock.call.getVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY),
            mock.call.setVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'),
            mock.call.setVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass')
        ]
        expected_model = {'provider_auth': 'CHAP test-host random-pass'}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            model = self.driver._do_export(common, volume)
            mock_client.assert_has_calls(expected)
            self.assertEqual(expected_model, model)
    @mock.patch('cinder.volume.utils.generate_password')
    def test_do_export_host_not_found(self, mock_utils):
        """CHAP metadata is still written when the host has no VLUNs yet."""
        config = self.setup_configuration()
        config.hp3par_iscsi_chap_enabled = True
        mock_client = self.setup_driver(config=config)
        volume = {'host': 'test-host@3pariscsi',
                  'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
        mock_utils.return_value = "random-pass"
        # getHostVLUNs raising NotFound means the host does not exist yet.
        mock_client.getHostVLUNs.side_effect = hpexceptions.HTTPNotFound(
            'fake')
        mock_client.getVolumeMetaData.return_value = {
            'value': 'random-pass'
        }
        expected = [
            mock.call.getHostVLUNs('test-host'),
            mock.call.setVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'),
            mock.call.setVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass')
        ]
        expected_model = {'provider_auth': 'CHAP test-host random-pass'}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            model = self.driver._do_export(common, volume)
            mock_client.assert_has_calls(expected)
            self.assertEqual(expected_model, model)
    @mock.patch('cinder.volume.utils.generate_password')
    def test_do_export_host_chap_disabled(self, mock_utils):
        """Export still sets CHAP metadata when the host reports CHAP off."""
        config = self.setup_configuration()
        config.hp3par_iscsi_chap_enabled = True
        mock_client = self.setup_driver(config=config)
        volume = {'host': 'test-host@3pariscsi',
                  'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
        mock_utils.return_value = 'random-pass'
        mock_client.getHostVLUNs.return_value = [
            {'active': True,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': None, 'type': 0,
             'remoteName': 'iqn.1993-08.org.debian:01:222'}
        ]
        # The backend host exists but has initiator CHAP turned off.
        mock_client.getHost.return_value = {
            'name': 'fake-host',
            'initiatorChapEnabled': False
        }
        mock_client.getVolumeMetaData.return_value = {
            'value': 'random-pass'
        }
        expected = [
            mock.call.getHostVLUNs('test-host'),
            mock.call.getHost('test-host'),
            mock.call.getVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY),
            mock.call.setVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'),
            mock.call.setVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass')
        ]
        expected_model = {'provider_auth': 'CHAP test-host random-pass'}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            model = self.driver._do_export(common, volume)
            mock_client.assert_has_calls(expected)
            self.assertEqual(expected_model, model)
    @mock.patch('cinder.volume.utils.generate_password')
    def test_do_export_no_active_vluns(self, mock_utils):
        """CHAP export proceeds even when the host's VLUNs are all inactive."""
        config = self.setup_configuration()
        config.hp3par_iscsi_chap_enabled = True
        mock_client = self.setup_driver(config=config)
        volume = {'host': 'test-host@3pariscsi',
                  'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
        mock_utils.return_value = "random-pass"
        # The only VLUN for this host is inactive.
        mock_client.getHostVLUNs.return_value = [
            {'active': False,
             'volumeName': self.VOLUME_3PAR_NAME,
             'lun': None, 'type': 0,
             'remoteName': 'iqn.1993-08.org.debian:01:222'}
        ]
        mock_client.getHost.return_value = {
            'name': 'fake-host',
            'initiatorChapEnabled': True
        }
        mock_client.getVolumeMetaData.return_value = {
            'value': 'random-pass'
        }
        expected = [
            mock.call.getHostVLUNs('test-host'),
            mock.call.getHost('test-host'),
            mock.call.setVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_USER_KEY, 'test-host'),
            mock.call.setVolumeMetaData(
                'osv-0DM4qZEVSKON-DXN-NwVpw', CHAP_PASS_KEY, 'random-pass')
        ]
        expected_model = {'provider_auth': 'CHAP test-host random-pass'}
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            model = self.driver._do_export(common, volume)
            mock_client.assert_has_calls(expected)
            self.assertEqual(model, expected_model)
    def test_ensure_export(self):
        """ensure_export rebuilds provider_auth from stored CHAP metadata."""
        mock_client = self.setup_driver()
        volume = {'host': 'test-host@3pariscsi',
                  'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
        mock_client.getAllVolumeMetaData.return_value = {
            'total': 0,
            'members': []
        }
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            model = self.driver.ensure_export(None, volume)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getAllVolumeMetaData('osv-0DM4qZEVSKON-DXN-NwVpw')
            ]
            # No CHAP metadata stored: provider_auth stays None.
            expected_model = {'provider_auth': None}
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertEqual(model, expected_model)
            # With CHAP user/password metadata present, provider_auth is
            # reconstructed from the stored values.
            mock_client.getAllVolumeMetaData.return_value = {
                'total': 2,
                'members': [
                    {
                        'creationTimeSec': 1406074222,
                        'value': 'fake-host',
                        'key': CHAP_USER_KEY,
                        'creationTime8601': '2014-07-22T17:10:22-07:00'
                    },
                    {
                        'creationTimeSec': 1406074222,
                        'value': 'random-pass',
                        'key': CHAP_PASS_KEY,
                        'creationTime8601': '2014-07-22T17:10:22-07:00'
                    }
                ]
            }
            model = self.driver.ensure_export(None, volume)
            expected = [
                mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw'),
                mock.call.getAllVolumeMetaData('osv-0DM4qZEVSKON-DXN-NwVpw')
            ]
            expected_model = {'provider_auth': "CHAP fake-host random-pass"}
            mock_client.assert_has_calls(
                self.standard_login +
                expected +
                self.standard_logout)
            self.assertEqual(model, expected_model)
def test_ensure_export_missing_volume(self):
mock_client = self.setup_driver()
volume = {'host': 'test-host@3pariscsi',
'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
mock_client.getVolume.side_effect = hpexceptions.HTTPNotFound(
'fake')
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
model = self.driver.ensure_export(None, volume)
expected = [mock.call.getVolume('osv-0DM4qZEVSKON-DXN-NwVpw')]
expected_model = None
mock_client.assert_has_calls(
self.standard_login +
expected +
self.standard_logout)
self.assertEqual(model, expected_model)
    @mock.patch.object(volume_types, 'get_volume_type')
    def test_get_volume_settings_default_pool(self, _mock_volume_types):
        """A type with no extra specs takes its CPG from the host's pool."""
        _mock_volume_types.return_value = {
            'name': 'gold',
            'id': 'gold-id',
            'extra_specs': {}}
        mock_client = self.setup_driver()
        with mock.patch.object(hpcommon.HP3PARCommon,
                               '_create_client') as mock_create_client:
            mock_create_client.return_value = mock_client
            common = self.driver._login()
            volume = {'host': 'test-host@3pariscsi#pool_foo',
                      'id': 'd03338a9-9115-48a3-8dfc-35cdfcdc15a7'}
            pool = volume_utils.extract_host(volume['host'], 'pool')
            model = common.get_volume_settings_from_type_id('gold-id', pool)
            # With no cpg in the extra specs, the pool extracted from the
            # host string becomes the CPG.
            self.assertEqual(model['cpg'], 'pool_foo')
def test_get_model_update(self):
mock_client = self.setup_driver()
with mock.patch.object(hpcommon.HP3PARCommon,
'_create_client') as mock_create_client:
mock_create_client.return_value = mock_client
common = self.driver._login()
model_update = common._get_model_update('xxx@yyy#zzz', 'CPG')
self.assertEqual(model_update, {'host': 'xxx@yyy#CPG'})
# Two active VLUNs, one per node (ports 0:8:2 and 1:8:1); used by
# test_get_least_used_nps_for_host_fc together with PORTS1_RET.
VLUNS5_RET = ({'members':
               [{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2},
                 'active': True},
                {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
                 'active': True}]})
# Canned client.getPorts() reply: two iSCSI target ports,
# 1:8:2 at 10.10.220.252 and 1:8:1 at 10.10.220.253.
PORTS_RET = ({'members':
              [{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2},
                'protocol': 2,
                'IPAddr': '10.10.220.252',
                'linkState': 4,
                'device': [],
                'iSCSIName': 'iqn.2000-05.com.3pardata:21820002ac00383d',
                'mode': 2,
                'HWAddr': '2C27D75375D2',
                'type': 8},
               {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
                'protocol': 2,
                'IPAddr': '10.10.220.253',
                'linkState': 4,
                'device': [],
                'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d',
                'mode': 2,
                'HWAddr': '2C27D75375D6',
                'type': 8}]})
# VLUN load fixture: host 'foo' has one VLUN on port 1:8:2 while host
# 'bar' has three on 1:8:1 (drives the least-used-NSP selection tests).
VLUNS1_RET = ({'members':
               [{'portPos': {'node': 1, 'slot': 8, 'cardPort': 2},
                 'hostname': 'foo', 'active': True},
                {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
                 'hostname': 'bar', 'active': True},
                {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
                 'hostname': 'bar', 'active': True},
                {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
                 'hostname': 'bar', 'active': True}]})
# Mixed port list: two iSCSI ports (0:8:2 and 1:8:1, with IP/IQN) plus
# one WWN-bearing port at 0:6:3 that carries no iSCSI identity.
PORTS1_RET = ({'members':
               [{'portPos': {'node': 0, 'slot': 8, 'cardPort': 2},
                 'protocol': 2,
                 'IPAddr': '10.10.120.252',
                 'linkState': 4,
                 'device': [],
                 'iSCSIName': 'iqn.2000-05.com.3pardata:21820002ac00383d',
                 'mode': 2,
                 'HWAddr': '2C27D75375D2',
                 'type': 8},
                {'portPos': {'node': 1, 'slot': 8, 'cardPort': 1},
                 'protocol': 2,
                 'IPAddr': '10.10.220.253',
                 'linkState': 4,
                 'device': [],
                 'iSCSIName': 'iqn.2000-05.com.3pardata:21810002ac00383d',
                 'mode': 2,
                 'HWAddr': '2C27D75375D6',
                 'type': 8},
                {'portWWN': '20210002AC00383D',
                 'protocol': 1,
                 'linkState': 4,
                 'mode': 2,
                 'device': ['cage2'],
                 'nodeWWN': '20210002AC00383D',
                 'type': 2,
                 'portPos': {'node': 0, 'slot': 6, 'cardPort': 3}}]})
| true | true |
f7249cfe31de6802563053234016e283c91f0986 | 878 | py | Python | tests/conftest.py | lycantropos/bentley_ottmann | 988075aada80e5d5c8d53d513de130004b69c3b9 | [
"MIT"
] | 13 | 2020-04-03T04:43:44.000Z | 2022-01-18T10:40:40.000Z | tests/conftest.py | lycantropos/bentley_ottmann | 988075aada80e5d5c8d53d513de130004b69c3b9 | [
"MIT"
] | 19 | 2020-01-31T05:25:42.000Z | 2021-04-01T13:20:05.000Z | tests/conftest.py | lycantropos/bentley_ottmann | 988075aada80e5d5c8d53d513de130004b69c3b9 | [
"MIT"
] | 3 | 2020-06-08T11:15:32.000Z | 2021-02-15T12:37:01.000Z | import os
import platform
import pytest
from ground.base import (Context,
get_context)
from hypothesis import (HealthCheck,
settings)
# CI detection: Azure Pipelines sets TF_BUILD in the environment.
on_azure_pipelines = bool(os.getenv('TF_BUILD', False))
is_pypy = platform.python_implementation() == 'PyPy'
settings.register_profile('default',
                          deadline=None,
                          # On CI, shrink the hypothesis example budget
                          # (further on PyPy: divided by 4 instead of 1).
                          max_examples=(settings.default.max_examples
                                        // (1 + 3 * is_pypy)
                                        if on_azure_pipelines
                                        else settings.default.max_examples),
                          suppress_health_check=[HealthCheck.filter_too_much,
                                                 HealthCheck.too_slow])
@pytest.fixture(scope='session')
def context() -> Context:
    """Return the ground library context, shared across the whole session."""
    return get_context()
| 35.12 | 77 | 0.525057 | import os
import platform
import pytest
from ground.base import (Context,
get_context)
from hypothesis import (HealthCheck,
settings)
# CI detection: Azure Pipelines sets TF_BUILD in the environment.
on_azure_pipelines = bool(os.getenv('TF_BUILD', False))
is_pypy = platform.python_implementation() == 'PyPy'
settings.register_profile('default',
                          deadline=None,
                          # On CI, shrink the hypothesis example budget
                          # (further on PyPy: divided by 4 instead of 1).
                          max_examples=(settings.default.max_examples
                                        // (1 + 3 * is_pypy)
                                        if on_azure_pipelines
                                        else settings.default.max_examples),
                          suppress_health_check=[HealthCheck.filter_too_much,
                                                 HealthCheck.too_slow])
@pytest.fixture(scope='session')
def context() -> Context:
    """Return the ground library context, shared across the whole session."""
    return get_context()
| true | true |
f7249ecb6643e8a4d8abef519c48499b5a5bb0e4 | 630 | py | Python | Django/Video_Project/Day05/SqlToModel/manage.py | pyforspider/LearningLog | ac5988d7fbb0d07d6e7485f9050250af5bcba089 | [
"MIT"
] | null | null | null | Django/Video_Project/Day05/SqlToModel/manage.py | pyforspider/LearningLog | ac5988d7fbb0d07d6e7485f9050250af5bcba089 | [
"MIT"
] | 18 | 2020-02-12T01:18:12.000Z | 2022-03-12T00:42:15.000Z | Django/Video_Project/Day05/SqlToModel/manage.py | pyforspider/LearningLog | ac5988d7fbb0d07d6e7485f9050250af5bcba089 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run Django administrative tasks from the command line."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SqlToModel.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint; the original ImportError is chained as
        # the cause for debugging.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| 28.636364 | 74 | 0.684127 |
import os
import sys
def main():
    """Run Django administrative tasks from the command line."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SqlToModel.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a hint; the original ImportError is chained as
        # the cause for debugging.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| true | true |
f7249eeef430a62b4cf9cb984568e8dc1fa829d5 | 1,321 | py | Python | diffie_hellman.py | faroos3/crypto_netsec_hw2 | 72f8e0baf93b555e41426a45149a02004bbe8b04 | [
"MIT",
"Unlicense"
] | null | null | null | diffie_hellman.py | faroos3/crypto_netsec_hw2 | 72f8e0baf93b555e41426a45149a02004bbe8b04 | [
"MIT",
"Unlicense"
] | null | null | null | diffie_hellman.py | faroos3/crypto_netsec_hw2 | 72f8e0baf93b555e41426a45149a02004bbe8b04 | [
"MIT",
"Unlicense"
] | null | null | null | '''
This is the file that contains the Diffie-Hellman algorithm to establish shared keys between Alice, Bob, and the KDC. It'll just be functions that are going
to be used in other files.
Using this video to help me out with Diffie-Hellman: https://www.youtube.com/watch?v=Yjrfm_oRO0w
g and n are public numbers. g is a small prime number, whereas n is a reallllly big number. Alice and Bob pick two numbers a and b which are 1 <= a | b <= n.
n is often 4000 bits long, don't think I'll have mine be that big but whatever.
a and b are selected by the KDC and are kept private between A and B. I guess it would send what it picks to Alice/Bob?
According to our slides, g is alpha and q is n, and n/q has to be prime.
The KDC should be a client whereas Alice and Bob will be clients that connect to it.
On Alice's side (a client), it computes (g^a)mod n.
On Bob's side (another client), it computes (g^b)mod n.
Bob sends g^b mod n and Alice sends g^a mod n, and they compute (g^a)^b mod n and (g^b)^a mod n respectively; that shared value is the secret key.
This is done between Alice and the KDC, and then Bob and the KDC.
'''
import random # want to use random.randint(1, n)
import time
import toy_des
# these are in the public domain of Diffie-Hellman
g = 331
n = 1021
def key_maker():
if __name__ == "__main__": | 36.694444 | 157 | 0.723694 | '''
This is the file that contains the Diffie-Hellman algorithm to establish shared keys between Alice, Bob, and the KDC. It'll just be functions that are going
to be used in other files.
Using this video to help me out with Diffie-Hellman: https://www.youtube.com/watch?v=Yjrfm_oRO0w
g and n are public numbers. g is a small prime number, whereas n is a reallllly big number. Alice and Bob pick two numbers a and b which are 1 <= a | b <= n.
n is often 4000 bits long, don't think I'll have mine be that big but whatever.
a and b are selected by the KDC and are kept private between A and B. I guess it would send what it picks to Alice/Bob?
According to our slides, g is alpha and q is n, and n/q has to be prime.
The KDC should be a client whereas Alice and Bob will be clients that connect to it.
On Alice's side (a client), it computes (g^a)mod n.
On Bob's side (another client), it computes (g^b)mod n.
Bob sends the g^b mod n and Alice sends g^a mod n, and they computer (g^a)^b mod n and (g^b)^a mod n and that's the private key.
This is done between Alice and the KDC, and then Bob and the KDC.
'''
import random
import time
import toy_des
g = 331
n = 1021
def key_maker():
if __name__ == "__main__": | false | true |
f7249f59bdaa349ad040c4306eb1c2ca214840ed | 5,504 | py | Python | accounts/tests.py | mgovoni-devel/MatD3 | 5b68d147f886bce427f92bb560159e62cec2d4e7 | [
"BSD-2-Clause-FreeBSD"
] | 7 | 2019-09-14T07:24:09.000Z | 2021-06-15T16:15:05.000Z | accounts/tests.py | mgovoni-devel/MatD3 | 5b68d147f886bce427f92bb560159e62cec2d4e7 | [
"BSD-2-Clause-FreeBSD"
] | 14 | 2019-12-05T01:49:19.000Z | 2021-06-23T18:34:51.000Z | accounts/tests.py | mgovoni-devel/MatD3 | 5b68d147f886bce427f92bb560159e62cec2d4e7 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2019-11-06T21:16:57.000Z | 2019-11-30T10:51:44.000Z | # This file is covered by the BSD license. See LICENSE in the root directory.
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.core import mail
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
User = get_user_model()
USERNAME = 'testuser'
PASSWORD = '28&}>z1-%ZY|0ATwGU+7I!F7pJ:+(E'
FIRSTNAME = 'first'
LASTNAME = 'last'
EMAIL = 'mail@example.com'
DESCRIPTION = 'description'
INSTITUTION = 'institution'
WEBSITE = 'http://example.com'
class UserCreationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
User.objects.create(
username='superuser', email=EMAIL, is_superuser=True)
Group.objects.create(name='users')
def test_success(self):
response = self.client.post(reverse('accounts:register'), {
'username': USERNAME,
'email': EMAIL,
'password1': PASSWORD,
'password2': PASSWORD,
})
self.assertFalse(User.objects.last().is_active)
self.assertContains(response, 'Confirmation email has been sent.')
for line in mail.outbox[0].body.splitlines():
line_stripped = line.lstrip()
if line_stripped.startswith('http'):
activation_url = line_stripped
break
response = self.client.get(activation_url, follow=True)
self.assertRedirects(response, reverse('accounts:profile'))
self.assertContains(response, 'Account confirmed.')
self.assertTrue(User.objects.last().is_active)
self.assertFalse(User.objects.last().is_staff)
self.assertEqual(len(mail.outbox), 2)
def test_no_email_or_username(self):
response = self.client.post(reverse('accounts:register'), {
'username': USERNAME, 'password1': PASSWORD, 'password2': PASSWORD,
})
self.assertContains(response, 'This field is required')
response = self.client.post(reverse('accounts:register'), {
'email': EMAIL, 'password1': PASSWORD, 'password2': PASSWORD,
})
self.assertContains(response, 'This field is required')
self.assertEqual(User.objects.count(), 1)
def test_incorrect_activation(self):
uid = 'MMM'
token = '00a-'+20*'0'
response = self.client.get(
reverse('accounts:activate', kwargs={'uid': uid, 'token': token}),
follow=True)
self.assertContains(response, 'Activation link is invalid!')
def test_user_profile(self):
user = User.objects.create(username=USERNAME, email=EMAIL)
user.set_password(PASSWORD)
user.save()
response = self.client.get(reverse('accounts:profile'))
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.client.force_login(user)
response = self.client.get(reverse('accounts:profile'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self.client.post(reverse('accounts:profile'), {
'first_name': FIRSTNAME,
'last_name': LASTNAME,
'email': EMAIL,
'description': DESCRIPTION,
'institution': INSTITUTION,
'website': WEBSITE,
}, follow=True)
user = User.objects.last()
self.assertEqual(user.first_name, FIRSTNAME)
self.assertEqual(user.last_name, LASTNAME)
self.assertEqual(user.userprofile.description, DESCRIPTION)
self.assertEqual(user.userprofile.institution, INSTITUTION)
self.assertEqual(user.userprofile.website, WEBSITE)
def test_change_password(self):
response = self.client.post(reverse('accounts:change_password'))
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
user = User.objects.first()
user.set_password(PASSWORD)
user.save()
self.client.force_login(user)
response = self.client.post(reverse('accounts:change_password'),
{'old_password': PASSWORD})
self.assertContains(response,
'Incorrect password or new passwords not matching')
response = self.client.post(reverse('accounts:change_password'), {
'old_password': PASSWORD,
'new_password1': PASSWORD,
'new_password2': PASSWORD,
}, follow=True)
self.assertNotContains(
response, 'Incorrect password or new passwords not matching')
self.assertContains(response, 'Password successfully changed')
class TemplateTestCase(TestCase):
@classmethod
def setUpTestData(cls):
User.objects.create(
username='superuser', email=EMAIL, is_superuser=True)
cls.user = User.objects.create(
username=USERNAME, is_active=True)
def test_buttons(self):
response = self.client.get('')
self.assertContains(response, 'Register')
self.client.force_login(self.user)
response = self.client.get('')
self.assertContains(response, 'Profile')
self.assertNotContains(response, 'Add Data')
self.user.is_staff = True
self.user.save()
response = self.client.get('')
self.assertContains(response, 'Add Data')
class AnonymousUserTestCase(TestCase):
def test_load_pages(self):
response = self.client.get(reverse('accounts:profile'))
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
| 40.175182 | 79 | 0.649891 |
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.core import mail
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
User = get_user_model()
USERNAME = 'testuser'
PASSWORD = '28&}>z1-%ZY|0ATwGU+7I!F7pJ:+(E'
FIRSTNAME = 'first'
LASTNAME = 'last'
EMAIL = 'mail@example.com'
DESCRIPTION = 'description'
INSTITUTION = 'institution'
WEBSITE = 'http://example.com'
class UserCreationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
User.objects.create(
username='superuser', email=EMAIL, is_superuser=True)
Group.objects.create(name='users')
def test_success(self):
response = self.client.post(reverse('accounts:register'), {
'username': USERNAME,
'email': EMAIL,
'password1': PASSWORD,
'password2': PASSWORD,
})
self.assertFalse(User.objects.last().is_active)
self.assertContains(response, 'Confirmation email has been sent.')
for line in mail.outbox[0].body.splitlines():
line_stripped = line.lstrip()
if line_stripped.startswith('http'):
activation_url = line_stripped
break
response = self.client.get(activation_url, follow=True)
self.assertRedirects(response, reverse('accounts:profile'))
self.assertContains(response, 'Account confirmed.')
self.assertTrue(User.objects.last().is_active)
self.assertFalse(User.objects.last().is_staff)
self.assertEqual(len(mail.outbox), 2)
def test_no_email_or_username(self):
response = self.client.post(reverse('accounts:register'), {
'username': USERNAME, 'password1': PASSWORD, 'password2': PASSWORD,
})
self.assertContains(response, 'This field is required')
response = self.client.post(reverse('accounts:register'), {
'email': EMAIL, 'password1': PASSWORD, 'password2': PASSWORD,
})
self.assertContains(response, 'This field is required')
self.assertEqual(User.objects.count(), 1)
def test_incorrect_activation(self):
uid = 'MMM'
token = '00a-'+20*'0'
response = self.client.get(
reverse('accounts:activate', kwargs={'uid': uid, 'token': token}),
follow=True)
self.assertContains(response, 'Activation link is invalid!')
def test_user_profile(self):
user = User.objects.create(username=USERNAME, email=EMAIL)
user.set_password(PASSWORD)
user.save()
response = self.client.get(reverse('accounts:profile'))
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
self.client.force_login(user)
response = self.client.get(reverse('accounts:profile'))
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self.client.post(reverse('accounts:profile'), {
'first_name': FIRSTNAME,
'last_name': LASTNAME,
'email': EMAIL,
'description': DESCRIPTION,
'institution': INSTITUTION,
'website': WEBSITE,
}, follow=True)
user = User.objects.last()
self.assertEqual(user.first_name, FIRSTNAME)
self.assertEqual(user.last_name, LASTNAME)
self.assertEqual(user.userprofile.description, DESCRIPTION)
self.assertEqual(user.userprofile.institution, INSTITUTION)
self.assertEqual(user.userprofile.website, WEBSITE)
def test_change_password(self):
response = self.client.post(reverse('accounts:change_password'))
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
user = User.objects.first()
user.set_password(PASSWORD)
user.save()
self.client.force_login(user)
response = self.client.post(reverse('accounts:change_password'),
{'old_password': PASSWORD})
self.assertContains(response,
'Incorrect password or new passwords not matching')
response = self.client.post(reverse('accounts:change_password'), {
'old_password': PASSWORD,
'new_password1': PASSWORD,
'new_password2': PASSWORD,
}, follow=True)
self.assertNotContains(
response, 'Incorrect password or new passwords not matching')
self.assertContains(response, 'Password successfully changed')
class TemplateTestCase(TestCase):
@classmethod
def setUpTestData(cls):
User.objects.create(
username='superuser', email=EMAIL, is_superuser=True)
cls.user = User.objects.create(
username=USERNAME, is_active=True)
def test_buttons(self):
response = self.client.get('')
self.assertContains(response, 'Register')
self.client.force_login(self.user)
response = self.client.get('')
self.assertContains(response, 'Profile')
self.assertNotContains(response, 'Add Data')
self.user.is_staff = True
self.user.save()
response = self.client.get('')
self.assertContains(response, 'Add Data')
class AnonymousUserTestCase(TestCase):
def test_load_pages(self):
response = self.client.get(reverse('accounts:profile'))
self.assertEqual(response.status_code, status.HTTP_302_FOUND)
| true | true |
f724a0579c82ae147fc51ad5b6508680a848d799 | 27,245 | py | Python | app/lib/dns/import_manager.py | grepleria/SnitchDNS | 24f98b01fd5fca9aa2c660d6ee15742f2e44915c | [
"MIT"
] | 152 | 2020-12-07T13:26:53.000Z | 2022-03-23T02:00:04.000Z | app/lib/dns/import_manager.py | grepleria/SnitchDNS | 24f98b01fd5fca9aa2c660d6ee15742f2e44915c | [
"MIT"
] | 16 | 2020-12-07T17:04:36.000Z | 2022-03-10T11:12:52.000Z | app/lib/dns/import_manager.py | grepleria/SnitchDNS | 24f98b01fd5fca9aa2c660d6ee15742f2e44915c | [
"MIT"
] | 36 | 2020-12-09T13:04:40.000Z | 2022-03-12T18:14:36.000Z | from app.lib.dns.helpers.shared import SharedHelper
import os
import datetime
import json
import progressbar
from app import db
class DNSImportManager(SharedHelper):
IMPORT_TYPE_ZONE = 1
IMPORT_TYPE_RECORD = 2
@property
def last_error(self):
return self.__last_error
@last_error.setter
def last_error(self, value):
self.__last_error = value
def __init__(self, dns_zones, dns_records, users):
self.__last_error = ''
self.__dns_zones = dns_zones
self.__dns_records = dns_records
self.__zone_headers = ['domain', 'active', 'catch_all', 'forwarding', 'regex', 'master', 'tags']
self.__record_headers = ['domain', 'id', 'ttl', 'cls', 'type', 'active', 'data', 'is_conditional', 'conditional_count', 'conditional_limit', 'conditional_reset', 'conditional_data']
self.__users = users
def identify(self, csvfile):
self.last_error = ''
if not os.path.isfile(csvfile):
self.last_error = 'CSV file does not exist'
return False
header = self._load_csv_header(csvfile)
zone_header_count = 0
record_header_count = 0
for column in header:
if column in self.__zone_headers:
zone_header_count += 1
if column in self.__record_headers:
record_header_count += 1
if zone_header_count == len(self.__zone_headers):
return self.IMPORT_TYPE_ZONE
elif record_header_count == len(self.__record_headers):
return self.IMPORT_TYPE_RECORD
self.last_error = 'If you are uploading a ZONE file these are the required columns: {0}. If you are uploading a RECORD file then the required columns are: {1}.'.format(', '.join(self.__zone_headers), ', '.join(self.__record_headers))
return False
def review(self, csvfile, type, user_id, show_progressbar=False):
self.last_error = ''
if not os.path.isfile(csvfile):
self.last_error = 'CSV file does not exist'
return False
lines = self._load_csv(csvfile)
if len(lines) == 0:
self.last_error = 'CSV is empty'
return False
user = self.__users.get_user(user_id)
if not user:
self.last_error = 'Could not find user with ID {0}'.format(user_id)
return False
all_errors = []
errors = []
rows = []
if type == self.IMPORT_TYPE_ZONE:
rows = self.__categorise_rows(lines, type)
rows, errors = self.__process_zones(rows, user, show_progressbar=show_progressbar)
elif type == self.IMPORT_TYPE_RECORD:
rows = self.__categorise_rows(lines, type)
rows, errors = self.__process_records(rows, user, show_progressbar=show_progressbar)
all_errors += errors
# Sort errors per row number.
all_errors = sorted(all_errors, key=lambda k: k['row'])
return {
'data': rows,
'errors': all_errors
}
def run(self, data, type, user_id, show_progressbar=False):
errors = []
if type == self.IMPORT_TYPE_ZONE:
self.__import_zones(data, user_id, show_progressbar=show_progressbar)
elif type == self.IMPORT_TYPE_RECORD:
self.__import_records(data, user_id, errors, show_progressbar=show_progressbar)
return errors if len(errors) > 0 else True
def __import_zones(self, zones, user_id, show_progressbar=False, batch_size=100):
"""
This function has been heavily optimised as when I tried to import 250k domains its ETA was 1.5h, which isn't
very practical. The main assumption made here is that when this function is called, all validation checks will
have ready been completed.
"""
widget = [
progressbar.FormatLabel(''),
' ',
progressbar.Percentage(),
' ',
progressbar.Bar('#'),
' ',
progressbar.RotatingMarker(),
' ',
progressbar.ETA()
]
count = 0
unique_tags = []
if show_progressbar:
widget[0] = progressbar.FormatLabel('Importing zones')
bar = progressbar.ProgressBar(max_value=len(zones), widgets=widget)
# with bar as zones:
for zone_to_import in list(zones):
count += 1
bar.update(count) if show_progressbar else False
self.__zone_update_or_create(
zone_to_import['domain'],
zone_to_import['active'],
zone_to_import['catch_all'],
zone_to_import['forwarding'],
zone_to_import['regex'],
zone_to_import['master'],
user_id,
id=zone_to_import['id'],
autocommit=False
)
if count % batch_size == 0:
db.session.commit()
unique_tags = list(set(unique_tags + zone_to_import['tags']))
db.session.commit()
if show_progressbar:
widget[0] = progressbar.FormatLabel('Re-mapping zones')
bar = progressbar.ProgressBar(max_value=len(zones), widgets=widget)
domain_mapping = self.__get_domain_mapping(user_id)
zone_ids = []
i = 0
for zone_to_import in list(zones):
i += 1
bar.update(i) if show_progressbar else False
zone_to_import['id'] = domain_mapping[zone_to_import['domain']] if zone_to_import['domain'] in domain_mapping else 0
zone_ids.append(zone_to_import['id'])
self.__zone_clear_tags(zone_ids, show_progressbar=show_progressbar, widget=widget)
if show_progressbar:
widget[0] = progressbar.FormatLabel('Importing tags')
bar = progressbar.ProgressBar(max_value=len(zones), widgets=widget)
self.__tags_create(user_id, unique_tags)
tag_mapping = self.__get_tag_mapping(user_id)
count = 0
for zone_to_import in list(zones):
count += 1
bar.update(count) if show_progressbar else False
tags = {}
for tag in zone_to_import['tags']:
tags[tag] = tag_mapping[tag]
self.__zone_save_tags(zone_to_import['id'], tags, autocommit=False)
if count % batch_size == 0:
db.session.commit()
db.session.commit()
return True
def __import_records(self, records, user_id, errors, show_progressbar=False, batch_size = 100):
domain_mapping = self.__get_domain_mapping(user_id)
widget = [
progressbar.FormatLabel(''),
' ',
progressbar.Percentage(),
' ',
progressbar.Bar('#'),
' ',
progressbar.RotatingMarker(),
' ',
progressbar.ETA()
]
if show_progressbar:
widget[0] = progressbar.FormatLabel('Importing records')
bar = progressbar.ProgressBar(max_value=len(records), widgets=widget)
count = 0
for record_to_import in records:
count += 1
bar.update(count) if show_progressbar else False
# First, get the zone.
zone_id = domain_mapping[record_to_import['domain']] if record_to_import['domain'] in domain_mapping else None
if not zone_id:
# At this point all zones should exist.
errors.append('Could not find zone: {0}'.format(record_to_import['domain']))
continue
data = json.dumps(record_to_import['data']) if isinstance(record_to_import['data'], dict) else record_to_import['data']
conditional_data = json.dumps(record_to_import['conditional_data']) if isinstance(record_to_import['conditional_data'], dict) else record_to_import['conditional_data']
self.__record_update_or_create(
zone_id,
record_to_import['ttl'],
record_to_import['cls'],
record_to_import['type'],
record_to_import['active'],
data,
record_to_import['is_conditional'],
record_to_import['conditional_count'],
record_to_import['conditional_limit'],
record_to_import['conditional_reset'],
conditional_data,
id=record_to_import['record_id'],
autocommit=False
)
if count % batch_size == 0:
db.session.commit()
db.session.commit()
return True
def __process_zones(self, zones, user, show_progressbar=False):
errors = []
items = []
widget = [
progressbar.FormatLabel(''),
' ',
progressbar.Percentage(),
' ',
progressbar.Bar('#'),
' ',
progressbar.RotatingMarker(),
' ',
progressbar.ETA()
]
if show_progressbar:
widget[0] = progressbar.FormatLabel('Processing zones')
bar = progressbar.ProgressBar(max_value=len(zones), widgets=widget)
domain_mapping = self.__get_domain_mapping(user.id)
user_base_domain = '.' + self.__dns_zones.get_base_domain(user.admin, user.username)
count = 0
for zone in zones:
count += 1
bar.update(count) if show_progressbar else False
active = True if zone['active'] in ['1', 'yes', 'true'] else False
catch_all = True if zone['catch_all'] in ['1', 'yes', 'true'] else False
forwarding = True if zone['forwarding'] in ['1', 'yes', 'true'] else False
regex = True if zone['regex'] in ['1', 'yes', 'true'] else False
master = True if zone['master'] in ['1', 'yes', 'true'] else False
tags = zone['tags'].split(',')
# Trim each element.
map(str.strip, tags)
# Remove empty elements.
tags = list(filter(None, tags))
is_valid = True
if not user.admin:
if zone['domain'][-len(user_base_domain):] != user_base_domain and user_base_domain != '.' + zone['domain']:
is_valid = False
errors.append({'row': zone['row'], 'error': 'Zone {0} does not match your assigned master domain'.format(zone['domain'])})
if is_valid:
domain = {
'id': domain_mapping[zone['domain']] if zone['domain'] in domain_mapping else 0,
'domain': zone['domain'],
'active': active,
'catch_all': catch_all,
'forwarding': forwarding,
'regex': regex,
'master': master,
'tags': tags
}
items.append(domain)
return items, errors
def __process_records(self, records, user, show_progressbar=False):
errors = []
items = []
widget = [
progressbar.FormatLabel(''),
' ',
progressbar.Percentage(),
' ',
progressbar.Bar('#'),
' ',
progressbar.RotatingMarker(),
' ',
progressbar.ETA()
]
if show_progressbar:
widget[0] = progressbar.FormatLabel('Processing records')
bar = progressbar.ProgressBar(max_value=len(records), widgets=widget)
domain_mapping = self.__get_domain_mapping(user.id)
domain_mapping_reverse = self.__get_domain_mapping(user.id, reverse=True)
count = 0
for record in records:
count += 1
bar.update(count) if show_progressbar else False
record_errors = []
active = True if record['active'] in ['1', 'yes', 'true'] else False
zone_id = self.__process_record_zone(record, record_errors, domain_mapping)
record_id = self.__process_record_id(record, zone_id, record_errors, domain_mapping_reverse)
ttl = self.__process_record_ttl(record, record_errors)
cls = self.__process_record_cls(record, record_errors)
type = self.__process_record_type(record, record_errors)
is_conditional = True if record['is_conditional'] in ['1', 'yes', 'true'] else False
conditional_reset = True if record['conditional_reset'] in ['1', 'yes', 'true'] else False
conditional_count = self.__process_number(record, record_errors, 'conditional_count')
conditional_limit = self.__process_number(record, record_errors, 'conditional_limit')
data = {}
conditional_data = {}
if len(type) > 0:
data = self.__process_record_data(record, type, record_errors)
if is_conditional:
conditional_data = self.__process_record_data(record, type, record_errors, is_conditional=True)
if len(record_errors) == 0:
items.append({
'record_id': record_id,
'zone_id': zone_id,
'domain': record['domain'],
'active': active,
'ttl': ttl,
'cls': cls,
'type': type,
'data': data,
'is_conditional': is_conditional,
'conditional_count': conditional_count,
'conditional_limit': conditional_limit,
'conditional_reset': conditional_reset,
'conditional_data': conditional_data
})
else:
errors += record_errors
return items, errors
def __process_number(self, record, errors, attribute):
value = record[attribute]
if len(value) == 0 or value.isdigit() is False:
errors.append({'row': record['row'], 'error': 'Invalid attribute {0} value: {1}'.format(record[attribute], value)})
return 0
return int(value)
def __process_record_id(self, record, zone_id, errors, domain_mapping):
zone_id = zone_id if zone_id > 0 else None
record_id = 0
if len(record['id']) > 0:
if not record['id'].isdigit():
errors.append({'row': record['row'], 'error': 'Invalid record id: {0}'.format(record['id'])})
return 0
record_id = int(record['id'])
if record_id > 0:
record_exists = self.__record_exists(record_id, dns_zone_id=zone_id)
if not record_exists:
# Record not found - treat as new.
return 0
if zone_id > 0:
domain = domain_mapping[zone_id] if zone_id in domain_mapping else None
if not domain:
errors.append({'row': record['row'], 'error': 'Zone {0} not found'.format(record['domain'])})
return 0
if record['domain'] != domain:
errors.append({'row': record['row'], 'error': 'Record {0} does not belong to zone {1}'.format(record_id, zone_id)})
return 0
return record_id
def __process_record_zone(self, record, errors, domain_mapping):
zone_id = domain_mapping[record['domain']] if record['domain'] in domain_mapping else 0
if zone_id == 0:
errors.append({'row': record['row'], 'error': 'Zone not found: {0}'.format(record['domain'])})
return zone_id
def __record_exists(self, dns_record_id, dns_zone_id=None):
params = {'id': dns_record_id}
sql = "SELECT COUNT(id) AS c FROM dns_records WHERE id = :id"
if dns_zone_id is not None:
params['dns_zone_id'] = dns_zone_id
sql += " AND dns_zone_id = :dns_zone_id"
result = db.session.execute(sql, params).first()
return result[0] > 0 if result is not None else False
def __process_record_ttl(self, record, errors):
ttl = 0
if not record['ttl'].isdigit():
errors.append({'row': record['row'], 'error': 'Invalid TTL: {0}'.format(record['ttl'])})
else:
ttl = int(record['ttl'])
if ttl < 0:
errors.append({'row': record['row'], 'error': 'Invalid TTL: {0}'.format(record['ttl'])})
return ttl
def __process_record_cls(self, record, errors):
cls = ''
if not record['cls'] in self.__dns_records.get_classes():
errors.append({'row': record['row'], 'error': 'Invalid class: {0}'.format(record['cls'])})
else:
cls = record['cls']
return cls
def __process_record_type(self, record, errors):
type = ''
if not record['type'] in self.__dns_records.get_types():
errors.append({'row': record['row'], 'error': 'Invalid type: {0}'.format(record['type'])})
else:
type = record['type']
return type
def __properties_to_dict(self, record, errors, is_conditional=False):
attribute = 'conditional_data' if is_conditional else 'data'
rows = record[attribute].split("\n")
properties = {}
for row in rows:
parts = row.split('=', 1)
if len(parts) != 2:
errors.append({'row': record['row'], 'error': 'Invalid record property: {0}'.format(row)})
continue
name = parts[0].lower().strip()
value = parts[1].strip()
properties[name] = value
return properties
def __process_record_data(self, record, type, errors, is_conditional=False):
record_properties = self.__properties_to_dict(record, errors, is_conditional=is_conditional)
required_properties = self.__dns_records.get_record_type_properties(type, clean=True)
data = {}
for property_name, property_type in required_properties.items():
if not property_name in record_properties:
errors.append({'row': record['row'], 'error': 'Missing record property: {0}'.format(property_name)})
continue
value = record_properties[property_name]
if (property_type == 'int') and (isinstance(value, str)):
if not value.isdigit():
errors.append({'row': record['row'], 'error': "Invalid value '{0}' for property '{1}'".format(value, property_name)})
continue
value = int(value)
if (property_type == 'str') and (len(value) == 0):
errors.append({'row': record['row'], 'error': "Invalid value '{0}' for property '{1}'".format(value, property_name)})
continue
elif (property_type == 'int') and (value < 0):
errors.append({'row': record['row'], 'error': "Invalid value '{0}' for property '{1}'".format(value, property_name)})
continue
data[property_name] = value
return data
def __categorise_rows(self, rows, type):
data = []
for i, row in enumerate(rows):
# Error row is +1 because the first row is the header which was removed.
actual_row = i + 1
if type == self.IMPORT_TYPE_ZONE:
data.append({
'row': actual_row,
'domain': row['domain'].strip().lower(),
'active': row['active'].strip().lower(),
'catch_all': row['catch_all'].strip().lower(),
'forwarding': row['forwarding'].strip().lower(),
'regex': row['regex'].strip().lower(),
'master': row['master'].strip().lower(),
'tags': row['tags'].strip()
})
elif type == self.IMPORT_TYPE_RECORD:
data.append({
'row': actual_row,
'domain': row['domain'].strip().lower(),
'id': row['id'].strip(),
'ttl': row['ttl'].strip().lower(),
'cls': row['cls'].strip().upper(),
'type': row['type'].strip().upper(),
'active': row['active'].strip().lower(),
'data': row['data'].strip(),
'is_conditional': row['is_conditional'].strip().lower(),
'conditional_count': row['conditional_count'].strip().lower(),
'conditional_limit': row['conditional_limit'].strip().lower(),
'conditional_reset': row['conditional_reset'].strip().lower(),
'conditional_data': row['conditional_data'].strip(),
})
return data
def __get_domain_mapping(self, user_id, reverse=False):
result = db.session.execute(
"SELECT id, domain FROM dns_zones WHERE user_id = :user_id",
{'user_id': user_id}
)
mapping = {}
for row in result:
if reverse:
mapping[row[0]] = row[1]
else:
mapping[row[1]] = row[0]
return mapping
def __get_tag_mapping(self, user_id):
result = db.session.execute(
"SELECT id, name FROM tags WHERE user_id = :user_id",
{'user_id': user_id}
)
mapping = {}
for row in result:
mapping[row[1]] = row[0]
return mapping
def __zone_update_or_create(self, domain, active, catch_all, forwarding, regex, master, user_id, id=None, autocommit=True):
params = {
'domain': domain,
'active': active,
'catch_all': catch_all,
'forwarding': forwarding,
'regex': regex,
'master': master,
'user_id': user_id,
'updated_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
if (id is None) or (id == 0):
params['created_at'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
sql = "INSERT INTO dns_zones (domain, active, catch_all, forwarding, regex, master, user_id, updated_at, created_at)" \
"VALUES(:domain, :active, :catch_all, :forwarding, :regex, :master, :user_id, :updated_at, :created_at)"
else:
params['id'] = id
sql = "UPDATE dns_zones SET domain = :domain, active = :active, catch_all = :catch_all, forwarding = :forwarding, regex = :regex, master = :master, user_id = :user_id, updated_at = :updated_at WHERE id = :id"
result = db.session.execute(sql, params)
if autocommit:
db.session.commit()
return True
def __record_update_or_create(self, zone_id, ttl, cls, type, active, data, is_conditional, conditional_count,
conditional_limit, conditional_reset, conditional_data, id=None, autocommit=True):
params = {
'zone_id': zone_id,
'ttl': ttl,
'cls': cls,
'type': type,
'active': active,
'data': data,
'has_conditional_responses': is_conditional,
'conditional_count': conditional_count,
'conditional_limit': conditional_limit,
'conditional_reset': conditional_reset,
'conditional_data': conditional_data,
'updated_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
if (id is None) or (id == 0):
params['created_at'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
sql = "INSERT INTO dns_records (dns_zone_id, ttl, cls, type, data, active, has_conditional_responses, conditional_count, conditional_limit, conditional_reset, conditional_data, updated_at, created_at) " \
"VALUES(:zone_id, :ttl, :cls, :type, :data, :active, :has_conditional_responses, :conditional_count, :conditional_limit, :conditional_reset, :conditional_data, :updated_at, :created_at)"
else:
params['id'] = id
sql = "UPDATE dns_records SET dns_zone_id = :zone_id, ttl = :ttl, cls = :cls, type = :type, data = :data, active = :active, has_conditional_responses = :has_conditional_responses, conditional_count = :conditional_count, conditional_limit = :conditional_limit, conditional_reset = :conditional_reset, conditional_data = :conditional_data, updated_at = :updated_at WHERE id = :id"
result = db.session.execute(sql, params)
if autocommit:
db.session.commit()
return True
def __tags_create(self, user_id, tags):
for tag in tags:
name = tag.strip().lower()
result = db.session.execute(
"SELECT id FROM tags WHERE name = :name AND user_id = :user_id",
{'name': name, 'user_id': user_id}
).first()
if result is None:
params = {
'user_id': user_id,
'name': tag,
'created_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'updated_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
sql = "INSERT INTO tags (user_id, name, created_at, updated_at) VALUES(:user_id, :name, :created_at, :updated_at)"
db.session.execute(sql, params)
db.session.commit()
return True
def __zone_save_tags(self, zone_id, tags, autocommit=True):
for name, id in tags.items():
params = {
'dns_zone_id': zone_id,
'tag_id': id,
'created_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'updated_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
sql = "INSERT INTO dns_zone_tags (dns_zone_id, tag_id, created_at, updated_at) VALUES(:dns_zone_id, :tag_id, :created_at, :updated_at)"
db.session.execute(sql, params)
if autocommit:
db.session.commit()
return True
def __zone_clear_tags(self, zone_ids, batch_size=100, show_progressbar=False, widget=None):
batches = list(self.__chunks(zone_ids, batch_size))
if show_progressbar:
widget[0] = progressbar.FormatLabel('Removing existing tags')
bar = progressbar.ProgressBar(max_value=len(batches), widgets=widget)
count = 0
for batch in batches:
count += 1
bar.update(count) if show_progressbar else False
i = 0
params = {}
for id in batch:
i += 1
params['param' + str(i)] = id
bind = [':' + v for v in params.keys()]
sql = "DELETE FROM dns_zone_tags WHERE dns_zone_id IN({0})".format(', '.join(bind))
db.session.execute(sql, params)
db.session.commit()
return True
    def __chunks(self, data, size):
        """Yield successive ``size``-sized slices of ``data``.

        The final chunk may be shorter than ``size`` when ``len(data)``
        is not an exact multiple of ``size``.
        """
        # From https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
        for i in range(0, len(data), size):
            yield data[i:i + size]
| 39.89019 | 390 | 0.562342 | from app.lib.dns.helpers.shared import SharedHelper
import os
import datetime
import json
import progressbar
from app import db
class DNSImportManager(SharedHelper):
IMPORT_TYPE_ZONE = 1
IMPORT_TYPE_RECORD = 2
@property
def last_error(self):
return self.__last_error
@last_error.setter
def last_error(self, value):
self.__last_error = value
def __init__(self, dns_zones, dns_records, users):
self.__last_error = ''
self.__dns_zones = dns_zones
self.__dns_records = dns_records
self.__zone_headers = ['domain', 'active', 'catch_all', 'forwarding', 'regex', 'master', 'tags']
self.__record_headers = ['domain', 'id', 'ttl', 'cls', 'type', 'active', 'data', 'is_conditional', 'conditional_count', 'conditional_limit', 'conditional_reset', 'conditional_data']
self.__users = users
def identify(self, csvfile):
self.last_error = ''
if not os.path.isfile(csvfile):
self.last_error = 'CSV file does not exist'
return False
header = self._load_csv_header(csvfile)
zone_header_count = 0
record_header_count = 0
for column in header:
if column in self.__zone_headers:
zone_header_count += 1
if column in self.__record_headers:
record_header_count += 1
if zone_header_count == len(self.__zone_headers):
return self.IMPORT_TYPE_ZONE
elif record_header_count == len(self.__record_headers):
return self.IMPORT_TYPE_RECORD
self.last_error = 'If you are uploading a ZONE file these are the required columns: {0}. If you are uploading a RECORD file then the required columns are: {1}.'.format(', '.join(self.__zone_headers), ', '.join(self.__record_headers))
return False
def review(self, csvfile, type, user_id, show_progressbar=False):
self.last_error = ''
if not os.path.isfile(csvfile):
self.last_error = 'CSV file does not exist'
return False
lines = self._load_csv(csvfile)
if len(lines) == 0:
self.last_error = 'CSV is empty'
return False
user = self.__users.get_user(user_id)
if not user:
self.last_error = 'Could not find user with ID {0}'.format(user_id)
return False
all_errors = []
errors = []
rows = []
if type == self.IMPORT_TYPE_ZONE:
rows = self.__categorise_rows(lines, type)
rows, errors = self.__process_zones(rows, user, show_progressbar=show_progressbar)
elif type == self.IMPORT_TYPE_RECORD:
rows = self.__categorise_rows(lines, type)
rows, errors = self.__process_records(rows, user, show_progressbar=show_progressbar)
all_errors += errors
all_errors = sorted(all_errors, key=lambda k: k['row'])
return {
'data': rows,
'errors': all_errors
}
def run(self, data, type, user_id, show_progressbar=False):
errors = []
if type == self.IMPORT_TYPE_ZONE:
self.__import_zones(data, user_id, show_progressbar=show_progressbar)
elif type == self.IMPORT_TYPE_RECORD:
self.__import_records(data, user_id, errors, show_progressbar=show_progressbar)
return errors if len(errors) > 0 else True
def __import_zones(self, zones, user_id, show_progressbar=False, batch_size=100):
widget = [
progressbar.FormatLabel(''),
' ',
progressbar.Percentage(),
' ',
progressbar.Bar('#'),
' ',
progressbar.RotatingMarker(),
' ',
progressbar.ETA()
]
count = 0
unique_tags = []
if show_progressbar:
widget[0] = progressbar.FormatLabel('Importing zones')
bar = progressbar.ProgressBar(max_value=len(zones), widgets=widget)
for zone_to_import in list(zones):
count += 1
bar.update(count) if show_progressbar else False
self.__zone_update_or_create(
zone_to_import['domain'],
zone_to_import['active'],
zone_to_import['catch_all'],
zone_to_import['forwarding'],
zone_to_import['regex'],
zone_to_import['master'],
user_id,
id=zone_to_import['id'],
autocommit=False
)
if count % batch_size == 0:
db.session.commit()
unique_tags = list(set(unique_tags + zone_to_import['tags']))
db.session.commit()
if show_progressbar:
widget[0] = progressbar.FormatLabel('Re-mapping zones')
bar = progressbar.ProgressBar(max_value=len(zones), widgets=widget)
domain_mapping = self.__get_domain_mapping(user_id)
zone_ids = []
i = 0
for zone_to_import in list(zones):
i += 1
bar.update(i) if show_progressbar else False
zone_to_import['id'] = domain_mapping[zone_to_import['domain']] if zone_to_import['domain'] in domain_mapping else 0
zone_ids.append(zone_to_import['id'])
self.__zone_clear_tags(zone_ids, show_progressbar=show_progressbar, widget=widget)
if show_progressbar:
widget[0] = progressbar.FormatLabel('Importing tags')
bar = progressbar.ProgressBar(max_value=len(zones), widgets=widget)
self.__tags_create(user_id, unique_tags)
tag_mapping = self.__get_tag_mapping(user_id)
count = 0
for zone_to_import in list(zones):
count += 1
bar.update(count) if show_progressbar else False
tags = {}
for tag in zone_to_import['tags']:
tags[tag] = tag_mapping[tag]
self.__zone_save_tags(zone_to_import['id'], tags, autocommit=False)
if count % batch_size == 0:
db.session.commit()
db.session.commit()
return True
def __import_records(self, records, user_id, errors, show_progressbar=False, batch_size = 100):
domain_mapping = self.__get_domain_mapping(user_id)
widget = [
progressbar.FormatLabel(''),
' ',
progressbar.Percentage(),
' ',
progressbar.Bar('#'),
' ',
progressbar.RotatingMarker(),
' ',
progressbar.ETA()
]
if show_progressbar:
widget[0] = progressbar.FormatLabel('Importing records')
bar = progressbar.ProgressBar(max_value=len(records), widgets=widget)
count = 0
for record_to_import in records:
count += 1
bar.update(count) if show_progressbar else False
zone_id = domain_mapping[record_to_import['domain']] if record_to_import['domain'] in domain_mapping else None
if not zone_id:
errors.append('Could not find zone: {0}'.format(record_to_import['domain']))
continue
data = json.dumps(record_to_import['data']) if isinstance(record_to_import['data'], dict) else record_to_import['data']
conditional_data = json.dumps(record_to_import['conditional_data']) if isinstance(record_to_import['conditional_data'], dict) else record_to_import['conditional_data']
self.__record_update_or_create(
zone_id,
record_to_import['ttl'],
record_to_import['cls'],
record_to_import['type'],
record_to_import['active'],
data,
record_to_import['is_conditional'],
record_to_import['conditional_count'],
record_to_import['conditional_limit'],
record_to_import['conditional_reset'],
conditional_data,
id=record_to_import['record_id'],
autocommit=False
)
if count % batch_size == 0:
db.session.commit()
db.session.commit()
return True
def __process_zones(self, zones, user, show_progressbar=False):
errors = []
items = []
widget = [
progressbar.FormatLabel(''),
' ',
progressbar.Percentage(),
' ',
progressbar.Bar('#'),
' ',
progressbar.RotatingMarker(),
' ',
progressbar.ETA()
]
if show_progressbar:
widget[0] = progressbar.FormatLabel('Processing zones')
bar = progressbar.ProgressBar(max_value=len(zones), widgets=widget)
domain_mapping = self.__get_domain_mapping(user.id)
user_base_domain = '.' + self.__dns_zones.get_base_domain(user.admin, user.username)
count = 0
for zone in zones:
count += 1
bar.update(count) if show_progressbar else False
active = True if zone['active'] in ['1', 'yes', 'true'] else False
catch_all = True if zone['catch_all'] in ['1', 'yes', 'true'] else False
forwarding = True if zone['forwarding'] in ['1', 'yes', 'true'] else False
regex = True if zone['regex'] in ['1', 'yes', 'true'] else False
master = True if zone['master'] in ['1', 'yes', 'true'] else False
tags = zone['tags'].split(',')
map(str.strip, tags)
tags = list(filter(None, tags))
is_valid = True
if not user.admin:
if zone['domain'][-len(user_base_domain):] != user_base_domain and user_base_domain != '.' + zone['domain']:
is_valid = False
errors.append({'row': zone['row'], 'error': 'Zone {0} does not match your assigned master domain'.format(zone['domain'])})
if is_valid:
domain = {
'id': domain_mapping[zone['domain']] if zone['domain'] in domain_mapping else 0,
'domain': zone['domain'],
'active': active,
'catch_all': catch_all,
'forwarding': forwarding,
'regex': regex,
'master': master,
'tags': tags
}
items.append(domain)
return items, errors
def __process_records(self, records, user, show_progressbar=False):
errors = []
items = []
widget = [
progressbar.FormatLabel(''),
' ',
progressbar.Percentage(),
' ',
progressbar.Bar('#'),
' ',
progressbar.RotatingMarker(),
' ',
progressbar.ETA()
]
if show_progressbar:
widget[0] = progressbar.FormatLabel('Processing records')
bar = progressbar.ProgressBar(max_value=len(records), widgets=widget)
domain_mapping = self.__get_domain_mapping(user.id)
domain_mapping_reverse = self.__get_domain_mapping(user.id, reverse=True)
count = 0
for record in records:
count += 1
bar.update(count) if show_progressbar else False
record_errors = []
active = True if record['active'] in ['1', 'yes', 'true'] else False
zone_id = self.__process_record_zone(record, record_errors, domain_mapping)
record_id = self.__process_record_id(record, zone_id, record_errors, domain_mapping_reverse)
ttl = self.__process_record_ttl(record, record_errors)
cls = self.__process_record_cls(record, record_errors)
type = self.__process_record_type(record, record_errors)
is_conditional = True if record['is_conditional'] in ['1', 'yes', 'true'] else False
conditional_reset = True if record['conditional_reset'] in ['1', 'yes', 'true'] else False
conditional_count = self.__process_number(record, record_errors, 'conditional_count')
conditional_limit = self.__process_number(record, record_errors, 'conditional_limit')
data = {}
conditional_data = {}
if len(type) > 0:
data = self.__process_record_data(record, type, record_errors)
if is_conditional:
conditional_data = self.__process_record_data(record, type, record_errors, is_conditional=True)
if len(record_errors) == 0:
items.append({
'record_id': record_id,
'zone_id': zone_id,
'domain': record['domain'],
'active': active,
'ttl': ttl,
'cls': cls,
'type': type,
'data': data,
'is_conditional': is_conditional,
'conditional_count': conditional_count,
'conditional_limit': conditional_limit,
'conditional_reset': conditional_reset,
'conditional_data': conditional_data
})
else:
errors += record_errors
return items, errors
def __process_number(self, record, errors, attribute):
value = record[attribute]
if len(value) == 0 or value.isdigit() is False:
errors.append({'row': record['row'], 'error': 'Invalid attribute {0} value: {1}'.format(record[attribute], value)})
return 0
return int(value)
def __process_record_id(self, record, zone_id, errors, domain_mapping):
zone_id = zone_id if zone_id > 0 else None
record_id = 0
if len(record['id']) > 0:
if not record['id'].isdigit():
errors.append({'row': record['row'], 'error': 'Invalid record id: {0}'.format(record['id'])})
return 0
record_id = int(record['id'])
if record_id > 0:
record_exists = self.__record_exists(record_id, dns_zone_id=zone_id)
if not record_exists:
return 0
if zone_id > 0:
domain = domain_mapping[zone_id] if zone_id in domain_mapping else None
if not domain:
errors.append({'row': record['row'], 'error': 'Zone {0} not found'.format(record['domain'])})
return 0
if record['domain'] != domain:
errors.append({'row': record['row'], 'error': 'Record {0} does not belong to zone {1}'.format(record_id, zone_id)})
return 0
return record_id
def __process_record_zone(self, record, errors, domain_mapping):
zone_id = domain_mapping[record['domain']] if record['domain'] in domain_mapping else 0
if zone_id == 0:
errors.append({'row': record['row'], 'error': 'Zone not found: {0}'.format(record['domain'])})
return zone_id
def __record_exists(self, dns_record_id, dns_zone_id=None):
params = {'id': dns_record_id}
sql = "SELECT COUNT(id) AS c FROM dns_records WHERE id = :id"
if dns_zone_id is not None:
params['dns_zone_id'] = dns_zone_id
sql += " AND dns_zone_id = :dns_zone_id"
result = db.session.execute(sql, params).first()
return result[0] > 0 if result is not None else False
    def __process_record_ttl(self, record, errors):
        """Validate and parse the record's TTL.

        Appends an error entry and returns 0 when the TTL is not a digit
        string; otherwise returns the parsed int.
        """
        ttl = 0
        if not record['ttl'].isdigit():
            errors.append({'row': record['row'], 'error': 'Invalid TTL: {0}'.format(record['ttl'])})
        else:
            ttl = int(record['ttl'])
            # NOTE(review): this branch is unreachable -- str.isdigit()
            # rejects a leading minus sign, so ttl can never be < 0 here.
            if ttl < 0:
                errors.append({'row': record['row'], 'error': 'Invalid TTL: {0}'.format(record['ttl'])})
        return ttl
def __process_record_cls(self, record, errors):
cls = ''
if not record['cls'] in self.__dns_records.get_classes():
errors.append({'row': record['row'], 'error': 'Invalid class: {0}'.format(record['cls'])})
else:
cls = record['cls']
return cls
def __process_record_type(self, record, errors):
type = ''
if not record['type'] in self.__dns_records.get_types():
errors.append({'row': record['row'], 'error': 'Invalid type: {0}'.format(record['type'])})
else:
type = record['type']
return type
def __properties_to_dict(self, record, errors, is_conditional=False):
attribute = 'conditional_data' if is_conditional else 'data'
rows = record[attribute].split("\n")
properties = {}
for row in rows:
parts = row.split('=', 1)
if len(parts) != 2:
errors.append({'row': record['row'], 'error': 'Invalid record property: {0}'.format(row)})
continue
name = parts[0].lower().strip()
value = parts[1].strip()
properties[name] = value
return properties
def __process_record_data(self, record, type, errors, is_conditional=False):
record_properties = self.__properties_to_dict(record, errors, is_conditional=is_conditional)
required_properties = self.__dns_records.get_record_type_properties(type, clean=True)
data = {}
for property_name, property_type in required_properties.items():
if not property_name in record_properties:
errors.append({'row': record['row'], 'error': 'Missing record property: {0}'.format(property_name)})
continue
value = record_properties[property_name]
if (property_type == 'int') and (isinstance(value, str)):
if not value.isdigit():
errors.append({'row': record['row'], 'error': "Invalid value '{0}' for property '{1}'".format(value, property_name)})
continue
value = int(value)
if (property_type == 'str') and (len(value) == 0):
errors.append({'row': record['row'], 'error': "Invalid value '{0}' for property '{1}'".format(value, property_name)})
continue
elif (property_type == 'int') and (value < 0):
errors.append({'row': record['row'], 'error': "Invalid value '{0}' for property '{1}'".format(value, property_name)})
continue
data[property_name] = value
return data
def __categorise_rows(self, rows, type):
data = []
for i, row in enumerate(rows):
actual_row = i + 1
if type == self.IMPORT_TYPE_ZONE:
data.append({
'row': actual_row,
'domain': row['domain'].strip().lower(),
'active': row['active'].strip().lower(),
'catch_all': row['catch_all'].strip().lower(),
'forwarding': row['forwarding'].strip().lower(),
'regex': row['regex'].strip().lower(),
'master': row['master'].strip().lower(),
'tags': row['tags'].strip()
})
elif type == self.IMPORT_TYPE_RECORD:
data.append({
'row': actual_row,
'domain': row['domain'].strip().lower(),
'id': row['id'].strip(),
'ttl': row['ttl'].strip().lower(),
'cls': row['cls'].strip().upper(),
'type': row['type'].strip().upper(),
'active': row['active'].strip().lower(),
'data': row['data'].strip(),
'is_conditional': row['is_conditional'].strip().lower(),
'conditional_count': row['conditional_count'].strip().lower(),
'conditional_limit': row['conditional_limit'].strip().lower(),
'conditional_reset': row['conditional_reset'].strip().lower(),
'conditional_data': row['conditional_data'].strip(),
})
return data
def __get_domain_mapping(self, user_id, reverse=False):
result = db.session.execute(
"SELECT id, domain FROM dns_zones WHERE user_id = :user_id",
{'user_id': user_id}
)
mapping = {}
for row in result:
if reverse:
mapping[row[0]] = row[1]
else:
mapping[row[1]] = row[0]
return mapping
def __get_tag_mapping(self, user_id):
result = db.session.execute(
"SELECT id, name FROM tags WHERE user_id = :user_id",
{'user_id': user_id}
)
mapping = {}
for row in result:
mapping[row[1]] = row[0]
return mapping
def __zone_update_or_create(self, domain, active, catch_all, forwarding, regex, master, user_id, id=None, autocommit=True):
params = {
'domain': domain,
'active': active,
'catch_all': catch_all,
'forwarding': forwarding,
'regex': regex,
'master': master,
'user_id': user_id,
'updated_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
if (id is None) or (id == 0):
params['created_at'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
sql = "INSERT INTO dns_zones (domain, active, catch_all, forwarding, regex, master, user_id, updated_at, created_at)" \
"VALUES(:domain, :active, :catch_all, :forwarding, :regex, :master, :user_id, :updated_at, :created_at)"
else:
params['id'] = id
sql = "UPDATE dns_zones SET domain = :domain, active = :active, catch_all = :catch_all, forwarding = :forwarding, regex = :regex, master = :master, user_id = :user_id, updated_at = :updated_at WHERE id = :id"
result = db.session.execute(sql, params)
if autocommit:
db.session.commit()
return True
def __record_update_or_create(self, zone_id, ttl, cls, type, active, data, is_conditional, conditional_count,
conditional_limit, conditional_reset, conditional_data, id=None, autocommit=True):
params = {
'zone_id': zone_id,
'ttl': ttl,
'cls': cls,
'type': type,
'active': active,
'data': data,
'has_conditional_responses': is_conditional,
'conditional_count': conditional_count,
'conditional_limit': conditional_limit,
'conditional_reset': conditional_reset,
'conditional_data': conditional_data,
'updated_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
if (id is None) or (id == 0):
params['created_at'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
sql = "INSERT INTO dns_records (dns_zone_id, ttl, cls, type, data, active, has_conditional_responses, conditional_count, conditional_limit, conditional_reset, conditional_data, updated_at, created_at) " \
"VALUES(:zone_id, :ttl, :cls, :type, :data, :active, :has_conditional_responses, :conditional_count, :conditional_limit, :conditional_reset, :conditional_data, :updated_at, :created_at)"
else:
params['id'] = id
sql = "UPDATE dns_records SET dns_zone_id = :zone_id, ttl = :ttl, cls = :cls, type = :type, data = :data, active = :active, has_conditional_responses = :has_conditional_responses, conditional_count = :conditional_count, conditional_limit = :conditional_limit, conditional_reset = :conditional_reset, conditional_data = :conditional_data, updated_at = :updated_at WHERE id = :id"
result = db.session.execute(sql, params)
if autocommit:
db.session.commit()
return True
def __tags_create(self, user_id, tags):
for tag in tags:
name = tag.strip().lower()
result = db.session.execute(
"SELECT id FROM tags WHERE name = :name AND user_id = :user_id",
{'name': name, 'user_id': user_id}
).first()
if result is None:
params = {
'user_id': user_id,
'name': tag,
'created_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'updated_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
sql = "INSERT INTO tags (user_id, name, created_at, updated_at) VALUES(:user_id, :name, :created_at, :updated_at)"
db.session.execute(sql, params)
db.session.commit()
return True
def __zone_save_tags(self, zone_id, tags, autocommit=True):
for name, id in tags.items():
params = {
'dns_zone_id': zone_id,
'tag_id': id,
'created_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'updated_at': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
}
sql = "INSERT INTO dns_zone_tags (dns_zone_id, tag_id, created_at, updated_at) VALUES(:dns_zone_id, :tag_id, :created_at, :updated_at)"
db.session.execute(sql, params)
if autocommit:
db.session.commit()
return True
def __zone_clear_tags(self, zone_ids, batch_size=100, show_progressbar=False, widget=None):
batches = list(self.__chunks(zone_ids, batch_size))
if show_progressbar:
widget[0] = progressbar.FormatLabel('Removing existing tags')
bar = progressbar.ProgressBar(max_value=len(batches), widgets=widget)
count = 0
for batch in batches:
count += 1
bar.update(count) if show_progressbar else False
i = 0
params = {}
for id in batch:
i += 1
params['param' + str(i)] = id
bind = [':' + v for v in params.keys()]
sql = "DELETE FROM dns_zone_tags WHERE dns_zone_id IN({0})".format(', '.join(bind))
db.session.execute(sql, params)
db.session.commit()
return True
def __chunks(self, data, size):
for i in range(0, len(data), size):
yield data[i:i + size]
| true | true |
f724a1382e8af5cf9306be07c058f0768f836073 | 807 | py | Python | VetsApp/migrations/0001_initial.py | Sabrinax3/Pet-Clinic-1 | 776955d118a46c8d4eaa74de22ea0280b82debc9 | [
"MIT"
] | 2 | 2020-04-13T14:26:54.000Z | 2022-01-19T01:30:25.000Z | VetsApp/migrations/0001_initial.py | Sabrinax3/Pet-Clinic-1 | 776955d118a46c8d4eaa74de22ea0280b82debc9 | [
"MIT"
] | 2 | 2020-05-29T18:52:55.000Z | 2020-05-30T02:06:28.000Z | VetsApp/migrations/0001_initial.py | Sabrinax3/Pet-Clinic-1 | 776955d118a46c8d4eaa74de22ea0280b82debc9 | [
"MIT"
] | 8 | 2020-04-11T08:30:44.000Z | 2020-05-30T03:26:13.000Z | # Generated by Django 3.0.5 on 2020-04-10 10:01
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Vets table."""

    # First migration of the app, so there are no dependencies.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Vets',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Name', models.CharField(max_length=100)),
                ('ContactNo', models.CharField(max_length=30)),
                ('Address', models.CharField(max_length=200)),
                ('University', models.CharField(max_length=100)),
                ('HighestDegree', models.CharField(max_length=50)),
                # NOTE(review): Image is a CharField -- presumably a URL or
                # file path rather than an uploaded image; confirm.
                ('Image', models.CharField(max_length=1000)),
            ],
        ),
    ]
| 29.888889 | 114 | 0.570012 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Vets',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Name', models.CharField(max_length=100)),
('ContactNo', models.CharField(max_length=30)),
('Address', models.CharField(max_length=200)),
('University', models.CharField(max_length=100)),
('HighestDegree', models.CharField(max_length=50)),
('Image', models.CharField(max_length=1000)),
],
),
]
| true | true |
f724a2c802c8c96a90155f3e5f0760fede5be41e | 4,704 | py | Python | stanCode_Projects/name_searching_system/babynames.py | calvin0123/sc-projects | 88ac98e3543a1399387c2033f36dc5c6b86c488c | [
"MIT"
] | null | null | null | stanCode_Projects/name_searching_system/babynames.py | calvin0123/sc-projects | 88ac98e3543a1399387c2033f36dc5c6b86c488c | [
"MIT"
] | null | null | null | stanCode_Projects/name_searching_system/babynames.py | calvin0123/sc-projects | 88ac98e3543a1399387c2033f36dc5c6b86c488c | [
"MIT"
] | 1 | 2021-12-04T22:50:23.000Z | 2021-12-04T22:50:23.000Z | """
SC101 Baby Names Project
Adapted from Nick Parlante's Baby Names assignment by
Jerry Liao.
------------------------------------------
File: babynames.py
Name: Calvin Chen
This file reads the most famous baby names from 1900 to
2010 in the US and stores the .txt into the dictionary
to provide the information for the babygraphics.py.
"""
import sys
def add_data_for_name(name_data, year, rank, name):
    """
    Adds the given year and rank to the associated name in the name_data dict.

    When the same name/year pair is seen again (the data files can list a
    name more than once per year), the numerically smaller -- i.e. better --
    rank is kept.

    Input:
        name_data (dict): dict holding baby name data
        year (str): the year of the data entry to add
        rank (str): the rank of the data entry to add
        name (str): the name of the data entry to add

    Output:
        This function modifies the name_data dict to store the provided
        name, year, and rank. This function does not return any values.
    """
    # Bug fix: the original returned name_data from one branch and None
    # from the others; per its own docstring it now consistently returns
    # nothing. Ranks stay stored as strings; comparison is numeric.
    years = name_data.setdefault(name, {})
    if year not in years or int(rank) < int(years[year]):
        years[year] = rank
def add_file(name_data, filename):
    """
    Reads the information from the specified file and populates the name_data
    dict with the data found in the file.

    Lines without commas carry the year that applies to the ranked lines
    below them; every other line is 'rank,name1,name2'.

    Input:
        name_data (dict): dict holding baby name data
        filename (str): name of the file holding baby name data

    Output:
        This function modifies the name_data dict to store information from
        the provided file name. This function does not return any value.
    """
    with open(filename, 'r') as f:
        for line in f:
            fields = line.split(',')
            if len(fields) <= 1:
                # A comma-less line holds the year for subsequent lines.
                year = fields[0].strip()
            else:
                rank = fields[0].strip()
                for name in (fields[1].strip(), fields[2].strip()):
                    add_data_for_name(name_data, year, rank, name)
def read_files(filenames):
    """
    Reads the data from all files specified in the provided list into a
    single name_data dict and then returns that dict.

    Input:
        filenames (List[str]): a list of filenames containing baby name data

    Returns:
        name_data (dict): the dict storing all baby name data
    """
    name_data = {}
    for filename in filenames:
        add_file(name_data, filename)
    return name_data
def search_names(name_data, target):
    """
    Given a name_data dict that stores baby name information and a target
    string, returns a list of all names in the dict that contain the target
    string. The match is case-insensitive.

    Input:
        name_data (dict): a dict containing baby name data organized by name
        target (str): a string to look for in the names contained within name_data

    Returns:
        matching_names (List[str]): a list of all names from name_data that
        contain the target string
    """
    # Bug fix: the target must be lower-cased too, otherwise a target with
    # any upper-case letter could never match the lower-cased names.
    target = target.lower()
    return [name for name in name_data if target in name.lower()]
def print_names(name_data):
    """
    (provided, DO NOT MODIFY)
    Given a name_data dict, print out all its data, one name per line.
    The names are printed in alphabetical order,
    with the corresponding years data displayed in increasing order.
    Input:
        name_data (dict): a dict containing baby name data organized by name
    Returns:
        This function does not return anything
    """
    # sorted() over dict items orders alphabetically by name; the nested
    # sorted() orders each name's (year, rank) pairs by year.
    for key, value in sorted(name_data.items()):
        print(key, sorted(value.items()))
def main():
    # (provided, DO NOT MODIFY)
    """Parse command-line args, read the name files, then either search
    for a target substring or print every name's data."""
    args = sys.argv[1:]
    # Two command line forms
    # 1. file1 file2 file3 ..
    # 2. -search target file1 file2 file3 ..
    # Assume no search, so list of filenames to read
    # is the args list
    filenames = args
    # Check if we are doing search, set target variable
    target = ''
    if len(args) >= 2 and args[0] == '-search':
        target = args[1]
        filenames = args[2:]  # Update filenames to skip first 2
    # Read in all the filenames: baby-1990.txt, baby-2000.txt, ...
    names = read_files(filenames)
    # Either we do a search or just print everything.
    if len(target) > 0:
        search_results = search_names(names, target)
        for name in search_results:
            print(name)
    else:
        print_names(names)


if __name__ == '__main__':
    main()
| 29.961783 | 84 | 0.624787 |
import sys
def add_data_for_name(name_data, year, rank, name):
    """
    Adds the given year and rank to the associated name in the name_data dict.

    When the same name/year pair is seen again, the numerically smaller
    (better) rank is kept.

    Input:
        name_data (dict): dict holding baby name data
        year (str): the year of the data entry to add
        rank (str): the rank of the data entry to add
        name (str): the name of the data entry to add
    """
    if name not in name_data:
        name_data[name] = {}
        name_data[name][year] = rank
    else:
        if year in name_data[name]:
            if int(name_data[name][year]) < int(rank):
                # Existing rank is better (smaller) -- keep it.
                # NOTE(review): this is the only branch that returns a
                # value; callers ignore the return, so it is benign.
                return name_data
            else:
                name_data[name][year] = rank
        else:
            name_data[name][year] = rank
def add_file(name_data, filename):
with open(filename, 'r') as f:
for line in f:
line_l = line.split(',')
if len(line_l) > 1:
rank = line_l[0].strip()
name1 = line_l[1].strip()
name2 = line_l[2].strip()
add_data_for_name(name_data, year, rank, name1)
add_data_for_name(name_data, year, rank, name2)
else:
year = line_l[0].strip()
def read_files(filenames):
name_data = {}
for file in filenames:
add_file(name_data, file)
return name_data
def search_names(name_data, target):
    """Return all names in ``name_data`` containing ``target``,
    matched case-insensitively.

    Bug fix: the target is lower-cased as well; previously a target with
    any upper-case letter could never match the lower-cased names.
    """
    target = target.lower()
    return [name for name in name_data if target in name.lower()]
def print_names(name_data):
for key, value in sorted(name_data.items()):
print(key, sorted(value.items()))
def main():
args = sys.argv[1:]
filenames = args
target = ''
if len(args) >= 2 and args[0] == '-search':
target = args[1]
filenames = args[2:]
names = read_files(filenames)
if len(target) > 0:
search_results = search_names(names, target)
for name in search_results:
print(name)
else:
print_names(names)
if __name__ == '__main__':
main()
| true | true |
f724a33ad866379ea4c6e3d1a4ecff9dfb612aba | 16,136 | py | Python | flower/api/tasks.py | jiangrz/flower | 4d6fad197e97c9c36f8052345a348345ef4505a3 | [
"BSD-3-Clause"
] | null | null | null | flower/api/tasks.py | jiangrz/flower | 4d6fad197e97c9c36f8052345a348345ef4505a3 | [
"BSD-3-Clause"
] | null | null | null | flower/api/tasks.py | jiangrz/flower | 4d6fad197e97c9c36f8052345a348345ef4505a3 | [
"BSD-3-Clause"
] | 1 | 2021-04-23T17:34:09.000Z | 2021-04-23T17:34:09.000Z | from __future__ import absolute_import
import json
import logging
from datetime import datetime
from threading import Thread
from tornado import web
from tornado import gen
from tornado.escape import json_decode
from tornado.web import HTTPError
from celery import states
from celery.result import AsyncResult
from celery.contrib.abortable import AbortableAsyncResult
from celery.backends.base import DisabledBackend
from ..utils import tasks
from ..views import BaseHandler
from ..utils.broker import Broker
from ..api.control import ControlHandler
logger = logging.getLogger(__name__)
class BaseTaskHandler(BaseHandler):
    """Shared helpers for the task API handlers: request-body parsing,
    apply_async option normalisation and JSON-safe result serialisation."""

    def get_task_args(self):
        """Decode the JSON request body into ``(args, kwargs, options)``.

        Raises HTTPError(400) on malformed JSON or when 'args' is not an
        array.
        """
        try:
            body = self.request.body
            options = json_decode(body) if body else {}
        except ValueError as e:
            raise HTTPError(400, str(e))
        args = options.pop('args', [])
        kwargs = options.pop('kwargs', {})
        if not isinstance(args, (list, tuple)):
            raise HTTPError(400, 'args must be an array')
        return args, kwargs, options

    @staticmethod
    def backend_configured(result):
        """True when a result backend is configured; task state/results
        are only available in that case."""
        return not isinstance(result.backend, DisabledBackend)

    def write_error(self, status_code, **kwargs):
        # Suppress Tornado's default HTML error page; the API answers
        # with the status code only.
        self.set_status(status_code)

    def update_response_result(self, response, result):
        """Copy the task result (plus traceback on FAILURE) into the
        response dict in a JSON-safe form."""
        if result.state == states.FAILURE:
            response.update({'result': self.safe_result(result.result),
                             'traceback': result.traceback})
        else:
            response.update({'result': self.safe_result(result.result)})

    def normalize_options(self, options):
        """Convert 'eta'/'countdown'/'expires' entries in ``options`` (in
        place) to the types expected by ``apply_async``.

        NOTE(review): ``DATE_FORMAT`` is defined on TaskAsyncApply, not on
        this base class -- handlers passing 'eta'/'expires' must provide it.
        """
        if 'eta' in options:
            options['eta'] = datetime.strptime(options['eta'],
                                               self.DATE_FORMAT)
        if 'countdown' in options:
            options['countdown'] = float(options['countdown'])
        if 'expires' in options:
            expires = options['expires']
            # 'expires' may be either a float (seconds) or a datetime string.
            try:
                expires = float(expires)
            except ValueError:
                expires = datetime.strptime(expires, self.DATE_FORMAT)
            options['expires'] = expires

    def safe_result(self, result):
        """Return ``result`` if it is JSON encodable, otherwise its repr()."""
        try:
            json.dumps(result)
        except TypeError:
            return repr(result)
        else:
            return result
class TaskApply(BaseTaskHandler):
    """POST /api/task/apply/<name>: run a task and block until its result
    is available, returning the result in the response body."""

    @web.authenticated
    @web.asynchronous
    def post(self, taskname):
        """
        Execute a task by name and wait results

        **Example request**:

        .. sourcecode:: http

            POST /api/task/apply/tasks.add HTTP/1.1
            Accept: application/json
            Accept-Encoding: gzip, deflate, compress
            Content-Length: 16
            Content-Type: application/json; charset=utf-8
            Host: localhost:5555

            {
                "args": [1, 2]
            }

        **Example response**:

        .. sourcecode:: http

            HTTP/1.1 200 OK
            Content-Length: 71
            Content-Type: application/json; charset=UTF-8

            {
                "state": "SUCCESS",
                "task-id": "c60be250-fe52-48df-befb-ac66174076e6",
                "result": 3
            }

        :query args: a list of arguments
        :query kwargs: a dictionary of arguments
        :reqheader Authorization: optional OAuth token to authenticate
        :statuscode 200: no error
        :statuscode 401: unauthorized request
        :statuscode 404: unknown task
        """
        args, kwargs, options = self.get_task_args()
        logger.debug("Invoking a task '%s' with '%s' and '%s'",
                     taskname, args, kwargs)
        try:
            task = self.capp.tasks[taskname]
        except KeyError:
            raise HTTPError(404, "Unknown task '%s'" % taskname)
        try:
            self.normalize_options(options)
        except ValueError:
            raise HTTPError(400, 'Invalid option')
        result = task.apply_async(args=args, kwargs=kwargs, **options)
        response = {'task-id': result.task_id}
        # Block for the result in a worker thread so the IOLoop is not
        # stalled; the current IOLoop is captured here (on the IOLoop
        # thread) and handed to the worker for the finishing callback.
        io_loop = IOLoop.current()
        th = Thread(target=self.wait_results, args=(result, response, io_loop))
        th.start()
        # @web.asynchronous keeps the request open until self.finish().

    def wait_results(self, result, response, io_loop=None):
        """Runs in a worker thread: wait for the task result, then finish
        the request from the IOLoop thread.

        Bug fix: RequestHandler methods such as finish() are not
        thread-safe in Tornado; the only safe cross-thread call is
        IOLoop.add_callback, so finishing is scheduled on the IOLoop
        instead of being called directly from this thread.

        :param io_loop: IOLoop to finish on; kept optional for backward
            compatibility -- falls back to a direct finish() when absent.
        """
        # Wait until the task finishes; never raise here -- a FAILURE
        # state is reported in the response body instead.
        result.get(propagate=False)
        self.update_response_result(response, result)
        if self.backend_configured(result):
            response.update(state=result.state)
        if io_loop is not None:
            io_loop.add_callback(self.finish, response)
        else:
            self.finish(response)
class TaskAsyncApply(BaseTaskHandler):
    # Format accepted for string 'eta'/'expires' options.
    DATE_FORMAT = '%Y-%m-%d %H:%M:%S.%f'

    @web.authenticated
    def post(self, taskname):
        """
        Execute a task without waiting for its result
        (``POST /api/task/async-apply/<taskname>``).
        :query args: a list of arguments
        :query kwargs: a dictionary of arguments
        :query options: a dictionary of `apply_async` keyword arguments
        :reqheader Authorization: optional OAuth token to authenticate
        :statuscode 200: no error
        :statuscode 401: unauthorized request
        :statuscode 404: unknown task
        """
        args, kwargs, options = self.get_task_args()
        logger.debug("Invoking a task '%s' with '%s' and '%s'",
                     taskname, args, kwargs)
        if taskname not in self.capp.tasks:
            raise HTTPError(404, "Unknown task '%s'" % taskname)
        task = self.capp.tasks[taskname]
        try:
            self.normalize_options(options)
        except ValueError:
            raise HTTPError(400, 'Invalid option')
        async_result = task.apply_async(args=args, kwargs=kwargs, **options)
        reply = {'task-id': async_result.task_id}
        # The state is only meaningful when a result backend exists.
        if self.backend_configured(async_result):
            reply['state'] = async_result.state
        self.write(reply)
class TaskSend(BaseTaskHandler):
    @web.authenticated
    def post(self, taskname):
        """
        Execute a task by name without requiring the task sources
        (``POST /api/task/send-task/<taskname>``).
        :query args: a list of arguments
        :query kwargs: a dictionary of arguments
        :reqheader Authorization: optional OAuth token to authenticate
        :statuscode 200: no error
        :statuscode 401: unauthorized request
        :statuscode 404: unknown task
        """
        args, kwargs, options = self.get_task_args()
        logger.debug("Invoking task '%s' with '%s' and '%s'",
                     taskname, args, kwargs)
        # send_task() publishes by name, so no task module is imported here.
        async_result = self.capp.send_task(
            taskname, args=args, kwargs=kwargs, **options)
        reply = {'task-id': async_result.task_id}
        if self.backend_configured(async_result):
            reply['state'] = async_result.state
        self.write(reply)
class TaskResult(BaseTaskHandler):
    @web.authenticated
    def get(self, taskid):
        """
        Get a task result
        **Example request**:
        .. sourcecode:: http
          GET /api/task/result/c60be250-fe52-48df-befb-ac66174076e6 HTTP/1.1
          Host: localhost:5555
        **Example response**:
        .. sourcecode:: http
          HTTP/1.1 200 OK
          Content-Length: 84
          Content-Type: application/json; charset=UTF-8
          {
              "result": 3,
              "state": "SUCCESS",
              "task-id": "c60be250-fe52-48df-befb-ac66174076e6"
          }
        :query timeout: how long to wait, in seconds, before the operation times out
        :reqheader Authorization: optional OAuth token to authenticate
        :statuscode 200: no error
        :statuscode 401: unauthorized request
        :statuscode 503: result backend is not configured
        """
        timeout = self.get_argument('timeout', None)
        timeout = float(timeout) if timeout is not None else None
        # NOTE(review): AsyncResult is built without an explicit app, so it
        # presumably binds to the celery current app — confirm this matches
        # self.capp in multi-app setups.
        result = AsyncResult(taskid)
        if not self.backend_configured(result):
            raise HTTPError(503)
        response = {'task-id': taskid, 'state': result.state}
        # Truthiness check: timeout=0 is treated the same as "no timeout"
        # and falls through to the non-blocking ready() branch below.
        if timeout:
            # propagate=False: a failed task is reported as data, not raised.
            result.get(timeout=timeout, propagate=False)
            self.update_response_result(response, result)
        elif result.ready():
            self.update_response_result(response, result)
        self.write(response)
class TaskAbort(BaseTaskHandler):
    @web.authenticated
    def post(self, taskid):
        """
        Abort a running task (``POST /api/task/abort/<taskid>``).
        :reqheader Authorization: optional OAuth token to authenticate
        :statuscode 200: no error
        :statuscode 401: unauthorized request
        :statuscode 503: result backend is not configured
        """
        logger.info("Aborting task '%s'", taskid)
        abortable = AbortableAsyncResult(taskid)
        # Abort is delivered through the result backend, so one must exist.
        if not self.backend_configured(abortable):
            raise HTTPError(503)
        abortable.abort()
        self.write({'message': "Aborted '%s'" % taskid})
class GetQueueLengths(BaseTaskHandler):
    @web.authenticated
    @gen.coroutine
    def get(self):
        """Return the message counts of the active broker queues.

        Responds with ``{'active_queues': [...]}`` as reported by the
        broker inspection helper.
        """
        app = self.application
        broker_options = self.capp.conf.BROKER_TRANSPORT_OPTIONS
        http_api = None
        # The management HTTP API is only consulted for amqp transports,
        # and only when the user configured --broker_api.
        if app.transport == 'amqp' and app.options.broker_api:
            http_api = app.options.broker_api
        broker = Broker(app.capp.connection().as_uri(include_password=True),
                        http_api=http_api, broker_options=broker_options)
        queue_names = ControlHandler.get_active_queue_names()
        # Fall back to the default queue when no worker has reported any
        # active queues (e.g. no worker is online yet).
        if not queue_names:
            queue_names = set([self.capp.conf.CELERY_DEFAULT_QUEUE])
        queues = yield broker.queues(sorted(queue_names))
        self.write({'active_queues': queues})
class ListTasks(BaseTaskHandler):
    @web.authenticated
    def get(self):
        """
        List tasks (``GET /api/tasks``).
        :query limit: maximum number of tasks
        :query workername: filter task by workername
        :query taskname: filter tasks by taskname
        :query state: filter tasks by state
        :reqheader Authorization: optional OAuth token to authenticate
        :statuscode 200: no error
        :statuscode 401: unauthorized request
        """
        app = self.application

        def _filter_argument(name):
            # 'All' is the UI placeholder meaning "no filter".
            value = self.get_argument(name, None)
            return None if value == 'All' else value

        limit = self.get_argument('limit', None)
        worker = _filter_argument('workername')
        task_name = _filter_argument('taskname')
        state = _filter_argument('state')
        limit = limit and int(limit)

        rows = []
        for uuid, task in tasks.iter_tasks(
                app.events, limit=limit, type=task_name,
                worker=worker, state=state):
            info = tasks.as_dict(task)
            # The worker object itself is not JSON-serializable.
            info.pop('worker', None)
            rows.append((uuid, info))
        self.write(dict(rows))
class ListTaskTypes(BaseTaskHandler):
    @web.authenticated
    def get(self):
        """
        List (seen) task types (``GET /api/task/types``).
        :reqheader Authorization: optional OAuth token to authenticate
        :statuscode 200: no error
        :statuscode 401: unauthorized request
        """
        # The event state records every task name it has observed so far.
        self.write({
            'task-types': self.application.events.state.task_types(),
        })
class TaskInfo(BaseTaskHandler):
    @web.authenticated
    def get(self, taskid):
        """
        Get info about a single task (``GET /api/task/info/<taskid>``).
        :reqheader Authorization: optional OAuth token to authenticate
        :statuscode 200: no error
        :statuscode 401: unauthorized request
        :statuscode 404: unknown task
        """
        task = tasks.get_task_by_id(self.application.events, taskid)
        if not task:
            raise HTTPError(404, "Unknown task '%s'" % taskid)
        # 'uuid' is renamed to 'task-id' and 'worker' is flattened to a
        # hostname below, so both are excluded from the generic copy.
        skipped = ('uuid', 'worker')
        response = {field: getattr(task, field, None)
                    for field in task._fields if field not in skipped}
        response['task-id'] = task.uuid
        if task.worker is not None:
            response['worker'] = task.worker.hostname
        self.write(response)
| 26.715232 | 76 | 0.622707 | from __future__ import absolute_import
import json
import logging
from datetime import datetime
from threading import Thread
from tornado import web
from tornado import gen
from tornado.escape import json_decode
from tornado.web import HTTPError
from celery import states
from celery.result import AsyncResult
from celery.contrib.abortable import AbortableAsyncResult
from celery.backends.base import DisabledBackend
from ..utils import tasks
from ..views import BaseHandler
from ..utils.broker import Broker
from ..api.control import ControlHandler
logger = logging.getLogger(__name__)
class BaseTaskHandler(BaseHandler):
def get_task_args(self):
try:
body = self.request.body
options = json_decode(body) if body else {}
except ValueError as e:
raise HTTPError(400, str(e))
args = options.pop('args', [])
kwargs = options.pop('kwargs', {})
if not isinstance(args, (list, tuple)):
raise HTTPError(400, 'args must be an array')
return args, kwargs, options
@staticmethod
def backend_configured(result):
return not isinstance(result.backend, DisabledBackend)
def write_error(self, status_code, **kwargs):
self.set_status(status_code)
def update_response_result(self, response, result):
if result.state == states.FAILURE:
response.update({'result': self.safe_result(result.result),
'traceback': result.traceback})
else:
response.update({'result': self.safe_result(result.result)})
def normalize_options(self, options):
if 'eta' in options:
options['eta'] = datetime.strptime(options['eta'],
self.DATE_FORMAT)
if 'countdown' in options:
options['countdown'] = float(options['countdown'])
if 'expires' in options:
expires = options['expires']
try:
expires = float(expires)
except ValueError:
expires = datetime.strptime(expires, self.DATE_FORMAT)
options['expires'] = expires
def safe_result(self, result):
try:
json.dumps(result)
except TypeError:
return repr(result)
else:
return result
class TaskApply(BaseTaskHandler):
@web.authenticated
@web.asynchronous
def post(self, taskname):
args, kwargs, options = self.get_task_args()
logger.debug("Invoking a task '%s' with '%s' and '%s'",
taskname, args, kwargs)
try:
task = self.capp.tasks[taskname]
except KeyError:
raise HTTPError(404, "Unknown task '%s'" % taskname)
try:
self.normalize_options(options)
except ValueError:
raise HTTPError(400, 'Invalid option')
result = task.apply_async(args=args, kwargs=kwargs, **options)
response = {'task-id': result.task_id}
th = Thread(target=self.wait_results, args=(result, response, ))
th.start()
def wait_results(self, result, response):
result.get(propagate=False)
self.update_response_result(response, result)
if self.backend_configured(result):
response.update(state=result.state)
self.finish(response)
class TaskAsyncApply(BaseTaskHandler):
DATE_FORMAT = '%Y-%m-%d %H:%M:%S.%f'
@web.authenticated
def post(self, taskname):
args, kwargs, options = self.get_task_args()
logger.debug("Invoking a task '%s' with '%s' and '%s'",
taskname, args, kwargs)
try:
task = self.capp.tasks[taskname]
except KeyError:
raise HTTPError(404, "Unknown task '%s'" % taskname)
try:
self.normalize_options(options)
except ValueError:
raise HTTPError(400, 'Invalid option')
result = task.apply_async(args=args, kwargs=kwargs, **options)
response = {'task-id': result.task_id}
if self.backend_configured(result):
response.update(state=result.state)
self.write(response)
class TaskSend(BaseTaskHandler):
@web.authenticated
def post(self, taskname):
args, kwargs, options = self.get_task_args()
logger.debug("Invoking task '%s' with '%s' and '%s'",
taskname, args, kwargs)
result = self.capp.send_task(
taskname, args=args, kwargs=kwargs, **options)
response = {'task-id': result.task_id}
if self.backend_configured(result):
response.update(state=result.state)
self.write(response)
class TaskResult(BaseTaskHandler):
@web.authenticated
def get(self, taskid):
timeout = self.get_argument('timeout', None)
timeout = float(timeout) if timeout is not None else None
result = AsyncResult(taskid)
if not self.backend_configured(result):
raise HTTPError(503)
response = {'task-id': taskid, 'state': result.state}
if timeout:
result.get(timeout=timeout, propagate=False)
self.update_response_result(response, result)
elif result.ready():
self.update_response_result(response, result)
self.write(response)
class TaskAbort(BaseTaskHandler):
@web.authenticated
def post(self, taskid):
logger.info("Aborting task '%s'", taskid)
result = AbortableAsyncResult(taskid)
if not self.backend_configured(result):
raise HTTPError(503)
result.abort()
self.write(dict(message="Aborted '%s'" % taskid))
class GetQueueLengths(BaseTaskHandler):
@web.authenticated
@gen.coroutine
def get(self):
app = self.application
broker_options = self.capp.conf.BROKER_TRANSPORT_OPTIONS
http_api = None
if app.transport == 'amqp' and app.options.broker_api:
http_api = app.options.broker_api
broker = Broker(app.capp.connection().as_uri(include_password=True),
http_api=http_api, broker_options=broker_options)
queue_names = ControlHandler.get_active_queue_names()
if not queue_names:
queue_names = set([self.capp.conf.CELERY_DEFAULT_QUEUE])
queues = yield broker.queues(sorted(queue_names))
self.write({'active_queues': queues})
class ListTasks(BaseTaskHandler):
@web.authenticated
def get(self):
app = self.application
limit = self.get_argument('limit', None)
worker = self.get_argument('workername', None)
type = self.get_argument('taskname', None)
state = self.get_argument('state', None)
limit = limit and int(limit)
worker = worker if worker != 'All' else None
type = type if type != 'All' else None
state = state if state != 'All' else None
result = []
for task_id, task in tasks.iter_tasks(
app.events, limit=limit, type=type,
worker=worker, state=state):
task = tasks.as_dict(task)
task.pop('worker', None)
result.append((task_id, task))
self.write(dict(result))
class ListTaskTypes(BaseTaskHandler):
@web.authenticated
def get(self):
seen_task_types = self.application.events.state.task_types()
response = {}
response['task-types'] = seen_task_types
self.write(response)
class TaskInfo(BaseTaskHandler):
@web.authenticated
def get(self, taskid):
task = tasks.get_task_by_id(self.application.events, taskid)
if not task:
raise HTTPError(404, "Unknown task '%s'" % taskid)
response = {}
for name in task._fields:
if name not in ['uuid', 'worker']:
response[name] = getattr(task, name, None)
response['task-id'] = task.uuid
if task.worker is not None:
response['worker'] = task.worker.hostname
self.write(response)
| true | true |
f724a3584071ba22c1c4ba5bdfbe1f6a5f6bdc1b | 3,428 | py | Python | source/draw_ising_ph.py | OminiaVincit/qphase-trans | 40e0c078dcd74282e8d8f44690433bf670bff8cb | [
"MIT"
] | null | null | null | source/draw_ising_ph.py | OminiaVincit/qphase-trans | 40e0c078dcd74282e8d8f44690433bf670bff8cb | [
"MIT"
] | null | null | null | source/draw_ising_ph.py | OminiaVincit/qphase-trans | 40e0c078dcd74282e8d8f44690433bf670bff8cb | [
"MIT"
] | null | null | null | import sys
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
import time
import argparse
from visual_utils import generate_listcol
import seaborn as sns
def calculate_npent(death_scales):
    """Return the normalized persistent entropy of the given death scales.

    Each death scale ``d_i`` is turned into a probability
    ``p_i = d_i / S`` with ``S = sum(d_i)``; the Shannon entropy
    ``-sum(p_i * log(p_i))`` is then normalized by ``log(S)``.

    Parameters
    ----------
    death_scales : array_like
        Finite, positive death scales.  Must be non-empty; zero entries
        are undefined (``log(0)``), as in the original implementation.

    Returns
    -------
    float
        The normalized persistent entropy.
    """
    scales = np.asarray(death_scales, dtype=float)
    total = scales.sum()
    probs = scales / total
    # Vectorized equivalent of the original per-point loop:
    # npent = -sum(p * log(p)), normalized by log(total).
    entropy = -np.sum(probs * np.log(probs))
    return entropy / np.log(total)
if __name__ == '__main__':
    # Check for command line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--basename', type=str, default='exp_20200217_ising')
    parser.add_argument('--res', type=str, default='results')
    parser.add_argument('--dim', type=int, default=0)
    args = parser.parse_args()
    print(args)
    resname, basename, d = args.res, args.basename, args.dim
    # Global plotting style: colorblind palette, serif fonts, CM math text.
    plt.style.use('seaborn-colorblind')
    #cycles = plt.rcParams['axes.prop_cycle'].by_key()['color']
    cycles = generate_listcol(option=3)
    print(cycles)
    plt.rc('font', family='serif')
    plt.rc('mathtext', fontset='cm')
    plt.rcParams['font.size'] = 16
    # Transverse-field values to plot, one subplot per g.
    gs = [0.2, 0.8, 1.0, 1.2, 1.8]
    N = len(gs)
    fig, axs = plt.subplots(1, N, figsize=(3*N, 2.8), squeeze=False, sharey=True)
    axs = axs.ravel()
    #ax.set_xlabel(r"Transverse Field " r"$g$", fontsize=24)
    mk = '_'
    lstyle = 'dashed'
    sz=80
    alpha=1.0
    # System sizes; one KDE curve per L in each subplot.
    Ls = [32, 64, 128, 256, 512, 1024]
    for j in range(len(gs)):
        ax = axs[j]
        g = gs[j]
        # Map g to the row index stored in column 3 of the data files
        # (presumably a g-grid starting at 0.1 with step 0.05 — TODO confirm).
        gidx = int((g - 0.1) / 0.05)
        for i in range(len(Ls)):
            L = Ls[i]
            phfile = '{}_L_{}_ph_dim_{}.txt'.format(basename, L, d)
            phfile = os.path.join(resname, phfile)
            print(phfile)
            if os.path.isfile(phfile):
                arr = np.loadtxt(phfile)
                # Column 1: death scales; column 3: g-grid index per row.
                death_scales, nlist = arr[:, 1], arr[:, 3]
                ids1 = (death_scales != np.inf)
                ids2 = (nlist == gidx)
                # Elementwise AND of the two boolean masks.
                ids = ids1 * ids2
                death_scales = death_scales[ids]
                npent = calculate_npent(death_scales)
                print(arr.shape, gidx, len(death_scales), npent)
                sns.kdeplot(death_scales, legend=False, shade=True, color=cycles[i], ax=ax, label='$L$={}'.format(L))
                #sns.displot(death_scales[ids], bins=20, ax=ax)
            #ax.plot(glist, npent_list, linestyle=lstyle, label = 'e-{}'.format(L))
            #ax.plot(glist, pnorm_list, linestyle=lstyle, label = 'p-{}'.format(L))
            #ax.plot(glist, vals_list, linestyle='solid', marker='o', color=cols[i], alpha=alpha, linewidth=1.0, markersize=8, label='L={}'.format(L))
            #ax.scatter(glist, vals_list, s=sz, alpha=alpha, edgecolor='k', linewidths='1', label = 'L-{}'.format(L))
            #ax.scatter(glist, pnorm_list, s=sz, alpha=alpha, label = 'p-{}'.format(L))
        #ax.set_xlabel('Birth-scale')
        ax.set_ylabel('')
        ax.set_xticks([0.0, 0.5])
        ax.tick_params(direction='out', length=8)
        ax.set_xlim([0.0, 0.6])
        ax.set_ylim([0, 60])
        # NOTE(review): 'npent' is the value from the last L whose data file
        # existed; if none matched in this subplot, this line raises
        # NameError — confirm the data files are always present.
        ax.set_title('$g$={},E={:.3f}'.format(g, npent))
    axs[0].legend(fontsize=10)
    #axs[0].set_ylabel('Density')
    for figtype in ['png', 'pdf', 'svg']:
        fig_ofile = os.path.join(resname, '{}_diagram_d_{}.{}'.format(basename,d, figtype))
        plt.savefig(fig_ofile, bbox_inches='tight', format=figtype)
    plt.show()
| 36.860215 | 154 | 0.57147 | import sys
import numpy as np
import matplotlib.pyplot as plt
import sys
import os
import time
import argparse
from visual_utils import generate_listcol
import seaborn as sns
def calculate_npent(death_scales):
sd = np.sum(death_scales)
npent = 0
for d in death_scales:
dr = d/sd
npent -= dr*np.log(dr)
npent = npent/np.log(sd)
return npent
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--basename', type=str, default='exp_20200217_ising')
parser.add_argument('--res', type=str, default='results')
parser.add_argument('--dim', type=int, default=0)
args = parser.parse_args()
print(args)
resname, basename, d = args.res, args.basename, args.dim
plt.style.use('seaborn-colorblind')
cycles = generate_listcol(option=3)
print(cycles)
plt.rc('font', family='serif')
plt.rc('mathtext', fontset='cm')
plt.rcParams['font.size'] = 16
gs = [0.2, 0.8, 1.0, 1.2, 1.8]
N = len(gs)
fig, axs = plt.subplots(1, N, figsize=(3*N, 2.8), squeeze=False, sharey=True)
axs = axs.ravel()
mk = '_'
lstyle = 'dashed'
sz=80
alpha=1.0
Ls = [32, 64, 128, 256, 512, 1024]
for j in range(len(gs)):
ax = axs[j]
g = gs[j]
gidx = int((g - 0.1) / 0.05)
for i in range(len(Ls)):
L = Ls[i]
phfile = '{}_L_{}_ph_dim_{}.txt'.format(basename, L, d)
phfile = os.path.join(resname, phfile)
print(phfile)
if os.path.isfile(phfile):
arr = np.loadtxt(phfile)
death_scales, nlist = arr[:, 1], arr[:, 3]
ids1 = (death_scales != np.inf)
ids2 = (nlist == gidx)
ids = ids1 * ids2
death_scales = death_scales[ids]
npent = calculate_npent(death_scales)
print(arr.shape, gidx, len(death_scales), npent)
sns.kdeplot(death_scales, legend=False, shade=True, color=cycles[i], ax=ax, label='$L$={}'.format(L))
ax.set_ylabel('')
ax.set_xticks([0.0, 0.5])
ax.tick_params(direction='out', length=8)
ax.set_xlim([0.0, 0.6])
ax.set_ylim([0, 60])
ax.set_title('$g$={},E={:.3f}'.format(g, npent))
axs[0].legend(fontsize=10)
for figtype in ['png', 'pdf', 'svg']:
fig_ofile = os.path.join(resname, '{}_diagram_d_{}.{}'.format(basename,d, figtype))
plt.savefig(fig_ofile, bbox_inches='tight', format=figtype)
plt.show()
| true | true |
f724a3bb3cfd2e2c82e1c443ccd2a3266923c550 | 5,090 | py | Python | src/niveristand/clientapi/realtimesequencedefinition.py | arnoldcsorvasi/niveristand-python | 39e5593e10bb372c801d6fa521e8fc166dab8cfe | [
"MIT"
] | 6 | 2018-07-04T10:59:43.000Z | 2022-03-24T13:34:33.000Z | src/niveristand/clientapi/realtimesequencedefinition.py | arnoldcsorvasi/niveristand-python | 39e5593e10bb372c801d6fa521e8fc166dab8cfe | [
"MIT"
] | 14 | 2018-11-05T20:05:33.000Z | 2022-03-10T12:54:58.000Z | src/niveristand/clientapi/realtimesequencedefinition.py | arnoldcsorvasi/niveristand-python | 39e5593e10bb372c801d6fa521e8fc166dab8cfe | [
"MIT"
] | 15 | 2018-07-04T07:58:49.000Z | 2022-02-22T16:35:26.000Z | import os
from niveristand import _errormessages, errors
from niveristand import _internal
from niveristand._translation.py2rtseq.utils import _py_param_name_to_rtseq_param_name
from niveristand.clientapi import stimulusprofileapi
from niveristand.clientapi._factory import _DefaultGatewayFactory
from niveristand.clientapi._sequencecallinfo import _SequenceCallInfoFactory
from niveristand.clientapi._sequenceparameterassignmentinfo import _SequenceParameterAssignmentInfoFactory
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import Expression # noqa: I100
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import ForEachLoop
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import ForLoop
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import GenerateError
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import IfElse
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import LocalDeclaration
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import Multitask
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import RealTimeSequence
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import ReturnDeclaration
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import StopTask
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import Task
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import WhileLoop
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import Yield
from System.IO import IOException
_internal.dummy()
factory = None
workspace = None
def add_local_variable(rt_seq, name, value):
    """Declare a uniquely-named local variable on *rt_seq*; return its name.

    *value* is a clientapi value wrapper — its ``._data_value`` (the
    underlying .NET data value) is stored in the declaration.
    """
    name = _create_unique_lv_name(name)
    local_declaration = LocalDeclaration(name, value._data_value)
    rt_seq.Variables.LocalVariables.AddLocalVariable(local_declaration)
    return name
def add_assignment(block, dest_name, source_name):
    """Append a ``dest = source`` assignment statement to *block*."""
    add_expression(block, '%s = %s' % (dest_name, source_name))
def add_expression(block, expression):
    """Append an expression statement (given as a string) to *block*."""
    block.AddStatement(Expression('%s' % expression))
def add_yield(block):
    """Append a Yield statement to *block*."""
    block.AddStatement(Yield())
def add_if_else(block, test_condition):
    """Append an if/else statement testing *test_condition*; return it."""
    if_else = IfElse(Expression(test_condition))
    block.AddStatement(if_else)
    return if_else
def add_for_loop(block, loop_variable, iterations):
    """Append a for loop running *iterations* times; return the loop node."""
    for_loop = ForLoop(loop_variable, Expression(str(iterations)), False)
    block.AddStatement(for_loop)
    return for_loop
def add_foreach_loop(block, loop_variable, iterations):
    """Append a for-each loop over *iterations*; return the loop node."""
    foreach_loop = ForEachLoop(loop_variable, Expression(str(iterations)), False)
    block.AddStatement(foreach_loop)
    return foreach_loop
def add_while(block, test_condition):
    """Append a while loop testing *test_condition*; return the loop node."""
    while_block = WhileLoop(Expression(test_condition), False)
    block.AddStatement(while_block)
    return while_block
def add_multi_task(block):
    """Append a Multitask statement to *block*; return it."""
    multi_task = Multitask()
    block.AddStatement(multi_task)
    return multi_task
def add_task(multi_task, name):
    """Add a task named *name* to *multi_task*; return the task's body block."""
    task = Task(name)
    multi_task.AddTask(task)
    return task.Body
def create_real_time_sequence():
    """Return a new, empty RealTimeSequence."""
    return RealTimeSequence()
def add_return_variable(rtseq, name, default_value):
    """Declare the uniquely-named return variable of *rtseq*; return its name.

    *default_value* is a clientapi value wrapper — its ``._data_value``
    is stored in the return declaration.
    """
    name = _create_unique_lv_name(name)
    return_declaration = ReturnDeclaration(name, default_value._data_value)
    rtseq.Variables.ReturnType = return_declaration
    return name
def add_generate_error(block, code, message, action):
    """Append a GenerateError(code, message, action) statement to *block*."""
    block.AddStatement(GenerateError(code, message, action))
def add_stop_task(block, taskname):
    """Append a statement stopping the task named *taskname* to *block*."""
    block.AddStatement(StopTask(taskname))
def save_real_time_sequence(rtseq, filepath):
    """Save *rtseq* to *filepath*.

    Raises
    ------
    IOError
        If the underlying .NET API fails to write the file (the .NET
        ``IOException`` is translated, preserving its message).
    """
    try:
        # The original wrapped filepath in a single-argument
        # os.path.join(), which returns its argument unchanged — dropped.
        rtseq.SaveSequence(filepath)
    except IOException as e:
        raise IOError(e.Message)
def _create_unique_lv_name(name):
try:
_create_unique_lv_name.lv_cnt += 1
except AttributeError:
_create_unique_lv_name.lv_cnt = 0
if name is None:
name = ''
name = 'lv_' + name + '_' + str(_create_unique_lv_name.lv_cnt)
_create_unique_lv_name.lv_cnt += 1
return name
def to_channel_ref_name(name):
    """Return *name* prefixed with ``ch_`` (channel-reference naming)."""
    return ''.join(('ch_', name))
def _get_channel_node_info(name, node_info_list):
for channel in node_info_list:
if channel.FullPath == name:
return channel
raise errors.VeristandError(_errormessages.channel_not_found % name)
def run_rt_sequence(rt_sequence_path, rtseq_params):
    """Deploy the real-time sequence at *rt_sequence_path* and start it.

    *rtseq_params* maps Python-style parameter names to values; each name
    is converted to RT-sequence naming before being passed along.  Returns
    a :class:`stimulusprofileapi.StimulusProfileState` that is notified
    when the sequence completes.
    """
    # Build one parameter-assignment object per (converted name, value).
    rtseq_params = \
        [_SequenceParameterAssignmentInfoFactory.create(_py_param_name_to_rtseq_param_name(key), rtseq_params[key])
         for key in rtseq_params]
    # NOTE(review): 100000 looks like a timeout for the sequence call —
    # confirm units against the factory's signature.
    seq_call_info = _SequenceCallInfoFactory.create(rt_sequence_path, None, rtseq_params, False, 100000)
    session = _DefaultGatewayFactory.get_new_stimulus_profile_session(rt_sequence_path, [seq_call_info], "")
    # ":1" presumably selects the first call instance of the sequence in
    # the session — verify against the gateway API.
    sequence_control = session[os.path.splitext(os.path.basename(rt_sequence_path))[0] + ":1"]
    state = stimulusprofileapi.StimulusProfileState(session)
    sequence_control.register_sequence_complete_event_handler(state._sequence_complete_event_handler)
    session.deploy(True)
    return state
| 34.863014 | 115 | 0.803929 | import os
from niveristand import _errormessages, errors
from niveristand import _internal
from niveristand._translation.py2rtseq.utils import _py_param_name_to_rtseq_param_name
from niveristand.clientapi import stimulusprofileapi
from niveristand.clientapi._factory import _DefaultGatewayFactory
from niveristand.clientapi._sequencecallinfo import _SequenceCallInfoFactory
from niveristand.clientapi._sequenceparameterassignmentinfo import _SequenceParameterAssignmentInfoFactory
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import Expression
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import ForEachLoop
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import ForLoop
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import GenerateError
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import IfElse
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import LocalDeclaration
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import Multitask
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import RealTimeSequence
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import ReturnDeclaration
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import StopTask
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import Task
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import WhileLoop
from NationalInstruments.VeriStand.RealTimeSequenceDefinitionApi import Yield
from System.IO import IOException
_internal.dummy()
factory = None
workspace = None
def add_local_variable(rt_seq, name, value):
name = _create_unique_lv_name(name)
local_declaration = LocalDeclaration(name, value._data_value)
rt_seq.Variables.LocalVariables.AddLocalVariable(local_declaration)
return name
def add_assignment(block, dest_name, source_name):
add_expression(block, '%s = %s' % (dest_name, source_name))
def add_expression(block, expression):
block.AddStatement(Expression('%s' % expression))
def add_yield(block):
block.AddStatement(Yield())
def add_if_else(block, test_condition):
if_else = IfElse(Expression(test_condition))
block.AddStatement(if_else)
return if_else
def add_for_loop(block, loop_variable, iterations):
for_loop = ForLoop(loop_variable, Expression(str(iterations)), False)
block.AddStatement(for_loop)
return for_loop
def add_foreach_loop(block, loop_variable, iterations):
foreach_loop = ForEachLoop(loop_variable, Expression(str(iterations)), False)
block.AddStatement(foreach_loop)
return foreach_loop
def add_while(block, test_condition):
while_block = WhileLoop(Expression(test_condition), False)
block.AddStatement(while_block)
return while_block
def add_multi_task(block):
multi_task = Multitask()
block.AddStatement(multi_task)
return multi_task
def add_task(multi_task, name):
task = Task(name)
multi_task.AddTask(task)
return task.Body
def create_real_time_sequence():
return RealTimeSequence()
def add_return_variable(rtseq, name, default_value):
name = _create_unique_lv_name(name)
return_declaration = ReturnDeclaration(name, default_value._data_value)
rtseq.Variables.ReturnType = return_declaration
return name
def add_generate_error(block, code, message, action):
block.AddStatement(GenerateError(code, message, action))
def add_stop_task(block, taskname):
block.AddStatement(StopTask(taskname))
def save_real_time_sequence(rtseq, filepath):
try:
rtseq.SaveSequence(os.path.join(filepath))
except(IOException) as e:
raise IOError(e.Message)
def _create_unique_lv_name(name):
try:
_create_unique_lv_name.lv_cnt += 1
except AttributeError:
_create_unique_lv_name.lv_cnt = 0
if name is None:
name = ''
name = 'lv_' + name + '_' + str(_create_unique_lv_name.lv_cnt)
_create_unique_lv_name.lv_cnt += 1
return name
def to_channel_ref_name(name):
return "ch_" + name
def _get_channel_node_info(name, node_info_list):
for channel in node_info_list:
if channel.FullPath == name:
return channel
raise errors.VeristandError(_errormessages.channel_not_found % name)
def run_rt_sequence(rt_sequence_path, rtseq_params):
    """Deploy and run a real-time sequence through a stimulus profile session.

    :param rt_sequence_path: path to the .nivsseq file to execute.
    :param rtseq_params: dict mapping Python-style parameter names to values;
        names are converted via _py_param_name_to_rtseq_param_name (defined
        elsewhere in this module).
    :return: a StimulusProfileState tracking the running session.
    """
    # Convert each (python name, value) pair into the gateway's parameter
    # assignment objects.  NOTE(review): the 100000 below is presumably a
    # timeout passed to the sequence call factory -- confirm its units.
    rtseq_params = \
        [_SequenceParameterAssignmentInfoFactory.create(_py_param_name_to_rtseq_param_name(key), rtseq_params[key])
         for key in rtseq_params]
    seq_call_info = _SequenceCallInfoFactory.create(rt_sequence_path, None, rtseq_params, False, 100000)
    session = _DefaultGatewayFactory.get_new_stimulus_profile_session(rt_sequence_path, [seq_call_info], "")
    # The session indexes sequence controls by "<basename>:1".
    sequence_control = session[os.path.splitext(os.path.basename(rt_sequence_path))[0] + ":1"]
    state = stimulusprofileapi.StimulusProfileState(session)
    # Register completion callback before deploying so no event is missed.
    sequence_control.register_sequence_complete_event_handler(state._sequence_complete_event_handler)
    session.deploy(True)
    return state
| true | true |
f724a43ca95266c2f6ed70bcb679da48a9313bcc | 1,756 | py | Python | dargor/colored_tracebacks.py | dargor/dargor-py | 54b97ac5aaeadd0535fdc492407015c770a5fd67 | [
"0BSD"
] | null | null | null | dargor/colored_tracebacks.py | dargor/dargor-py | 54b97ac5aaeadd0535fdc492407015c770a5fd67 | [
"0BSD"
] | null | null | null | dargor/colored_tracebacks.py | dargor/dargor-py | 54b97ac5aaeadd0535fdc492407015c770a5fd67 | [
"0BSD"
] | null | null | null | #
# Copyright (c) 2020, Gabriel Linder <linder.gabriel@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
# OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
import asyncio
import sys
import traceback
from contextlib import suppress
from pygments import highlight
from pygments.formatters import Terminal256Formatter
from pygments.lexers import Python3TracebackLexer
def excepthook(exc_type, exc_value, exc_traceback):
    """Print a syntax-highlighted version of the traceback to stderr."""
    formatted = traceback.format_exception(exc_type,
                                           exc_value,
                                           exc_traceback)
    tb = ''.join(formatted)
    colored = highlight(tb,
                        Python3TracebackLexer(stripall=True, tabsize=4),
                        Terminal256Formatter(style='vim', bg='dark'))
    print(colored.strip(), file=sys.stderr)
def asyncio_exception_handler(loop, context):
    """Report the context's exception via excepthook, then run default handling."""
    try:
        exc = context['exception']
    except KeyError:
        pass
    else:
        excepthook(type(exc), exc, exc.__traceback__)
    loop.default_exception_handler(context)
def install():
    """Install the colored traceback handlers.

    Replaces sys.excepthook and sets the asyncio event loop's exception
    handler so both synchronous and asyncio errors are highlighted.
    """
    sys.excepthook = excepthook
    # asyncio.get_event_loop() is deprecated (and eventually an error) when
    # no loop is running/set on Python 3.10+; create one as a fallback so
    # install() keeps working on modern interpreters.
    try:
        loop = asyncio.get_event_loop()
    except RuntimeError:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    loop.set_exception_handler(asyncio_exception_handler)
| 37.361702 | 79 | 0.735763 |
import asyncio
import sys
import traceback
from contextlib import suppress
from pygments import highlight
from pygments.formatters import Terminal256Formatter
from pygments.lexers import Python3TracebackLexer
def excepthook(exc_type, exc_value, exc_traceback):
tb = ''.join(traceback.format_exception(exc_type,
exc_value,
exc_traceback))
lexer = Python3TracebackLexer(stripall=True, tabsize=4)
formatter = Terminal256Formatter(style='vim', bg='dark')
print(highlight(tb, lexer, formatter).strip(), file=sys.stderr)
def asyncio_exception_handler(loop, context):
with suppress(KeyError):
e = context['exception']
excepthook(type(e), e, e.__traceback__)
loop.default_exception_handler(context)
def install():
sys.excepthook = excepthook
loop = asyncio.get_event_loop()
loop.set_exception_handler(asyncio_exception_handler)
| true | true |
f724a4c8c450af916d151c5bf8044ebed35a78ce | 5,230 | py | Python | NASA SPACEAPPS CHALLENGE/Solution/Software part/Astronomical Data and Python Libraries/Openpyxl/openpyxl-2.4.0-a1/openpyxl/writer/worksheet.py | sahirsharma/Martian | 062e9b47849512863c16713811f347ad7e121b56 | [
"MIT"
] | 7 | 2016-12-12T02:29:42.000Z | 2020-05-12T21:21:21.000Z | NASA SPACEAPPS CHALLENGE/Solution/Software part/Astronomical Data and Python Libraries/Openpyxl/openpyxl-2.4.0-a1/openpyxl/writer/worksheet.py | sahirsharma/Martian | 062e9b47849512863c16713811f347ad7e121b56 | [
"MIT"
] | 31 | 2017-01-05T06:07:28.000Z | 2018-05-27T13:13:06.000Z | NASA SPACEAPPS CHALLENGE/Solution/Software part/Astronomical Data and Python Libraries/Openpyxl/openpyxl-2.4.0-a1/openpyxl/writer/worksheet.py | sahirsharma/Martian | 062e9b47849512863c16713811f347ad7e121b56 | [
"MIT"
] | 3 | 2017-12-21T23:30:12.000Z | 2019-01-03T20:51:52.000Z | from __future__ import absolute_import
# Copyright (c) 2010-2016 openpyxl
"""Write worksheets to xml representations."""
# Python stdlib imports
from io import BytesIO
from openpyxl import LXML
# package imports
from openpyxl.xml.functions import (
Element,
xmlfile,
)
from openpyxl.xml.constants import SHEET_MAIN_NS
from openpyxl.formatting import ConditionalFormatting
from openpyxl.styles.differential import DifferentialStyle
from openpyxl.packaging.relationship import Relationship
from openpyxl.worksheet.merge import MergeCells, MergeCell
from openpyxl.worksheet.properties import WorksheetProperties
from openpyxl.worksheet.hyperlink import Hyperlink
from openpyxl.worksheet.related import Related
from openpyxl.worksheet.header_footer import HeaderFooter
from openpyxl.worksheet.dimensions import (
SheetFormatProperties,
SheetDimension,
)
from .etree_worksheet import write_cell
def write_mergecells(worksheet):
    """Serialize the worksheet's merged cell ranges; None when there are none."""
    cells = [MergeCell(ref) for ref in worksheet._merged_cells]
    if cells:
        return MergeCells(mergeCell=cells).to_tree()
    return None
def write_conditional_formatting(worksheet):
    """Yield one <conditionalFormatting> element per range.

    Rules carrying a non-default differential style are registered on the
    workbook and given the matching dxfId before serialization.
    """
    wb = worksheet.parent
    for sqref, rules in worksheet.conditional_formatting.cf_rules.items():
        node = Element('conditionalFormatting', {'sqref': sqref})
        for rule in rules:
            dxf = rule.dxf
            if dxf is not None and dxf != DifferentialStyle():
                rule.dxfId = len(wb._differential_styles)
                wb._differential_styles.append(dxf)
            node.append(rule.to_tree())
        yield node
def write_hyperlinks(worksheet):
    """Build the <hyperlinks> element; None when the sheet has no links.

    Each link with an external target gets a relationship registered on the
    worksheet and its id set to the new relationship's rId.
    """
    links = worksheet._hyperlinks
    if not links:
        return None
    tag = Element('hyperlinks')
    for link in links:
        if link.target:
            rel = Relationship(type="hyperlink", TargetMode="External",
                               Target=link.target)
            worksheet._rels.append(rel)
            link.id = "rId{0}".format(len(worksheet._rels))
        tag.append(link.to_tree())
    return tag
def write_drawing(worksheet):
    """If the sheet has charts or images, register a drawing relationship
    and return its <drawing> element; otherwise None."""
    if not (worksheet._charts or worksheet._images):
        return None
    worksheet._rels.append(Relationship(type="drawing", Target=""))
    drawing = Related()
    drawing.id = "rId%s" % len(worksheet._rels)
    return drawing.to_tree("drawing")
def write_worksheet(worksheet, shared_strings):
    """Write a worksheet to an xml file.

    Streams the full <worksheet> document into an in-memory buffer and
    returns the XML bytes.  The order of child elements below follows the
    ECMA-376 worksheet schema and must not be rearranged.
    """
    ws = worksheet
    # Reset per-serialization state collected while writing.
    ws._rels = []
    ws._hyperlinks = []
    # Pick the faster lxml-based writers when lxml is installed.
    if LXML is True:
        from .lxml_worksheet import write_cell, write_rows
    else:
        from .etree_worksheet import write_cell, write_rows
    out = BytesIO()
    with xmlfile(out) as xf:
        with xf.element('worksheet', xmlns=SHEET_MAIN_NS):
            props = ws.sheet_properties.to_tree()
            xf.write(props)
            dim = SheetDimension(ref=ws.calculate_dimension())
            xf.write(dim.to_tree())
            xf.write(ws.views.to_tree())
            # sheetFormatPr must precede cols, so build cols first but
            # write it after the format properties.
            cols = ws.column_dimensions.to_tree()
            ws.sheet_format.outlineLevelCol = ws.column_dimensions.max_outline
            xf.write(ws.sheet_format.to_tree())
            if cols is not None:
                xf.write(cols)
            # write data
            write_rows(xf, ws)
            if ws.protection.sheet:
                xf.write(ws.protection.to_tree())
            if ws.auto_filter:
                xf.write(ws.auto_filter.to_tree())
            if ws.sort_state:
                xf.write(ws.sort_state.to_tree())
            merge = write_mergecells(ws)
            if merge is not None:
                xf.write(merge)
            cfs = write_conditional_formatting(ws)
            for cf in cfs:
                xf.write(cf)
            if ws.data_validations:
                xf.write(ws.data_validations.to_tree())
            hyper = write_hyperlinks(ws)
            if hyper is not None:
                xf.write(hyper)
            options = ws.print_options
            if dict(options):
                new_element = options.to_tree()
                xf.write(new_element)
            margins = ws.page_margins.to_tree()
            xf.write(margins)
            setup = ws.page_setup
            if dict(setup):
                new_element = setup.to_tree()
                xf.write(new_element)
            if bool(ws.HeaderFooter):
                xf.write(ws.HeaderFooter.to_tree())
            drawing = write_drawing(ws)
            if drawing is not None:
                xf.write(drawing)
            # if there is an existing vml file associated with this sheet or if there
            # are any comments we need to add a legacyDrawing relation to the vml file.
            if (ws.legacy_drawing is not None or ws._comments):
                legacyDrawing = Related(id="anysvml")
                xml = legacyDrawing.to_tree("legacyDrawing")
                xf.write(xml)
            if ws.page_breaks:
                xf.write(ws.page_breaks.to_tree())
    xml = out.getvalue()
    out.close()
    return xml
| 28.895028 | 91 | 0.624092 | from __future__ import absolute_import
from io import BytesIO
from openpyxl import LXML
from openpyxl.xml.functions import (
Element,
xmlfile,
)
from openpyxl.xml.constants import SHEET_MAIN_NS
from openpyxl.formatting import ConditionalFormatting
from openpyxl.styles.differential import DifferentialStyle
from openpyxl.packaging.relationship import Relationship
from openpyxl.worksheet.merge import MergeCells, MergeCell
from openpyxl.worksheet.properties import WorksheetProperties
from openpyxl.worksheet.hyperlink import Hyperlink
from openpyxl.worksheet.related import Related
from openpyxl.worksheet.header_footer import HeaderFooter
from openpyxl.worksheet.dimensions import (
SheetFormatProperties,
SheetDimension,
)
from .etree_worksheet import write_cell
def write_mergecells(worksheet):
merged = [MergeCell(ref) for ref in worksheet._merged_cells]
if not merged:
return
return MergeCells(mergeCell=merged).to_tree()
def write_conditional_formatting(worksheet):
wb = worksheet.parent
for range_string, rules in worksheet.conditional_formatting.cf_rules.items():
cf = Element('conditionalFormatting', {'sqref': range_string})
for rule in rules:
if rule.dxf is not None:
if rule.dxf != DifferentialStyle():
rule.dxfId = len(wb._differential_styles)
wb._differential_styles.append(rule.dxf)
cf.append(rule.to_tree())
yield cf
def write_hyperlinks(worksheet):
if not worksheet._hyperlinks:
return
tag = Element('hyperlinks')
for link in worksheet._hyperlinks:
if link.target:
rel = Relationship(type="hyperlink", TargetMode="External", Target=link.target)
worksheet._rels.append(rel)
link.id = "rId{0}".format(len(worksheet._rels))
tag.append(link.to_tree())
return tag
def write_drawing(worksheet):
if worksheet._charts or worksheet._images:
rel = Relationship(type="drawing", Target="")
worksheet._rels.append(rel)
drawing = Related()
drawing.id = "rId%s" % len(worksheet._rels)
return drawing.to_tree("drawing")
def write_worksheet(worksheet, shared_strings):
ws = worksheet
ws._rels = []
ws._hyperlinks = []
if LXML is True:
from .lxml_worksheet import write_cell, write_rows
else:
from .etree_worksheet import write_cell, write_rows
out = BytesIO()
with xmlfile(out) as xf:
with xf.element('worksheet', xmlns=SHEET_MAIN_NS):
props = ws.sheet_properties.to_tree()
xf.write(props)
dim = SheetDimension(ref=ws.calculate_dimension())
xf.write(dim.to_tree())
xf.write(ws.views.to_tree())
cols = ws.column_dimensions.to_tree()
ws.sheet_format.outlineLevelCol = ws.column_dimensions.max_outline
xf.write(ws.sheet_format.to_tree())
if cols is not None:
xf.write(cols)
write_rows(xf, ws)
if ws.protection.sheet:
xf.write(ws.protection.to_tree())
if ws.auto_filter:
xf.write(ws.auto_filter.to_tree())
if ws.sort_state:
xf.write(ws.sort_state.to_tree())
merge = write_mergecells(ws)
if merge is not None:
xf.write(merge)
cfs = write_conditional_formatting(ws)
for cf in cfs:
xf.write(cf)
if ws.data_validations:
xf.write(ws.data_validations.to_tree())
hyper = write_hyperlinks(ws)
if hyper is not None:
xf.write(hyper)
options = ws.print_options
if dict(options):
new_element = options.to_tree()
xf.write(new_element)
margins = ws.page_margins.to_tree()
xf.write(margins)
setup = ws.page_setup
if dict(setup):
new_element = setup.to_tree()
xf.write(new_element)
if bool(ws.HeaderFooter):
xf.write(ws.HeaderFooter.to_tree())
drawing = write_drawing(ws)
if drawing is not None:
xf.write(drawing)
if (ws.legacy_drawing is not None or ws._comments):
legacyDrawing = Related(id="anysvml")
xml = legacyDrawing.to_tree("legacyDrawing")
xf.write(xml)
if ws.page_breaks:
xf.write(ws.page_breaks.to_tree())
xml = out.getvalue()
out.close()
return xml
| true | true |
f724a5ea1a9b0a0227598c547ffae0a1b2f3abd4 | 14,218 | py | Python | experimental/webserver/common/wsgi.py | manuparra/oc2dm | 5459c1fdde909fdd4d59b3ad29d7d5c962b23694 | [
"MIT"
] | 1 | 2019-11-27T18:34:12.000Z | 2019-11-27T18:34:12.000Z | experimental/webserver/common/wsgi.py | manuparra/oc2dm | 5459c1fdde909fdd4d59b3ad29d7d5c962b23694 | [
"MIT"
] | null | null | null | experimental/webserver/common/wsgi.py | manuparra/oc2dm | 5459c1fdde909fdd4d59b3ad29d7d5c962b23694 | [
"MIT"
] | 1 | 2020-04-10T14:44:44.000Z | 2020-04-10T14:44:44.000Z | # Copyright 2017 DiCTIS UGR
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods for working with WSGI servers
"""
from __future__ import print_function
import errno
import functools
import os
import signal
import sys
import eventlet
import eventlet.greenio
import eventlet.wsgi
from eventlet.green import socket
from omlcc_catalog.common import config
from omlcc_catalog.common import exception
# Extra WSGI configuration options registered alongside the bind/socket/
# eventlet option groups.
# NOTE(review): cfg (presumably oslo.config), logging, the _() translation
# helper and the bind_opts/socket_opts/eventlet_opts lists are not imported
# in this excerpt -- confirm they are defined earlier in the full module.
wsgi_opts = [
    cfg.StrOpt('secure_proxy_ssl_header',
               deprecated_for_removal=True,
               deprecated_reason=_('Use the http_proxy_to_wsgi middleware '
                                   'instead.'),
               help=_('The HTTP header used to determine the scheme for the '
                      'original request, even if it was removed by an SSL '
                      'terminating proxy. Typical value is '
                      '"HTTP_X_FORWARDED_PROTO".')),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# Register all option groups on the global config object at import time.
CONF.register_opts(bind_opts)
CONF.register_opts(socket_opts)
CONF.register_opts(eventlet_opts)
CONF.register_opts(wsgi_opts)
def set_eventlet_hub():
    """Select an eventlet hub, preferring 'poll' with 'selects' as fallback.

    :raises exception.WorkerCreationFailure: when neither hub is usable on
        this platform.
    """
    try:
        eventlet.hubs.use_hub('poll')
    except Exception:
        try:
            eventlet.hubs.use_hub('selects')
        except Exception:
            # Fixed grammar of the original message ("eventlet 'poll' nor
            # 'selects' hubs are available").
            msg = _("neither the eventlet 'poll' nor 'selects' hub is "
                    "available on this platform")
            raise exception.WorkerCreationFailure(
                reason=msg)
class Server(object):
    """Server class to manage multiple WSGI sockets and applications.
    This class requires initialize_glance_store set to True if
    glance store needs to be initialized.

    The parent process forks get_num_workers() children, each running an
    eventlet WSGI server on a shared listening socket, and supervises
    them, respawning workers that die.  SIGHUP performs a zero-downtime
    configuration reload.

    NOTE(review): get_num_workers, get_socket, ssl_wrap_socket,
    get_asynchronous_eventlet_pool, initialize_glance_store, utils and the
    _LI/_LW/_LE translation helpers are not visible in this excerpt --
    presumably defined elsewhere in the module; confirm in the full file.
    """
    def __init__(self, threads=1000, initialize_glance_store=False):
        os.umask(0o27)  # ensure files are created with the correct privileges
        self._logger = logging.getLogger("eventlet.wsgi.server")
        self.threads = threads  # greenthread pool size per worker
        self.children = set()        # PIDs of live workers
        self.stale_children = set()  # pre-reload workers draining requests
        self.running = True
        # NOTE(abhishek): Allows us to only re-initialize glance_store when
        # the API's configuration reloads.
        self.initialize_glance_store = initialize_glance_store
        self.pgid = os.getpid()
        try:
            # NOTE(flaper87): Make sure this process
            # runs in its own process group.
            os.setpgid(self.pgid, self.pgid)
        except OSError:
            # NOTE(flaper87): When running glance-control,
            # (glance's functional tests, for example)
            # setpgid fails with EPERM as glance-control
            # creates a fresh session, of which the newly
            # launched service becomes the leader (session
            # leaders may not change process groups)
            #
            # Running glance-(api|registry) is safe and
            # shouldn't raise any error here.
            self.pgid = 0
    def hup(self, *args):
        """
        Reloads configuration files with zero down time
        """
        # Ignore further SIGHUPs; the supervisor loop catches
        # SIGHUPInterrupt and performs the actual reload.
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
        raise exception.SIGHUPInterrupt
    def kill_children(self, *args):
        """Kills the entire process group."""
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        self.running = False
        os.killpg(self.pgid, signal.SIGTERM)
    def start(self, application, default_port):
        """
        Run a WSGI server with the given application.
        :param application: The application to be run in the WSGI server
        :param default_port: Port to bind to if none is specified in conf
        """
        self.application = application
        self.default_port = default_port
        self.configure()
        self.start_wsgi()
    def start_wsgi(self):
        """Spawn worker processes, or run in-process when workers == 0."""
        workers = get_num_workers()
        if workers == 0:
            # Useful for profiling, test, debug etc.
            self.pool = self.create_pool()
            self.pool.spawn_n(self._single_run, self.application, self.sock)
            return
        else:
            LOG.info(_LI("Starting %d workers"), workers)
            signal.signal(signal.SIGTERM, self.kill_children)
            signal.signal(signal.SIGINT, self.kill_children)
            signal.signal(signal.SIGHUP, self.hup)
            while len(self.children) < workers:
                self.run_child()
    def create_pool(self):
        """Return a new eventlet pool sized by self.threads."""
        return get_asynchronous_eventlet_pool(size=self.threads)
    def _remove_children(self, pid):
        """Forget a terminated child PID (current, stale or unknown)."""
        if pid in self.children:
            self.children.remove(pid)
            LOG.info(_LI('Removed dead child %s'), pid)
        elif pid in self.stale_children:
            self.stale_children.remove(pid)
            LOG.info(_LI('Removed stale child %s'), pid)
        else:
            LOG.warn(_LW('Unrecognised child %s') % pid)
    def _verify_and_respawn_children(self, pid, status):
        """Respawn a worker unless it exited non-zero (non-recoverable)."""
        if len(self.stale_children) == 0:
            LOG.debug('No stale children')
        if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
            LOG.error(_LE('Not respawning child %d, cannot '
                          'recover from termination') % pid)
            if not self.children and not self.stale_children:
                LOG.info(
                    _LI('All workers have terminated. Exiting'))
                self.running = False
        else:
            if len(self.children) < get_num_workers():
                self.run_child()
    def wait_on_children(self):
        """Supervisor loop: reap children and handle reload/shutdown events."""
        while self.running:
            try:
                pid, status = os.wait()
                if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                    self._remove_children(pid)
                    self._verify_and_respawn_children(pid, status)
            except OSError as err:
                # EINTR/ECHILD are expected while waiting; anything else
                # is a real error.
                if err.errno not in (errno.EINTR, errno.ECHILD):
                    raise
            except KeyboardInterrupt:
                LOG.info(_LI('Caught keyboard interrupt. Exiting.'))
                break
            except exception.SIGHUPInterrupt:
                self.reload()
                continue
        eventlet.greenio.shutdown_safe(self.sock)
        self.sock.close()
        LOG.debug('Exited')
    def configure(self, old_conf=None, has_changed=None):
        """
        Apply configuration settings
        :param old_conf: Cached old configuration settings (if any)
        :param has changed: callable to determine if a parameter has changed
        """
        eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
        self.client_socket_timeout = CONF.client_socket_timeout or None
        self.configure_socket(old_conf, has_changed)
        if self.initialize_glance_store:
            initialize_glance_store()
    def reload(self):
        """
        Reload and re-apply configuration settings
        Existing child processes are sent a SIGHUP signal
        and will exit after completing existing requests.
        New child processes, which will have the updated
        configuration, are spawned. This allows preventing
        interruption to the service.
        """
        def _has_changed(old, new, param):
            # Compare the stashed old value with the freshly loaded one.
            old = old.get(param)
            new = getattr(new, param)
            return (new != old)
        old_conf = utils.stash_conf_values()
        has_changed = functools.partial(_has_changed, old_conf, CONF)
        CONF.reload_config_files()
        os.killpg(self.pgid, signal.SIGHUP)
        self.stale_children = self.children
        self.children = set()
        # Ensure any logging config changes are picked up
        logging.setup(CONF, 'glance')
        config.set_config_defaults()
        self.configure(old_conf, has_changed)
        self.start_wsgi()
    def wait(self):
        """Wait until all servers have completed running."""
        try:
            if self.children:
                self.wait_on_children()
            else:
                self.pool.waitall()
        except KeyboardInterrupt:
            pass
    def run_child(self):
        """Fork one worker; the child runs the server, the parent records it."""
        def child_hup(*args):
            """Shuts down child processes, existing requests are handled."""
            signal.signal(signal.SIGHUP, signal.SIG_IGN)
            eventlet.wsgi.is_accepting = False
            self.sock.close()
        pid = os.fork()
        if pid == 0:
            signal.signal(signal.SIGHUP, child_hup)
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            # ignore the interrupt signal to avoid a race whereby
            # a child worker receives the signal before the parent
            # and is respawned unnecessarily as a result
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            # The child has no need to stash the unwrapped
            # socket, and the reference prevents a clean
            # exit on sighup
            self._sock = None
            self.run_server()
            LOG.info(_LI('Child %d exiting normally'), os.getpid())
            # self.pool.waitall() is now called in wsgi's server so
            # it's safe to exit here
            sys.exit(0)
        else:
            LOG.info(_LI('Started child %s'), pid)
            self.children.add(pid)
    def run_server(self):
        """Run a WSGI server."""
        if cfg.CONF.pydev_worker_debug_host:
            utils.setup_remote_pydev_debug(cfg.CONF.pydev_worker_debug_host,
                                           cfg.CONF.pydev_worker_debug_port)
        eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
        self.pool = self.create_pool()
        try:
            eventlet.wsgi.server(self.sock,
                                 self.application,
                                 log=self._logger,
                                 custom_pool=self.pool,
                                 debug=False,
                                 keepalive=CONF.http_keepalive,
                                 socket_timeout=self.client_socket_timeout)
        except socket.error as err:
            # NOTE(review): err[0] is Python 2 style indexing; on Python 3
            # this would need err.errno -- confirm the target runtime.
            if err[0] != errno.EINVAL:
                raise
        # waiting on async pools
        if ASYNC_EVENTLET_THREAD_POOL_LIST:
            for pool in ASYNC_EVENTLET_THREAD_POOL_LIST:
                pool.waitall()
    def _single_run(self, application, sock):
        """Start a WSGI server in a new green thread."""
        LOG.info(_LI("Starting single process server"))
        eventlet.wsgi.server(sock, application, custom_pool=self.pool,
                             log=self._logger,
                             debug=False,
                             keepalive=CONF.http_keepalive,
                             socket_timeout=self.client_socket_timeout)
    def configure_socket(self, old_conf=None, has_changed=None):
        """
        Ensure a socket exists and is appropriately configured.
        This function is called on start up, and can also be
        called in the event of a configuration reload.
        When called for the first time a new socket is created.
        If reloading and either bind_host or bind port have been
        changed the existing socket must be closed and a new
        socket opened (laws of physics).
        In all other cases (bind_host/bind_port have not changed)
        the existing socket is reused.
        :param old_conf: Cached old configuration settings (if any)
        :param has changed: callable to determine if a parameter has changed
        """
        # Do we need a fresh socket?
        new_sock = (old_conf is None or (
                    has_changed('bind_host') or
                    has_changed('bind_port')))
        # Will we be using https?
        use_ssl = not (not CONF.cert_file or not CONF.key_file)
        # Were we using https before?
        old_use_ssl = (old_conf is not None and not (
                       not old_conf.get('key_file') or
                       not old_conf.get('cert_file')))
        # Do we now need to perform an SSL wrap on the socket?
        wrap_sock = use_ssl is True and (old_use_ssl is False or new_sock)
        # Do we now need to perform an SSL unwrap on the socket?
        unwrap_sock = use_ssl is False and old_use_ssl is True
        if new_sock:
            self._sock = None
            if old_conf is not None:
                self.sock.close()
            _sock = get_socket(self.default_port)
            _sock.setsockopt(socket.SOL_SOCKET,
                             socket.SO_REUSEADDR, 1)
            # sockets can hang around forever without keepalive
            _sock.setsockopt(socket.SOL_SOCKET,
                             socket.SO_KEEPALIVE, 1)
            self._sock = _sock
        if wrap_sock:
            self.sock = ssl_wrap_socket(self._sock)
        if unwrap_sock:
            self.sock = self._sock
        if new_sock and not use_ssl:
            self.sock = self._sock
        # Pick up newly deployed certs
        if old_conf is not None and use_ssl is True and old_use_ssl is True:
            if has_changed('cert_file') or has_changed('key_file'):
                utils.validate_key_cert(CONF.key_file, CONF.cert_file)
            if has_changed('cert_file'):
                self.sock.certfile = CONF.cert_file
            if has_changed('key_file'):
                self.sock.keyfile = CONF.key_file
        if new_sock or (old_conf is not None and has_changed('tcp_keepidle')):
            # This option isn't available in the OS X version of eventlet
            if hasattr(socket, 'TCP_KEEPIDLE'):
                self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                                     CONF.tcp_keepidle)
        if old_conf is not None and has_changed('backlog'):
            self.sock.listen(CONF.backlog)
| 38.531165 | 78 | 0.602546 |
from __future__ import print_function
import errno
import functools
import os
import signal
import sys
import eventlet
import eventlet.greenio
import eventlet.wsgi
from eventlet.green import socket
from omlcc_catalog.common import config
from omlcc_catalog.common import exception
wsgi_opts = [
cfg.StrOpt('secure_proxy_ssl_header',
deprecated_for_removal=True,
deprecated_reason=_('Use the http_proxy_to_wsgi middleware '
'instead.'),
help=_('The HTTP header used to determine the scheme for the '
'original request, even if it was removed by an SSL '
'terminating proxy. Typical value is '
'"HTTP_X_FORWARDED_PROTO".')),
]
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(bind_opts)
CONF.register_opts(socket_opts)
CONF.register_opts(eventlet_opts)
CONF.register_opts(wsgi_opts)
def set_eventlet_hub():
try:
eventlet.hubs.use_hub('poll')
except Exception:
try:
eventlet.hubs.use_hub('selects')
except Exception:
msg = _("eventlet 'poll' nor 'selects' hubs are available "
"on this platform")
raise exception.WorkerCreationFailure(
reason=msg)
class Server(object):
def __init__(self, threads=1000, initialize_glance_store=False):
os.umask(0o27)
self._logger = logging.getLogger("eventlet.wsgi.server")
self.threads = threads
self.children = set()
self.stale_children = set()
self.running = True
self.initialize_glance_store = initialize_glance_store
self.pgid = os.getpid()
try:
# NOTE(flaper87): Make sure this process
# runs in its own process group.
os.setpgid(self.pgid, self.pgid)
except OSError:
# NOTE(flaper87): When running glance-control,
# (glance's functional tests, for example)
self.pgid = 0
def hup(self, *args):
signal.signal(signal.SIGHUP, signal.SIG_IGN)
raise exception.SIGHUPInterrupt
def kill_children(self, *args):
signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN)
self.running = False
os.killpg(self.pgid, signal.SIGTERM)
def start(self, application, default_port):
self.application = application
self.default_port = default_port
self.configure()
self.start_wsgi()
def start_wsgi(self):
workers = get_num_workers()
if workers == 0:
# Useful for profiling, test, debug etc.
self.pool = self.create_pool()
self.pool.spawn_n(self._single_run, self.application, self.sock)
return
else:
LOG.info(_LI("Starting %d workers"), workers)
signal.signal(signal.SIGTERM, self.kill_children)
signal.signal(signal.SIGINT, self.kill_children)
signal.signal(signal.SIGHUP, self.hup)
while len(self.children) < workers:
self.run_child()
def create_pool(self):
return get_asynchronous_eventlet_pool(size=self.threads)
def _remove_children(self, pid):
if pid in self.children:
self.children.remove(pid)
LOG.info(_LI('Removed dead child %s'), pid)
elif pid in self.stale_children:
self.stale_children.remove(pid)
LOG.info(_LI('Removed stale child %s'), pid)
else:
LOG.warn(_LW('Unrecognised child %s') % pid)
def _verify_and_respawn_children(self, pid, status):
if len(self.stale_children) == 0:
LOG.debug('No stale children')
if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
LOG.error(_LE('Not respawning child %d, cannot '
'recover from termination') % pid)
if not self.children and not self.stale_children:
LOG.info(
_LI('All workers have terminated. Exiting'))
self.running = False
else:
if len(self.children) < get_num_workers():
self.run_child()
def wait_on_children(self):
while self.running:
try:
pid, status = os.wait()
if os.WIFEXITED(status) or os.WIFSIGNALED(status):
self._remove_children(pid)
self._verify_and_respawn_children(pid, status)
except OSError as err:
if err.errno not in (errno.EINTR, errno.ECHILD):
raise
except KeyboardInterrupt:
LOG.info(_LI('Caught keyboard interrupt. Exiting.'))
break
except exception.SIGHUPInterrupt:
self.reload()
continue
eventlet.greenio.shutdown_safe(self.sock)
self.sock.close()
LOG.debug('Exited')
def configure(self, old_conf=None, has_changed=None):
eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
self.client_socket_timeout = CONF.client_socket_timeout or None
self.configure_socket(old_conf, has_changed)
if self.initialize_glance_store:
initialize_glance_store()
def reload(self):
def _has_changed(old, new, param):
old = old.get(param)
new = getattr(new, param)
return (new != old)
old_conf = utils.stash_conf_values()
has_changed = functools.partial(_has_changed, old_conf, CONF)
CONF.reload_config_files()
os.killpg(self.pgid, signal.SIGHUP)
self.stale_children = self.children
self.children = set()
# Ensure any logging config changes are picked up
logging.setup(CONF, 'glance')
config.set_config_defaults()
self.configure(old_conf, has_changed)
self.start_wsgi()
def wait(self):
try:
if self.children:
self.wait_on_children()
else:
self.pool.waitall()
except KeyboardInterrupt:
pass
def run_child(self):
def child_hup(*args):
signal.signal(signal.SIGHUP, signal.SIG_IGN)
eventlet.wsgi.is_accepting = False
self.sock.close()
pid = os.fork()
if pid == 0:
signal.signal(signal.SIGHUP, child_hup)
signal.signal(signal.SIGTERM, signal.SIG_DFL)
# ignore the interrupt signal to avoid a race whereby
# a child worker receives the signal before the parent
# and is respawned unnecessarily as a result
signal.signal(signal.SIGINT, signal.SIG_IGN)
# The child has no need to stash the unwrapped
# socket, and the reference prevents a clean
# exit on sighup
self._sock = None
self.run_server()
LOG.info(_LI('Child %d exiting normally'), os.getpid())
# self.pool.waitall() is now called in wsgi's server so
sys.exit(0)
else:
LOG.info(_LI('Started child %s'), pid)
self.children.add(pid)
def run_server(self):
if cfg.CONF.pydev_worker_debug_host:
utils.setup_remote_pydev_debug(cfg.CONF.pydev_worker_debug_host,
cfg.CONF.pydev_worker_debug_port)
eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
self.pool = self.create_pool()
try:
eventlet.wsgi.server(self.sock,
self.application,
log=self._logger,
custom_pool=self.pool,
debug=False,
keepalive=CONF.http_keepalive,
socket_timeout=self.client_socket_timeout)
except socket.error as err:
if err[0] != errno.EINVAL:
raise
# waiting on async pools
if ASYNC_EVENTLET_THREAD_POOL_LIST:
for pool in ASYNC_EVENTLET_THREAD_POOL_LIST:
pool.waitall()
def _single_run(self, application, sock):
LOG.info(_LI("Starting single process server"))
eventlet.wsgi.server(sock, application, custom_pool=self.pool,
log=self._logger,
debug=False,
keepalive=CONF.http_keepalive,
socket_timeout=self.client_socket_timeout)
def configure_socket(self, old_conf=None, has_changed=None):
# Do we need a fresh socket?
new_sock = (old_conf is None or (
has_changed('bind_host') or
has_changed('bind_port')))
# Will we be using https?
use_ssl = not (not CONF.cert_file or not CONF.key_file)
# Were we using https before?
old_use_ssl = (old_conf is not None and not (
not old_conf.get('key_file') or
not old_conf.get('cert_file')))
# Do we now need to perform an SSL wrap on the socket?
wrap_sock = use_ssl is True and (old_use_ssl is False or new_sock)
# Do we now need to perform an SSL unwrap on the socket?
unwrap_sock = use_ssl is False and old_use_ssl is True
if new_sock:
self._sock = None
if old_conf is not None:
self.sock.close()
_sock = get_socket(self.default_port)
_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
# sockets can hang around forever without keepalive
_sock.setsockopt(socket.SOL_SOCKET,
socket.SO_KEEPALIVE, 1)
self._sock = _sock
if wrap_sock:
self.sock = ssl_wrap_socket(self._sock)
if unwrap_sock:
self.sock = self._sock
if new_sock and not use_ssl:
self.sock = self._sock
# Pick up newly deployed certs
if old_conf is not None and use_ssl is True and old_use_ssl is True:
if has_changed('cert_file') or has_changed('key_file'):
utils.validate_key_cert(CONF.key_file, CONF.cert_file)
if has_changed('cert_file'):
self.sock.certfile = CONF.cert_file
if has_changed('key_file'):
self.sock.keyfile = CONF.key_file
if new_sock or (old_conf is not None and has_changed('tcp_keepidle')):
# This option isn't available in the OS X version of eventlet
if hasattr(socket, 'TCP_KEEPIDLE'):
self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
CONF.tcp_keepidle)
if old_conf is not None and has_changed('backlog'):
self.sock.listen(CONF.backlog)
| true | true |
f724a61cd3bb5219a49f1a660296b229d468a428 | 2,178 | py | Python | A03_Wellst.py | spring-2019-csc-226/a03-master | 7be446b28d5aebfc4c4635cc418db0c0c0fadbca | [
"MIT"
] | null | null | null | A03_Wellst.py | spring-2019-csc-226/a03-master | 7be446b28d5aebfc4c4635cc418db0c0c0fadbca | [
"MIT"
] | 4 | 2019-02-07T06:26:00.000Z | 2019-02-19T00:08:43.000Z | A03_Wellst.py | spring-2019-csc-226/a03-master | 7be446b28d5aebfc4c4635cc418db0c0c0fadbca | [
"MIT"
] | 1 | 2019-02-07T02:23:37.000Z | 2019-02-07T02:23:37.000Z | #######################################################################################################################
# Taran Wells
# Wellst
# https://docs.google.com/document/d/1RBeOXjYBBjZ507wVeQVIPBrU7gBvTNJi8BYGDvtC53w/edit?usp=sharing
#######################################################################################################################
import turtle  # standard-library turtle graphics

# Screen setup: 255-based color mode so color()/fillcolor() accept (r, g, b) int triples.
wn = turtle.Screen()
wn.colormode(255)
# setup turtles: one pen for the house body, one for the roof, one spare.
base = turtle.Turtle()
base.hideturtle()
roof = turtle.Turtle()
roof.hideturtle()
glass = turtle.Turtle()  # bug fix: was `turtle.Turtle` (bound the class itself, not an instance)
wn.bgcolor("red")
def house_base(t, sz):
    """Draw and fill the square body of the house.

    t:  turtle used for drawing; the pen is left up afterwards
    sz: side length of the square, in pixels
    """
    t.color(250, 165, 10)  # house orange (255-based RGB; requires wn.colormode(255))
    t.pendown()
    t.begin_fill()
    for side in range(2):  # two iterations of two sides each = 4 sides
        t.forward(sz)
        t.right(90)  # square house
        t.forward(sz)
        t.right(90)
    t.end_fill()
    t.penup()
def house_roof(t1, sz):
    """Draw and fill the triangular roof.

    t1: turtle used for drawing; the pen is left up afterwards
    sz: side length of the equilateral triangle, in pixels
    """
    t1.color(135, 30, 160)  # roof purple
    t1.begin_fill()
    for side in range(3):
        t1.forward(sz)  # shape roof
        t1.left(120)  # three 120-degree exterior turns close an equilateral triangle
    t1.end_fill()
    t1.penup()
def placement(t2, sz):
    """Move forward sz, turn right 90 degrees, then forward sz again.

    Used right after house_base() (which leaves the pen up) to reach the
    starting position for the first window without drawing a line.
    """
    t2.fd(sz)
    t2.right(90)
    t2.fd(sz)
def house_window(t3):
    """Draw one 35x35 window outlined in black, filled light blue.

    NOTE(review): the fill color is set after the outline is drawn but
    before end_fill(); this appears to rely on turtle applying the
    fill color at end_fill() time -- confirm against the turtle docs.
    """
    t3.begin_fill()
    t3.pendown()
    t3.pencolor('black')
    for side in range(4):
        t3.fd(35)
        t3.right(90)
    t3.fillcolor(30, 135, 160)  # make window light blue
    t3.end_fill()
def main():
    """Draw the full scene: roof, house body, four windows, and a grass field."""
    # Shift the roof turtle left so the wider (200px) roof overhangs the 140px body.
    roof.penup()
    roof.back(30)
    roof.pendown()
    house_base(base, 140)
    placement(base, 70)  # move (without drawing) to the first window position
    house_roof(roof, 200)
    # Four windows, rotating the turtle 90 degrees between each.
    house_window(base)
    base.left(90)
    house_window(base)
    base.left(90)
    house_window(base)
    base.left(90)
    house_window(base)
    # Reposition below the house to draw the grass rectangle.
    base.pu()
    base.left(90)
    base.fd(70)
    base.right(90)
    base.pd()
    base.begin_fill()
    for grass in range(2):
        base.fd(1000)
        base.left(90)
        base.fd(2000)
        base.left(90)
        base.fd(1000)
        base.left(90)
        base.fd(1000)
    base.fillcolor(0, 255, 0)  # green grass
    base.end_fill()
main()  # calls on main function
wn.exitonclick()  # keep the window open until the user clicks it
| 21.78 | 119 | 0.524334 | true | true | |
f724a823d84a94955722b0212528e735eddb241d | 1,343 | py | Python | setup.py | GianmarcoFolchi/basketball_reference_scraper | 9d286b66bd2856f3fe0ba255552c5b81b2f87148 | [
"MIT"
] | null | null | null | setup.py | GianmarcoFolchi/basketball_reference_scraper | 9d286b66bd2856f3fe0ba255552c5b81b2f87148 | [
"MIT"
] | null | null | null | setup.py | GianmarcoFolchi/basketball_reference_scraper | 9d286b66bd2856f3fe0ba255552c5b81b2f87148 | [
"MIT"
] | null | null | null | import setuptools
# The PyPI long description is the project README, rendered as Markdown.
with open("README.md", "r") as fh:
    long_description = fh.read()

setuptools.setup(
    name="basketball_reference_scraper",
    version="1.0.28",
    author="Vishaal Agartha",
    author_email="vishaalagartha@gmail.com",
    license="MIT",
    description="A Python client for scraping stats and data from Basketball Reference",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/vishaalagartha/basketball_reference_scraper",
    packages=setuptools.find_packages(),
    # Ship bundled .txt data files (e.g. lookup tables) with the package.
    package_data={'basketball_reference_scraper': ['*.txt']},
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.6",
    # Dependencies are pinned to exact versions.
    install_requires=[
        'beautifulsoup4==4.8.2',
        'bs4==0.0.1',
        'lxml==4.6.3',
        'numpy==1.18.1',
        'pandas==0.25.3',
        'python-dateutil==2.8.1',
        'pytz==2019.3',
        'requests==2.22.0',
        'six==1.13.0',
        'soupsieve==1.9.5'
    ],
    # NOTE(review): `unittest` is part of the standard library; listing it as
    # an extra has no effect and will fail to resolve on PyPI -- confirm intent.
    extras_require={
        'test': ['unittest'],
    },
    keywords=[
        "nba",
        "sports",
        "data mining",
        "basketball",
        "basketball reference",
        "basketball-reference.com",
    ],
)
| 27.979167 | 88 | 0.590469 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="basketball_reference_scraper",
version="1.0.28",
author="Vishaal Agartha",
author_email="vishaalagartha@gmail.com",
license="MIT",
description="A Python client for scraping stats and data from Basketball Reference",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/vishaalagartha/basketball_reference_scraper",
packages=setuptools.find_packages(),
package_data={'basketball_reference_scraper': ['*.txt']},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires=">=3.6",
install_requires=[
'beautifulsoup4==4.8.2',
'bs4==0.0.1',
'lxml==4.6.3',
'numpy==1.18.1',
'pandas==0.25.3',
'python-dateutil==2.8.1',
'pytz==2019.3',
'requests==2.22.0',
'six==1.13.0',
'soupsieve==1.9.5'
],
extras_require={
'test': ['unittest'],
},
keywords=[
"nba",
"sports",
"data mining",
"basketball",
"basketball reference",
"basketball-reference.com",
],
)
| true | true |
f724a831c2437c3daa32567515449cfccc35918a | 2,863 | py | Python | pairSums.py | ChuckCottrill/hack-rank | 91a219668ad682b5b00191ebd9571b0c02e133d7 | [
"BSD-2-Clause"
] | null | null | null | pairSums.py | ChuckCottrill/hack-rank | 91a219668ad682b5b00191ebd9571b0c02e133d7 | [
"BSD-2-Clause"
] | null | null | null | pairSums.py | ChuckCottrill/hack-rank | 91a219668ad682b5b00191ebd9571b0c02e133d7 | [
"BSD-2-Clause"
] | null | null | null |
Pair Sums
'''
Pair Sums
Given a list of n integers arr[0..(n-1)], determine the number of different pairs of elements within it which sum to k.
If an integer appears in the list multiple times, each copy is considered to be different; that is, two pairs are considered different if one pair includes at least one array index which the other doesn't, even if they include the same values.
Signature
int numberOfWays(int[] arr, int k)
Input
n is in the range [1, 100,000].
Each value arr[i] is in the range [1, 1,000,000,000].
k is in the range [1, 1,000,000,000].
Output
Return the number of different pairs of elements which sum to k.
Example 1
n = 5
k = 6
arr = [1, 2, 3, 4, 3]
output = 2
The valid pairs are 2+4 and 3+3.
Example 2
n = 5
k = 6
arr = [1, 5, 3, 3, 3]
output = 4
There's one valid pair 1+5, and three different valid pairs 3+3 (the 3rd and 4th elements, 3rd and 5th elements, and 4th and 5th elements).
'''
import math
# Add any extra import statements you may need here
# Add any helper functions you may need here
def numberOfWaysSimple(arr, k):
    """Brute-force count of index pairs i < j with arr[i] + arr[j] == k (O(n^2))."""
    return sum(
        1
        for i, left in enumerate(arr)
        for right in arr[i + 1:]
        if left + right == k
    )
def numberOfWays(arr, k):
    """Count index pairs (i, j) with i < j such that arr[i] + arr[j] == k.

    Single pass with a running frequency map: each element pairs with every
    complement (k - x) seen so far, so each pair is counted exactly once.
    O(n) time, O(n) extra space.

    Replaces the original two-pass version, which counted every pair twice
    and halved at the end, kept an unused `paired` list, and carried
    commented-out debug prints.
    """
    seen = {}  # value -> number of occurrences encountered so far
    count = 0
    for x in arr:
        count += seen.get(k - x, 0)  # pairs (earlier element, x) summing to k
        seen[x] = seen.get(x, 0) + 1
    return count
# These are the tests we use to determine if the solution is correct.
# You can add your own at the bottom, but they are otherwise not editable!
def printInteger(n):
    """Print n wrapped in square brackets, without a trailing newline."""
    print('[{}]'.format(n), end='')
test_case_number = 1  # 1-based counter shared with check() via `global`
def check(expected, output):
    """Print a pass/fail line for one test case and bump the global counter.

    On failure, both the expected and the actual value are echoed via
    printInteger() for easy comparison.
    """
    global test_case_number
    if expected == output:
        print('\u2713', 'Test #', test_case_number, sep='')
    else:
        print('\u2717', 'Test #', test_case_number, ': Expected ', sep='', end='')
        printInteger(expected)
        print(' Your output: ', end='')
        printInteger(output)
        print()
    test_case_number += 1
if __name__ == "__main__":
    # Test case 1: the pairs summing to 6 are (2, 4) and (3, 3).
    k_1 = 6
    arr_1 = [1, 2, 3, 4, 3]
    expected_1 = 2
    output_1 = numberOfWays(arr_1, k_1)
    check(expected_1, output_1)

    # Test case 2: 1+5 plus three distinct (3, 3) index pairs.
    k_2 = 6
    arr_2 = [1, 5, 3, 3, 3]
    expected_2 = 4
    output_2 = numberOfWays(arr_2, k_2)
    check(expected_2, output_2)

    # Add your own test cases here
| 26.027273 | 243 | 0.642683 |
Pair Sums
'''
Pair Sums
Given a list of n integers arr[0..(n-1)], determine the number of different pairs of elements within it which sum to k.
If an integer appears in the list multiple times, each copy is considered to be different; that is, two pairs are considered different if one pair includes at least one array index which the other doesn't, even if they include the same values.
Signature
int numberOfWays(int[] arr, int k)
Input
n is in the range [1, 100,000].
Each value arr[i] is in the range [1, 1,000,000,000].
k is in the range [1, 1,000,000,000].
Output
Return the number of different pairs of elements which sum to k.
Example 1
n = 5
k = 6
arr = [1, 2, 3, 4, 3]
output = 2
The valid pairs are 2+4 and 3+3.
Example 2
n = 5
k = 6
arr = [1, 5, 3, 3, 3]
output = 4
There's one valid pair 1+5, and three different valid pairs 3+3 (the 3rd and 4th elements, 3rd and 5th elements, and 4th and 5th elements).
'''
import math
def numberOfWaysSimple(arr, k):
count = 0
for ix in range(len(arr)):
for iy in range(ix+1,len(arr)):
if arr[ix]+arr[iy] == k:
count += 1
return count
def numberOfWays(arr, k):
count = 0
paired = []
match = {}
for ix,x in enumerate(arr):
if not x in match:
match[x] = []
match[x].append(ix)
for iy,y in enumerate(arr):
if k-y in match:
for ix in match[k-y]:
if ix == iy: continue
paired.append( (arr[ix],y) )
count += 1
return int(count/2)
def printInteger(n):
print('[', n, ']', sep='', end='')
test_case_number = 1
def check(expected, output):
global test_case_number
result = False
if expected == output:
result = True
rightTick = '\u2713'
wrongTick = '\u2717'
if result:
print(rightTick, 'Test #', test_case_number, sep='')
else:
print(wrongTick, 'Test #', test_case_number, ': Expected ', sep='', end='')
printInteger(expected)
print(' Your output: ', end='')
printInteger(output)
print()
test_case_number += 1
if __name__ == "__main__":
k_1 = 6
arr_1 = [1, 2, 3, 4, 3]
expected_1 = 2
output_1 = numberOfWays(arr_1, k_1)
check(expected_1, output_1)
k_2 = 6
arr_2 = [1, 5, 3, 3, 3]
expected_2 = 4
output_2 = numberOfWays(arr_2, k_2)
check(expected_2, output_2)
| false | true |
f724a86f608abd7e0fcfb80a41433b1d6e143ea0 | 3,471 | py | Python | setup.py | pbellec/SUITPy | a0450518100d3f5f86423f48d2b7f22c68deebe9 | [
"MIT"
] | null | null | null | setup.py | pbellec/SUITPy | a0450518100d3f5f86423f48d2b7f22c68deebe9 | [
"MIT"
] | null | null | null | setup.py | pbellec/SUITPy | a0450518100d3f5f86423f48d2b7f22c68deebe9 | [
"MIT"
] | null | null | null | #! /usr/bin/env python
"""
@author: maedbhking
based heavily on flexible functionality of nilearn `setup.py`
"""
descr = """A python package for cerebellar neuroimaging..."""
import sys
import os
from setuptools import setup, find_packages
def load_version():
    """Execute SUITPy/version.py and return its resulting globals dict.

    Importing SUITPy directly is avoided on purpose: dependencies such as
    nibabel may not be installed yet, and setup.py is what installs them.
    """
    version_globals = {}
    version_path = os.path.join('SUITPy', 'version.py')
    with open(version_path) as handle:
        exec(handle.read(), version_globals)
    return version_globals
def is_installing():
    """Return the (truthy) set of install-like commands found in sys.argv.

    Allows combined command lines such as "python setup.py build install".
    """
    return {'install', 'develop'} & set(sys.argv)
def list_required_packages():
    """Build the install_requires list ('pkg>=min_version') from version metadata.

    Reads the module-level _VERSION_GLOBALS populated by load_version().
    The original built the list and then copied it element-by-element into
    a second identical list for no reason; this returns it directly.
    """
    return ['%s>=%s' % (mod, meta['min_version'])
            for mod, meta in _VERSION_GLOBALS['REQUIRED_MODULE_METADATA']]
# Make sources available using relative paths from this file's directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))

# Version and dependency metadata come from SUITPy/version.py (see load_version).
_VERSION_GLOBALS = load_version()
DISTNAME = 'SUITPy'
DESCRIPTION = 'Mapping and plotting cerebellar fMRI data in Python'
with open('README.rst') as fp:
    LONG_DESCRIPTION = fp.read()
MAINTAINER = 'Maedbh King'
MAINTAINER_EMAIL = 'maedbhking@berkeley.edu'
URL = 'https://github.com/DiedrichsenLab/SUITPy'
LICENSE = 'MIT'
DOWNLOAD_URL = 'https://github.com/DiedrichsenLab/SUITPy/archive/refs/tags/v1.0.3.tar.gz'
VERSION = _VERSION_GLOBALS['__version__']


if __name__ == "__main__":
    # Only verify runtime dependencies when actually installing, not when
    # building sdists/wheels or querying metadata.
    if is_installing():
        module_check_fn = _VERSION_GLOBALS['_check_module_dependencies']
        module_check_fn(is_SUITPy_installing=True)

    setup(name=DISTNAME,
          maintainer=MAINTAINER,
          maintainer_email=MAINTAINER_EMAIL,
          description=DESCRIPTION,
          license=LICENSE,
          url=URL,
          version=VERSION,
          download_url=DOWNLOAD_URL,
          long_description=LONG_DESCRIPTION,
          zip_safe=False,  # the package can run out of an .egg file
          classifiers=[
              'Intended Audience :: Science/Research',
              'Intended Audience :: Developers',
              'License :: OSI Approved',
              'Programming Language :: Python',
              'Topic :: Software Development',
              'Topic :: Scientific/Engineering',
              'Operating System :: Microsoft :: Windows',
              'Operating System :: POSIX',
              'Operating System :: Unix',
              'Operating System :: MacOS',
              'Programming Language :: Python :: 3.6',
              'Programming Language :: Python :: 3.7',
              'Programming Language :: Python :: 3.8',
              'Programming Language :: Python :: 3.9',
          ],
          packages=find_packages(),
          # Bundle the surface geometry/scene data files shipped with the package.
          package_data={
              'SUITPy.surfaces': ['*.surf.gii', '*.C.scene', '*.shape.gii', '*.txt'],
          },
          install_requires=list_required_packages(),
          python_requires='>=3.6',
          )
| 34.366337 | 89 | 0.63123 |
descr = """A python package for cerebellar neuroimaging..."""
import sys
import os
from setuptools import setup, find_packages
def load_version():
globals_dict = {}
with open(os.path.join('SUITPy', 'version.py')) as fp:
exec(fp.read(), globals_dict)
return globals_dict
def is_installing():
# Allow command-lines such as "python setup.py build install"
install_commands = set(['install', 'develop'])
return install_commands.intersection(set(sys.argv))
def list_required_packages():
required_packages = []
required_packages_orig = ['%s>=%s' % (mod, meta['min_version'])
for mod, meta
in _VERSION_GLOBALS['REQUIRED_MODULE_METADATA']
]
for package in required_packages_orig:
required_packages.append(package)
return required_packages
# Make sources available using relative paths from this file's directory.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
_VERSION_GLOBALS = load_version()
DISTNAME = 'SUITPy'
DESCRIPTION = 'Mapping and plotting cerebellar fMRI data in Python'
with open('README.rst') as fp:
LONG_DESCRIPTION = fp.read()
MAINTAINER = 'Maedbh King'
MAINTAINER_EMAIL = 'maedbhking@berkeley.edu'
URL = 'https://github.com/DiedrichsenLab/SUITPy'
LICENSE = 'MIT'
DOWNLOAD_URL = 'https://github.com/DiedrichsenLab/SUITPy/archive/refs/tags/v1.0.3.tar.gz'
VERSION = _VERSION_GLOBALS['__version__']
if __name__ == "__main__":
if is_installing():
module_check_fn = _VERSION_GLOBALS['_check_module_dependencies']
module_check_fn(is_SUITPy_installing=True)
setup(name=DISTNAME,
maintainer=MAINTAINER,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=LONG_DESCRIPTION,
zip_safe=False,
classifiers=[
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
packages=find_packages(),
package_data={
'SUITPy.surfaces': ['*.surf.gii', '*.C.scene', '*.shape.gii', '*.txt'],
},
install_requires=list_required_packages(),
python_requires='>=3.6',
)
| true | true |
f724a8783ab01ba4bbe2973c39db61d104a5a72d | 4,839 | py | Python | pydeep/dbn.py | dsakagi/pydeep | e4d14da74d6a6d007a9cd627b5dd81c9f1bfaa72 | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2017-09-22T20:28:32.000Z | 2020-04-01T23:55:49.000Z | pydeep/dbn.py | dsakagi/pydeep | e4d14da74d6a6d007a9cd627b5dd81c9f1bfaa72 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | pydeep/dbn.py | dsakagi/pydeep | e4d14da74d6a6d007a9cd627b5dd81c9f1bfaa72 | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | import rbm_rm as RBM
import Learning_rm as Learning
class DBN:
def __init__(self, n_in, n_out, hidden_arch, prediction_type, lts=None, net_regs=None, gaussian_input=False, default_lt='Logistic'):
self.n_in = n_in
self.n_out= n_out
# determine the architectures for the autoencoder and the
# prediction network
if isinstance(hidden_arch, int):
hidden_arch = [hidden_arch]
if lts is None:
lts = [default_lt for i in xrange(len(hidden_arch) ) ] + [prediction_type]
self._init_rbms(hidden_arch, lts, gaussian_input)
self._init_ae(hidden_arch, lts, gaussian_input)
self._init_net(hidden_arch, lts, net_regs)
def _init_rbms(self, hidden_arch, lts, gaussian_input):
self.rbms = [None for i in xrange(len(lts) -1)]
if gaussian_input:
self.rbms[0] = RBM.GV_RBM(hidden_arch[0], self.n_in)
else:
self.rbms[0] = RBM.RBM(hidden_arch[0], self.n_in)
for i in xrange(len(self.rbms) - 1):
self.rbms[i+1] = RBM.RBM(hidden_arch[i+1], hidden_arch[i])
def _init_ae(self, hidden_arch, lts, gaussian_input):
if len(hidden_arch) > 1:
ae_arch = [self.n_in] + hidden_arch + hidden_arch[-2::-1] + [self.n_in]
else:
ae_arch = [self.n_in] + hidden_arch + [self.n_in]
ae_lts = lts[:-1] + lts[-2::-1]
if gaussian_input:
ae_lts[-1] = 'Linear'
self.ae = Learning.Net(ae_arch, ae_lts, [Learning.NetReg() for lt in ae_lts])
def _init_net(self, hidden_arch, lts, net_regs):
arch = [self.n_in] + hidden_arch + [self.n_out]
if net_regs is None:
net_regs = [Learning.NetReg() for lt in lts]
for reg in net_regs:
reg.dropout = True
reg.drop_rate = 0.5
reg.max_constraint = True
reg.max_unit_weight = 15
net_regs[0].drop_rate = 0.2
if lts[-1] is 'Softmax':
e_type = 'CrossEntropy'
else:
e_type = 'MSE'
self.net = Learning.Net(arch, lts, net_regs, e_type)
def pretrain(self, train, valid, rbm_train_params=None, rbm_0_params=None, ae_train_params=None):
provided_all_params = isinstance(rbm_train_params, list)
if rbm_train_params is None:
rbm_train_params = [None] + [RBM.RBMTrainParams() for rbm in self.rbms[1:]]
elif isinstance(rbm_train_params, RBM.RBMTrainParams):
rbm_train_params = [None] + [rbm_train_params for rbm in self.rbms[1:]]
if not provided_all_params:
for rbm_p in rbm_train_params[1:]:
rbm_p.maxepoch = 5
rbm_p.mu = Learning.ConstantSchedule(0.0)
rbm_p.eta = Learning.ConstantSchedule(0.1)
if rbm_0_params is not None and not provided_all_params:
rbm_train_params[0] = rbm_0_params
elif not provided_all_params:
rbm_train_params[0] = RBM.RBMTrainParams()
self.pretrain_rbm_stack(train, valid, rbm_train_params)
self.transfer_weights_rbm_2_ae()
self.pretrain_ae(train, valid, ae_train_params)
self.transfer_weights_ae_2_net()
def transfer_weights_rbm_2_ae(self):
for i in xrange(len(self.rbms)):
self.ae.layers[i].W = self.rbms[i].W.copy()
self.ae.layers[i].h = self.rbms[i].h.copy()
for i in xrange(len(self.rbms)):
nlay = len(self.ae.layers)
j = nlay - i - 1
self.ae.layers[j].W = self.rbms[i].W.transpose()
self.ae.layers[j].h = self.rbms[i].v.copy()
def transfer_weights_ae_2_net(self):
for i in xrange(len(self.net.layers) - 1):
self.net.layers[i].W = self.ae.layers[i].W.copy()
self.net.layers[i].h = self.ae.layers[i].h.copy()
def pretrain_rbm_stack(self, train, valid, rbm_train_params):
trep = train #This will be the representation of data at each level
vrep = valid
for i in xrange(len(self.rbms)):
RBM.learn(self.rbms[i], trep, vrep, rbm_train_params[i])
print 'Finished training layer %d' % i
trep = self.rbms[i].up(trep)
vrep = self.rbms[i].up(vrep)
def pretrain_ae(self, train, valid, ae_train_params):
if ae_train_params is None:
ae_train_params = Learning.NetTrainParams()
ae_train_params.mu = Learning.ConstantSchedule(0.1)
ae_train_params.eta = Learning.ConstantSchedule(1.0)
Learning.train_sgd_valid(self.ae, train, train, valid, valid, ae_train_params)
def train(self, train, targets, net_train_params):
Learning.train_sgd(self.net, train, targets, net_train_params)
def predict(self, data):
return self.net.predict(data)
| 40.325 | 136 | 0.613557 | import rbm_rm as RBM
import Learning_rm as Learning
class DBN:
def __init__(self, n_in, n_out, hidden_arch, prediction_type, lts=None, net_regs=None, gaussian_input=False, default_lt='Logistic'):
self.n_in = n_in
self.n_out= n_out
if isinstance(hidden_arch, int):
hidden_arch = [hidden_arch]
if lts is None:
lts = [default_lt for i in xrange(len(hidden_arch) ) ] + [prediction_type]
self._init_rbms(hidden_arch, lts, gaussian_input)
self._init_ae(hidden_arch, lts, gaussian_input)
self._init_net(hidden_arch, lts, net_regs)
def _init_rbms(self, hidden_arch, lts, gaussian_input):
self.rbms = [None for i in xrange(len(lts) -1)]
if gaussian_input:
self.rbms[0] = RBM.GV_RBM(hidden_arch[0], self.n_in)
else:
self.rbms[0] = RBM.RBM(hidden_arch[0], self.n_in)
for i in xrange(len(self.rbms) - 1):
self.rbms[i+1] = RBM.RBM(hidden_arch[i+1], hidden_arch[i])
def _init_ae(self, hidden_arch, lts, gaussian_input):
if len(hidden_arch) > 1:
ae_arch = [self.n_in] + hidden_arch + hidden_arch[-2::-1] + [self.n_in]
else:
ae_arch = [self.n_in] + hidden_arch + [self.n_in]
ae_lts = lts[:-1] + lts[-2::-1]
if gaussian_input:
ae_lts[-1] = 'Linear'
self.ae = Learning.Net(ae_arch, ae_lts, [Learning.NetReg() for lt in ae_lts])
def _init_net(self, hidden_arch, lts, net_regs):
arch = [self.n_in] + hidden_arch + [self.n_out]
if net_regs is None:
net_regs = [Learning.NetReg() for lt in lts]
for reg in net_regs:
reg.dropout = True
reg.drop_rate = 0.5
reg.max_constraint = True
reg.max_unit_weight = 15
net_regs[0].drop_rate = 0.2
if lts[-1] is 'Softmax':
e_type = 'CrossEntropy'
else:
e_type = 'MSE'
self.net = Learning.Net(arch, lts, net_regs, e_type)
def pretrain(self, train, valid, rbm_train_params=None, rbm_0_params=None, ae_train_params=None):
provided_all_params = isinstance(rbm_train_params, list)
if rbm_train_params is None:
rbm_train_params = [None] + [RBM.RBMTrainParams() for rbm in self.rbms[1:]]
elif isinstance(rbm_train_params, RBM.RBMTrainParams):
rbm_train_params = [None] + [rbm_train_params for rbm in self.rbms[1:]]
if not provided_all_params:
for rbm_p in rbm_train_params[1:]:
rbm_p.maxepoch = 5
rbm_p.mu = Learning.ConstantSchedule(0.0)
rbm_p.eta = Learning.ConstantSchedule(0.1)
if rbm_0_params is not None and not provided_all_params:
rbm_train_params[0] = rbm_0_params
elif not provided_all_params:
rbm_train_params[0] = RBM.RBMTrainParams()
self.pretrain_rbm_stack(train, valid, rbm_train_params)
self.transfer_weights_rbm_2_ae()
self.pretrain_ae(train, valid, ae_train_params)
self.transfer_weights_ae_2_net()
def transfer_weights_rbm_2_ae(self):
for i in xrange(len(self.rbms)):
self.ae.layers[i].W = self.rbms[i].W.copy()
self.ae.layers[i].h = self.rbms[i].h.copy()
for i in xrange(len(self.rbms)):
nlay = len(self.ae.layers)
j = nlay - i - 1
self.ae.layers[j].W = self.rbms[i].W.transpose()
self.ae.layers[j].h = self.rbms[i].v.copy()
def transfer_weights_ae_2_net(self):
for i in xrange(len(self.net.layers) - 1):
self.net.layers[i].W = self.ae.layers[i].W.copy()
self.net.layers[i].h = self.ae.layers[i].h.copy()
def pretrain_rbm_stack(self, train, valid, rbm_train_params):
trep = train
vrep = valid
for i in xrange(len(self.rbms)):
RBM.learn(self.rbms[i], trep, vrep, rbm_train_params[i])
print 'Finished training layer %d' % i
trep = self.rbms[i].up(trep)
vrep = self.rbms[i].up(vrep)
def pretrain_ae(self, train, valid, ae_train_params):
if ae_train_params is None:
ae_train_params = Learning.NetTrainParams()
ae_train_params.mu = Learning.ConstantSchedule(0.1)
ae_train_params.eta = Learning.ConstantSchedule(1.0)
Learning.train_sgd_valid(self.ae, train, train, valid, valid, ae_train_params)
def train(self, train, targets, net_train_params):
Learning.train_sgd(self.net, train, targets, net_train_params)
def predict(self, data):
return self.net.predict(data)
| false | true |
f724a880d570332da47359919de6f56a2a986caf | 821 | py | Python | deckz/cli/watch.py | m09/deckz | 0f97ef2a43c2c714ac18173a4fe3266cccba31e2 | [
"Apache-2.0"
] | null | null | null | deckz/cli/watch.py | m09/deckz | 0f97ef2a43c2c714ac18173a4fe3266cccba31e2 | [
"Apache-2.0"
] | 41 | 2020-04-06T13:49:18.000Z | 2020-12-24T11:14:47.000Z | deckz/cli/watch.py | m09/deckz | 0f97ef2a43c2c714ac18173a4fe3266cccba31e2 | [
"Apache-2.0"
] | null | null | null | from logging import getLogger
from pathlib import Path
from typing import List, Optional
from typer import Argument
from deckz.cli import app
from deckz.paths import Paths
from deckz.watching import watch as watching_watch
_logger = getLogger(__name__)
@app.command()
def watch(
    targets: Optional[List[str]] = Argument(None),
    handout: bool = False,
    presentation: bool = True,
    print: bool = False,  # shadows the builtin, but the name defines the CLI flag
    minimum_delay: int = 5,
    deck_path: Path = Path("."),
) -> None:
    """Compile on change.

    targets: optional whitelist of targets to build (all when omitted).
    handout / presentation / print: which output flavors to build.
    minimum_delay: minimum delay between rebuilds (presumably seconds --
        confirm against deckz.watching).
    deck_path: root directory of the deck; defaults to the current directory.
    """
    _logger.info("Watching current and shared directories")
    watching_watch(
        minimum_delay=minimum_delay,
        paths=Paths.from_defaults(deck_path),
        build_handout=handout,
        build_presentation=presentation,
        build_print=print,
        target_whitelist=targets,
    )
| 24.878788 | 59 | 0.699147 | from logging import getLogger
from pathlib import Path
from typing import List, Optional
from typer import Argument
from deckz.cli import app
from deckz.paths import Paths
from deckz.watching import watch as watching_watch
_logger = getLogger(__name__)
@app.command()
def watch(
targets: Optional[List[str]] = Argument(None),
handout: bool = False,
presentation: bool = True,
print: bool = False,
minimum_delay: int = 5,
deck_path: Path = Path("."),
) -> None:
_logger.info("Watching current and shared directories")
watching_watch(
minimum_delay=minimum_delay,
paths=Paths.from_defaults(deck_path),
build_handout=handout,
build_presentation=presentation,
build_print=print,
target_whitelist=targets,
)
| true | true |
f724a8d819de226898e0d3a10a65ce1725cf1a9d | 1,629 | py | Python | tce/tcloud/cvm/ResetInstancesInternetMaxBandwidth.py | liangzhengkang/tencentcloud-sdk-python | c8f990b33f3701e04149a3d613538829a88269eb | [
"Apache-2.0"
] | null | null | null | tce/tcloud/cvm/ResetInstancesInternetMaxBandwidth.py | liangzhengkang/tencentcloud-sdk-python | c8f990b33f3701e04149a3d613538829a88269eb | [
"Apache-2.0"
] | null | null | null | tce/tcloud/cvm/ResetInstancesInternetMaxBandwidth.py | liangzhengkang/tencentcloud-sdk-python | c8f990b33f3701e04149a3d613538829a88269eb | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import os
from tencentcloud.common import credential
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
# Import the client and models of the target product (CVM).
from tencentcloud.cvm.v20170312 import cvm_client, models
import json
# Import the optional configuration classes.
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
import ssl
# Disable TLS certificate verification process-wide.
# NOTE(review): insecure outside of a trusted test environment.
ssl._create_default_https_context = ssl._create_unverified_context
try:
    # Build a credential object from the Tencent Cloud account secretId / secretKey.
    # SECURITY: credentials are hard-coded and committed here; they should be
    # rotated and loaded from the environment or a secrets store instead.
    cred = credential.Credential("AKIDylMjqkOq7Azay9Nq8D5kCSVM1Sfft4Sd", "K8lBONAk7IEzXt30kGXcS5UfbJm0zkG4")
    httpProfile = HttpProfile()
    httpProfile.endpoint = "cvm.api3.test.403a.tcecqpoc.fsphere.cn"
    clientProfile = ClientProfile()
    clientProfile.httpProfile = httpProfile
    # Instantiate the CVM client for the "shanghai" region; clientProfile is optional.
    client = cvm_client.CvmClient(cred, "shanghai", clientProfile)
    # Each API call has a corresponding request object.
    req = models.ResetInstancesInternetMaxBandwidthRequest()
    # Request parameters can also be assigned from a standard JSON string;
    # this is equivalent to setting the attributes one by one.
    params = '{"InstanceIds":["ins-gwggvy39"],"InternetAccessible":{"InternetMaxBandwidthOut":30}}'
    req.from_json_string(params)
    # Send the request through the client; the method name matches the request
    # object, and the returned resp is the corresponding *Response instance.
    resp = client.ResetInstancesInternetMaxBandwidth(req)
    # Print the response serialized as a JSON string.
    print(resp.to_json_string())
    # Individual fields can also be read from the response object; see the
    # official API docs (or the response class definition) for field meanings.
    # print(resp.TotalCount)
except TencentCloudSDKException as err:
    print(err)
import os
from tencentcloud.common import credential
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.cvm.v20170312 import cvm_client, models
import json
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.profile.http_profile import HttpProfile
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
try:
cred = credential.Credential("AKIDylMjqkOq7Azay9Nq8D5kCSVM1Sfft4Sd", "K8lBONAk7IEzXt30kGXcS5UfbJm0zkG4")
httpProfile = HttpProfile()
httpProfile.endpoint = "cvm.api3.test.403a.tcecqpoc.fsphere.cn"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = cvm_client.CvmClient(cred, "shanghai", clientProfile)
req = models.ResetInstancesInternetMaxBandwidthRequest()
params = '{"InstanceIds":["ins-gwggvy39"],"InternetAccessible":{"InternetMaxBandwidthOut":30}}'
req.from_json_string(params)
resp = client.ResetInstancesInternetMaxBandwidth(req)
print(resp.to_json_string())
except TencentCloudSDKException as err:
print(err) | true | true |
f724a945f1fb6084253128c597e56a1ed312286b | 654 | py | Python | src/contas/models.py | br-monteiro/learning-django | 68f16f17e0b4357d15a5b7e9c9a66da2bccd7a63 | [
"MIT"
] | null | null | null | src/contas/models.py | br-monteiro/learning-django | 68f16f17e0b4357d15a5b7e9c9a66da2bccd7a63 | [
"MIT"
] | null | null | null | src/contas/models.py | br-monteiro/learning-django | 68f16f17e0b4357d15a5b7e9c9a66da2bccd7a63 | [
"MIT"
] | null | null | null | from django.db import models
class Category(models.Model):
    """A label used to group transactions (admin plural: 'Categorias')."""
    name = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)  # set once, on first save

    def __str__(self):
        return self.name

    class Meta:
        verbose_name_plural = 'Categorias'
class Transactions(models.Model):
    """A single financial transaction belonging to one Category."""
    date = models.DateField()
    description = models.CharField(max_length=100)
    value = models.DecimalField(max_digits=7, decimal_places=2)  # up to 99999.99
    observations = models.TextField(null=True, blank=True)  # optional free-form notes
    # Deleting a Category cascades and removes its transactions.
    category=models.ForeignKey(Category, on_delete=models.CASCADE)

    def __str__(self):
        return self.description

    class Meta:
        verbose_name_plural = 'Transações'
class Category(models.Model):
name = models.CharField(max_length=100)
created_at = models.DateTimeField(auto_now_add=True)
def __str__(self):
return self.name
class Meta:
verbose_name_plural = 'Categorias'
class Transactions(models.Model):
date = models.DateField()
description = models.CharField(max_length=100)
value = models.DecimalField(max_digits=7, decimal_places=2)
observations = models.TextField(null=True, blank=True)
category=models.ForeignKey(Category, on_delete=models.CASCADE)
def __str__(self):
return self.description
class Meta:
verbose_name_plural = 'Transações' | true | true |
f724aaa61baa5d75e1ec4a635d7608adf9883661 | 3,135 | py | Python | src/dms-preview/azext_dms/vendored_sdks/datamigration/models/migrate_sql_server_sql_mi_task_output_agent_job_level.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | [
"MIT"
] | 1 | 2019-05-10T19:58:09.000Z | 2019-05-10T19:58:09.000Z | src/dms-preview/azext_dms/vendored_sdks/datamigration/models/migrate_sql_server_sql_mi_task_output_agent_job_level.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | src/dms-preview/azext_dms/vendored_sdks/datamigration/models/migrate_sql_server_sql_mi_task_output_agent_job_level.py | mayank88mahajan/azure-cli-extensions | 8bd389a1877bffd14052bec5519ce75dc6fc34cf | [
"MIT"
] | 1 | 2018-08-28T14:36:47.000Z | 2018-08-28T14:36:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .migrate_sql_server_sql_mi_task_output import MigrateSqlServerSqlMITaskOutput
class MigrateSqlServerSqlMITaskOutputAgentJobLevel(MigrateSqlServerSqlMITaskOutput):
"""MigrateSqlServerSqlMITaskOutputAgentJobLevel.
Variables are only populated by the server, and will be ignored when
sending a request.
All required parameters must be populated in order to send to Azure.
:ivar id: Result identifier
:vartype id: str
:param result_type: Required. Constant filled by server.
:type result_type: str
:ivar name: Agent Job name.
:vartype name: str
:ivar is_enabled: The state of the original Agent Job.
:vartype is_enabled: bool
:ivar state: Current state of migration. Possible values include: 'None',
'InProgress', 'Failed', 'Warning', 'Completed', 'Skipped', 'Stopped'
:vartype state: str or ~azure.mgmt.datamigration.models.MigrationState
:ivar started_on: Migration start time
:vartype started_on: datetime
:ivar ended_on: Migration end time
:vartype ended_on: datetime
:ivar message: Migration progress message
:vartype message: str
:ivar exceptions_and_warnings: Migration errors and warnings per job
:vartype exceptions_and_warnings:
list[~azure.mgmt.datamigration.models.ReportableException]
"""
_validation = {
'id': {'readonly': True},
'result_type': {'required': True},
'name': {'readonly': True},
'is_enabled': {'readonly': True},
'state': {'readonly': True},
'started_on': {'readonly': True},
'ended_on': {'readonly': True},
'message': {'readonly': True},
'exceptions_and_warnings': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'result_type': {'key': 'resultType', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
'state': {'key': 'state', 'type': 'str'},
'started_on': {'key': 'startedOn', 'type': 'iso-8601'},
'ended_on': {'key': 'endedOn', 'type': 'iso-8601'},
'message': {'key': 'message', 'type': 'str'},
'exceptions_and_warnings': {'key': 'exceptionsAndWarnings', 'type': '[ReportableException]'},
}
def __init__(self, **kwargs):
super(MigrateSqlServerSqlMITaskOutputAgentJobLevel, self).__init__(**kwargs)
self.name = None
self.is_enabled = None
self.state = None
self.started_on = None
self.ended_on = None
self.message = None
self.exceptions_and_warnings = None
self.result_type = 'AgentJobLevelOutput'
| 39.683544 | 101 | 0.625199 |
from .migrate_sql_server_sql_mi_task_output import MigrateSqlServerSqlMITaskOutput
class MigrateSqlServerSqlMITaskOutputAgentJobLevel(MigrateSqlServerSqlMITaskOutput):
_validation = {
'id': {'readonly': True},
'result_type': {'required': True},
'name': {'readonly': True},
'is_enabled': {'readonly': True},
'state': {'readonly': True},
'started_on': {'readonly': True},
'ended_on': {'readonly': True},
'message': {'readonly': True},
'exceptions_and_warnings': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'result_type': {'key': 'resultType', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'is_enabled': {'key': 'isEnabled', 'type': 'bool'},
'state': {'key': 'state', 'type': 'str'},
'started_on': {'key': 'startedOn', 'type': 'iso-8601'},
'ended_on': {'key': 'endedOn', 'type': 'iso-8601'},
'message': {'key': 'message', 'type': 'str'},
'exceptions_and_warnings': {'key': 'exceptionsAndWarnings', 'type': '[ReportableException]'},
}
def __init__(self, **kwargs):
super(MigrateSqlServerSqlMITaskOutputAgentJobLevel, self).__init__(**kwargs)
self.name = None
self.is_enabled = None
self.state = None
self.started_on = None
self.ended_on = None
self.message = None
self.exceptions_and_warnings = None
self.result_type = 'AgentJobLevelOutput'
| true | true |
f724ab3e48fa4fed5007bb73f609343695c1c2e7 | 2,530 | py | Python | GPflow/test_gplvm.py | blutooth/dgp | bedbbc3595fbe124d7a06c3d6d64f9009304491e | [
"Apache-2.0"
] | 1 | 2018-09-06T04:42:37.000Z | 2018-09-06T04:42:37.000Z | GPflow/test_gplvm.py | blutooth/dgp | bedbbc3595fbe124d7a06c3d6d64f9009304491e | [
"Apache-2.0"
] | null | null | null | GPflow/test_gplvm.py | blutooth/dgp | bedbbc3595fbe124d7a06c3d6d64f9009304491e | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import kernels
import numpy as np
import unittest
import gplvm
class TestBayesianGPLVM(unittest.TestCase):
def setUp(self):
N = 10 # number of data points
D = 1 # latent dimensions
M = 5 # inducings points
R = 2 # data dimension
k = kernels.RBF(D)
Z = np.linspace(0,1,M)
Z = np.expand_dims(Z, D)
rng = np.random.RandomState(1)
Y = rng.randn(N,R)
self.m = gplvm.BayesianGPLVM(X_mean = np.zeros((N,D)),
X_var=np.ones((N,D)), Y=Y, kern=k, Z=Z)
def test_linearSolution(self):
# You could implement a standard GPLVM, and show that it recovers PCA when the kernel is linear ->
# How to deal with rotations and linear rescalings.
pass
def test_GPLVM_BGPLVM_Equivalence(self):
# You could set the variance of the BGPLVM to zero and show that it's the same as the GPLVM
# BGPLVM with variance to 0 is same as GPLVM
N = 10 # number of data points
Q = 1 # latent dimensions
M = 5 # inducing points
D = 2 # data dimension
k = kernels.RBF(Q)
Z = np.linspace(0, 1, M)
Z = np.expand_dims(Z, Q)
rng = np.random.RandomState(1)
Y = rng.randn(N, Q)
XInit = rng.rand(N, Q)
# use 0 variance for BGPLVM
m = gplvm.BayesianGPLVM(X_mean=XInit, X_var=np.ones((N, Q)), Y=Y, kern=k, Z=Z)
print(m)
m.X_var.fixed = True
ll = m.compute_log_likelihood()
print(ll)
m = gplvm.BayesianGPLVM(X_mean=XInit, X_var=np.ones((N, Q)), Y=Y, kern=k, Z=Z, X_prior_mean=np.zeros((N,Q)), X_prior_var = np.ones((N,Q)))
llprior = m.compute_log_likelihood()
print(m)
print(llprior)
assert ll == llprior
Z = np.linspace(0, 1, M*2)
Z = np.expand_dims(Z, Q)
m = gplvm.BayesianGPLVM(X_mean=XInit, X_var=np.ones((N, Q)), Y=Y, kern=k, Z=Z, X_prior_mean=np.zeros((N,Q)), X_prior_var = np.ones((N,Q)))
llmoreZ = m.compute_log_likelihood()
print(llmoreZ)
assert llmoreZ > ll
# m.optimize()
# mGPLVM = GPflow.gplvm.GPLVM(Y=Y, Q=Q, kern=k, XInit=XInit)
# mGPLVM.optimize()
# assert np.allclose(m.X_mean.value, mGPLVM.X.value)
# this does not work - f= +Infinity!
def test_gplvmOptimization(self):
print('Run optimisation')
# self.m.optimize()
if __name__ == "__main__":
unittest.main()
| 33.733333 | 146 | 0.581818 | from __future__ import print_function
import kernels
import numpy as np
import unittest
import gplvm
class TestBayesianGPLVM(unittest.TestCase):
def setUp(self):
N = 10
D = 1
M = 5
R = 2
k = kernels.RBF(D)
Z = np.linspace(0,1,M)
Z = np.expand_dims(Z, D)
rng = np.random.RandomState(1)
Y = rng.randn(N,R)
self.m = gplvm.BayesianGPLVM(X_mean = np.zeros((N,D)),
X_var=np.ones((N,D)), Y=Y, kern=k, Z=Z)
def test_linearSolution(self):
pass
def test_GPLVM_BGPLVM_Equivalence(self):
# BGPLVM with variance to 0 is same as GPLVM
N = 10 # number of data points
Q = 1 # latent dimensions
M = 5 # inducing points
D = 2 # data dimension
k = kernels.RBF(Q)
Z = np.linspace(0, 1, M)
Z = np.expand_dims(Z, Q)
rng = np.random.RandomState(1)
Y = rng.randn(N, Q)
XInit = rng.rand(N, Q)
# use 0 variance for BGPLVM
m = gplvm.BayesianGPLVM(X_mean=XInit, X_var=np.ones((N, Q)), Y=Y, kern=k, Z=Z)
print(m)
m.X_var.fixed = True
ll = m.compute_log_likelihood()
print(ll)
m = gplvm.BayesianGPLVM(X_mean=XInit, X_var=np.ones((N, Q)), Y=Y, kern=k, Z=Z, X_prior_mean=np.zeros((N,Q)), X_prior_var = np.ones((N,Q)))
llprior = m.compute_log_likelihood()
print(m)
print(llprior)
assert ll == llprior
Z = np.linspace(0, 1, M*2)
Z = np.expand_dims(Z, Q)
m = gplvm.BayesianGPLVM(X_mean=XInit, X_var=np.ones((N, Q)), Y=Y, kern=k, Z=Z, X_prior_mean=np.zeros((N,Q)), X_prior_var = np.ones((N,Q)))
llmoreZ = m.compute_log_likelihood()
print(llmoreZ)
assert llmoreZ > ll
# m.optimize()
# mGPLVM = GPflow.gplvm.GPLVM(Y=Y, Q=Q, kern=k, XInit=XInit)
# mGPLVM.optimize()
# assert np.allclose(m.X_mean.value, mGPLVM.X.value)
# this does not work - f= +Infinity!
def test_gplvmOptimization(self):
print('Run optimisation')
# self.m.optimize()
if __name__ == "__main__":
unittest.main()
| true | true |
f724ae22374b14f37ab0977ec53f524308417895 | 1,422 | py | Python | demo/wp_scalogram.py | astromaddie/pywavelets-py3 | 9d434929cb748eb44be86a4b712d8f3009326693 | [
"MIT"
] | 1 | 2018-03-13T10:44:47.000Z | 2018-03-13T10:44:47.000Z | demo/wp_scalogram.py | astromaddie/pywavelets-py3 | 9d434929cb748eb44be86a4b712d8f3009326693 | [
"MIT"
] | null | null | null | demo/wp_scalogram.py | astromaddie/pywavelets-py3 | 9d434929cb748eb44be86a4b712d8f3009326693 | [
"MIT"
] | 1 | 2018-03-13T10:44:54.000Z | 2018-03-13T10:44:54.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pywt
x = np.linspace(0, 1, num=512)
data = np.sin(250 * np.pi * x**2)
wavelet = 'db2'
level = 4
order = "freq" # other option is "normal"
interpolation = 'nearest'
cmap = plt.cm.cool
# Construct wavelet packet
wp = pywt.WaveletPacket(data, wavelet, 'sym', maxlevel=level)
nodes = wp.get_level(level, order=order)
labels = [n.path for n in nodes]
values = np.array([n.data for n in nodes], 'd')
values = abs(values)
# Show signal and wavelet packet coefficients
fig = plt.figure()
fig.subplots_adjust(hspace=0.2, bottom=.03, left=.07, right=.97, top=.92)
ax = fig.add_subplot(2, 1, 1)
ax.set_title("linchirp signal")
ax.plot(x, data, 'b')
ax.set_xlim(0, x[-1])
ax = fig.add_subplot(2, 1, 2)
ax.set_title("Wavelet packet coefficients at level %d" % level)
ax.imshow(values, interpolation=interpolation, cmap=cmap, aspect="auto",
origin="lower", extent=[0, 1, 0, len(values)])
ax.set_yticks(np.arange(0.5, len(labels) + 0.5), labels)
# Show spectrogram and wavelet packet coefficients
fig2 = plt.figure()
ax2 = fig2.add_subplot(211)
ax2.specgram(data, NFFT=64, noverlap=32, cmap=cmap)
ax2.set_title("Spectrogram of signal")
ax3 = fig2.add_subplot(212)
ax3.imshow(values, origin='upper', extent=[-1,1,-1,1],
interpolation='nearest')
ax3.set_title("Wavelet packet coefficients")
plt.show()
| 27.346154 | 73 | 0.696203 |
import numpy as np
import matplotlib.pyplot as plt
import pywt
x = np.linspace(0, 1, num=512)
data = np.sin(250 * np.pi * x**2)
wavelet = 'db2'
level = 4
order = "freq"
interpolation = 'nearest'
cmap = plt.cm.cool
wp = pywt.WaveletPacket(data, wavelet, 'sym', maxlevel=level)
nodes = wp.get_level(level, order=order)
labels = [n.path for n in nodes]
values = np.array([n.data for n in nodes], 'd')
values = abs(values)
fig = plt.figure()
fig.subplots_adjust(hspace=0.2, bottom=.03, left=.07, right=.97, top=.92)
ax = fig.add_subplot(2, 1, 1)
ax.set_title("linchirp signal")
ax.plot(x, data, 'b')
ax.set_xlim(0, x[-1])
ax = fig.add_subplot(2, 1, 2)
ax.set_title("Wavelet packet coefficients at level %d" % level)
ax.imshow(values, interpolation=interpolation, cmap=cmap, aspect="auto",
origin="lower", extent=[0, 1, 0, len(values)])
ax.set_yticks(np.arange(0.5, len(labels) + 0.5), labels)
fig2 = plt.figure()
ax2 = fig2.add_subplot(211)
ax2.specgram(data, NFFT=64, noverlap=32, cmap=cmap)
ax2.set_title("Spectrogram of signal")
ax3 = fig2.add_subplot(212)
ax3.imshow(values, origin='upper', extent=[-1,1,-1,1],
interpolation='nearest')
ax3.set_title("Wavelet packet coefficients")
plt.show()
| true | true |
f724ae68aed162517af1cec117aa39d242e353b8 | 6,048 | py | Python | scripts/init_theano_settings.py | dylanirion/wbia-plugin-cnn | cd0018b829de3f077ca289551492cdad84806ed6 | [
"Apache-2.0"
] | null | null | null | scripts/init_theano_settings.py | dylanirion/wbia-plugin-cnn | cd0018b829de3f077ca289551492cdad84806ed6 | [
"Apache-2.0"
] | 4 | 2020-07-02T19:25:43.000Z | 2020-08-27T18:05:15.000Z | scripts/init_theano_settings.py | dylanirion/wbia-plugin-cnn | cd0018b829de3f077ca289551492cdad84806ed6 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
References:
http://deeplearning.net/software/theano/library/config.html
Check Settings:
python -c 'import theano; print theano.config' | less
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
import os
from os.path import join
(print, rrr, profile) = ut.inject2(__name__)
"""
CommandLine:
cd %CODE_DIR%/wbia_cnn/code
cd $CODE_DIR/wbia_cnn/code
code
cd wbia_cnn/code
python train.py
Purge from system and environ:
cd
python -c "import utool as ut; ut.total_purge_developed_repo('~/code/pylearn2')"
python -c "import utool as ut; ut.total_purge_developed_repo('~/code/Theano')"
python -c "import utool as ut; ut.total_purge_developed_repo('~/code/Lasagne')"
# Remove pylearn2 scripts
sudo rm /home/joncrall/venv/bin/pylearn2-*
sudo rm /usr/local/bin/pylearn2-*
locate pylearn2 | grep -v /home/joncrall/code/pylearn2 | grep -v /home/jason/code/pylearn2
pip uninstall theano
pip uninstall lasagne
pip uninstall pylearn2
sudo -H pip uninstall theano
sudo -H pip uninstall lasagne
sudo -H pip uninstall pylearn2
sudo pip uninstall theano
sudo pip uninstall lasagne
sudo pip uninstall pylearn2
# If they do try chowning to current user
sudo chown -R $USER:$USER ~/code/pylearn2
sudo chown -R $USER:$USER ~/code/Theano
sudo chown -R $USER:$USER ~/code/Lasagne
export GLOBAL_SITE_PKGS=$(python -c "import utool as ut; print(ut.get_global_dist_packages_dir())")
export LOCAL_SITE_PKGS=$(python -c "import utool as ut; print(ut.get_local_dist_packages_dir())")
export VENV_SITE_PKGS=$(python -c "import utool as ut; print(ut.get_site_packages_dir())")
# Test that they dont exist
python -c "import pylearn2; print(pylearn2.__file__)"
python -c "import theano; print(theano.__version__)"
python -c "import lasagne; print(lasagne.__version__)"
PythonPrereqs:
co
git clone git://github.com/lisa-lab/pylearn2.git
git clone https://github.com/Theano/Theano.git
git clone https://github.com/Erotemic/Lasagne.git
cd ~/code/pylearn2 && git pull && python setup.py develop
cd ~/code/Theano && git pull && python setup.py develop
cd ~/code/Lasagne && git pull && python setup.py develop
python -c "import pylearn2; print(pylearn2.__file__)"
python -c "import theano; print(theano.__version__)"
python -c "import lasagne; print(lasagne.__version__)"
git checkout 8758ac1434175159e5c1f30123041799c2b6098a
OLD:
git clone https://github.com/Lasagne/Lasagne.git
pip install theano
pip install git+https://github.com/Lasagne/Lasagne.git
pip install git+git://github.com/lisa-lab/pylearn2.git
#pip install lasagne
#pip install pylearn2
Ensure CuDNN is installed
http://lasagne.readthedocs.io/en/latest/user/installation.html#cudnn
# Test if Theano Works with CUDNN
python -c "from theano.sandbox.cuda.dnn import dnn_available as d; print(d() or d.msg)"
# Need to register with nvidia
https://developer.nvidia.com/rdp/cudnn-download
# Check cuda version
nvcc --version
# Check if cuda is globally installed
ls -al /usr/local/cuda
# Check if CUDNN is globally installed
ls -al /usr/local/cuda/include/cudnn.h
ls -al /usr/local/cuda/lib64/cudnn*
# Download approprate version
cd ~/Downloads
# doesnt work if you dont sign in
# wget https://developer.nvidia.com/compute/machine-learning/cudnn/secure/v5.1/rc/7.5/cudnn-7.5-linux-x64-v5.1-rc-tgz
# Unpack appropriate version
cd ~/Downloads
7z x cudnn-7.5-linux-x64-v5.1-rc.tgz && 7z x -ocudnn5.1 cudnn-7.5-linux-x64-v5.1-rc.tar
7z x cudnn-7.5-linux-x64-v5.0-ga.tgz && 7z x -ocudnn5.0 cudnn-7.5-linux-x64-v5.0-ga.tar
7z x cudnn-7.0-linux-x64-v4.0-prod.tgz && 7z x -ocudnn4.0 cudnn-7.0-linux-x64-v4.0-prod.tar
tree ~/Downloads/cudnn5.1/
tree ~/Downloads/cudnn4/
# DEFINE WHERE CUDA LIVES
export CUDADIR=/usr/local/cuda
export TARGET_CUDNN_VERSION=5.1
MAIN_CUDNN_VERSION="$(echo $TARGET_CUDNN_VERSION | head -c 1)"
# Check CUDNN Install
ls -al $CUDADIR/include/cudnn.h
ls -al $CUDADIR/lib64/libcudnn*
#Look at other cuda install permissions
ls -al $CUDADIR/include/cublas.h
ls -al $CUDADIR/lib64/libcublas*
# REMOVE / UNINSTALL OLD CUDNN
sudo rm -rf $CUDADIR/include/cudnn.h
sudo rm -rf $CUDADIR/lib64/libcudnn*
# Extract into folder called cuda, need to move it to wherever cuda is installed
# cudnn consists of one header and 4 libraries
sudo cp -rv ~/Downloads/cudnn$TARGET_CUDNN_VERSION/cuda/include/cudnn.h $CUDADIR/include/cudnn.h
sudo cp -rv ~/Downloads/cudnn$TARGET_CUDNN_VERSION/cuda/lib64/libcudnn.so.$TARGET_CUDNN_VERSION* $CUDADIR/lib64/
sudo cp -rv ~/Downloads/cudnn$TARGET_CUDNN_VERSION/cuda/lib64/libcudnn_static.a $CUDADIR/lib64/
# Manually make symlinks (ones nvidia ships are broken)
sudo ln -s $CUDADIR/lib64/libcudnn.so.$TARGET_CUDNN_VERSION* $CUDADIR/lib64/libcudnn.so.$MAIN_CUDNN_VERSION
sudo ln -s $CUDADIR/lib64/libcudnn.so.$MAIN_CUDNN_VERSION $CUDADIR/lib64/libcudnn.so
# Set permissions to reflect cuda install
sudo chmod 755 /usr/local/cuda/lib64/libcudnn.so.$TARGET_CUDNN_VERSION*
# Check CUDNN Install
ls -al $CUDADIR/include/cudnn.h
ls -al $CUDADIR/lib64/libcudnn*
# Test if Theano Works with CUDNN
python -c "from theano.sandbox.cuda.dnn import dnn_available as d; print(d() or d.msg)"
"""
def init_theanorc():
theanorc_fpath = join(os.getenv('HOME'), '.theanorc')
theanorc_text = ut.codeblock(
"""
[global]
floatX = float32
device = gpu0
openmp = True
[nvcc]
fastmath = True
"""
)
if ut.checkpath(theanorc_fpath, verbose=True):
if not ut.arg_you_sure('overwrite?'):
return
ut.write_to(theanorc_fpath, theanorc_text)
if __name__ == '__main__':
init_theanorc()
| 33.787709 | 121 | 0.700562 |
from __future__ import absolute_import, division, print_function, unicode_literals
import utool as ut
import os
from os.path import join
(print, rrr, profile) = ut.inject2(__name__)
def init_theanorc():
theanorc_fpath = join(os.getenv('HOME'), '.theanorc')
theanorc_text = ut.codeblock(
"""
[global]
floatX = float32
device = gpu0
openmp = True
[nvcc]
fastmath = True
"""
)
if ut.checkpath(theanorc_fpath, verbose=True):
if not ut.arg_you_sure('overwrite?'):
return
ut.write_to(theanorc_fpath, theanorc_text)
if __name__ == '__main__':
init_theanorc()
| true | true |
f724aebbd5407f292e974e0c128452257538cb40 | 3,516 | py | Python | bindings/python/ensmallen/datasets/string/desulfotomaculumcopahuensis.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/desulfotomaculumcopahuensis.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/desulfotomaculumcopahuensis.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Desulfotomaculum copahuensis.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def DesulfotomaculumCopahuensis(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Desulfotomaculum copahuensis graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
Wether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
Wether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
Instace of Desulfotomaculum copahuensis graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="DesulfotomaculumCopahuensis",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 33.485714 | 223 | 0.681741 | from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph
def DesulfotomaculumCopahuensis(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
return AutomaticallyRetrievedGraph(
graph_name="DesulfotomaculumCopahuensis",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| true | true |
f724af0aaa79792c4cd22f016025f032f8255a1c | 1,504 | py | Python | updater/update/ratelimit.py | codl/status.chitter.xyz | ed86a6163306c938a49bdef5dea4928ef7cd09cc | [
"BSD-3-Clause"
] | 1 | 2020-06-02T13:21:59.000Z | 2020-06-02T13:21:59.000Z | updater/update/ratelimit.py | codl/status.chitter.xyz | ed86a6163306c938a49bdef5dea4928ef7cd09cc | [
"BSD-3-Clause"
] | 678 | 2018-05-27T21:47:21.000Z | 2022-02-03T14:41:28.000Z | updater/update/ratelimit.py | codl/status.chitter.xyz | ed86a6163306c938a49bdef5dea4928ef7cd09cc | [
"BSD-3-Clause"
] | null | null | null | from redis import StrictRedis as Redis
from pathlib import Path
import hashlib
import time
lua_script_path = Path(__file__).parent / 'ratelimit.lua'
with open(lua_script_path) as f:
LUA_SCRIPT = f.read()
del lua_script_path # don't want it polluting the module
class RateLimit(object):
def __init__(self,
redis_url='redis://',
redis_key_prefix='ratelimit',
bucket_size=50,
bucket_period=30):
self.redis = Redis.from_url(redis_url)
self.script = self.redis.register_script(LUA_SCRIPT)
self.redis_key_prefix = redis_key_prefix
self.bucket_size = bucket_size
self.bucket_period = bucket_period
def _exec(self, identifier, clear=False):
identifier_h = hashlib.blake2s(
identifier.encode('utf-8'), digest_size=6).hexdigest()
token_count_key = "{}:{}:count".format(self.redis_key_prefix,
identifier_h)
token_last_add_key = "{}:{}:last-add".format(self.redis_key_prefix,
identifier_h)
keys = [token_count_key, token_last_add_key]
argv = [self.bucket_size, self.bucket_period, int(time.time())]
if clear:
argv += [True]
return self.script(keys, argv)
def hit(self, identifier):
return int(self._exec(identifier))
def clear(self, identifier):
self._exec(identifier, clear=True)
| 32.695652 | 75 | 0.611037 | from redis import StrictRedis as Redis
from pathlib import Path
import hashlib
import time
lua_script_path = Path(__file__).parent / 'ratelimit.lua'
with open(lua_script_path) as f:
LUA_SCRIPT = f.read()
del lua_script_path
class RateLimit(object):
def __init__(self,
redis_url='redis://',
redis_key_prefix='ratelimit',
bucket_size=50,
bucket_period=30):
self.redis = Redis.from_url(redis_url)
self.script = self.redis.register_script(LUA_SCRIPT)
self.redis_key_prefix = redis_key_prefix
self.bucket_size = bucket_size
self.bucket_period = bucket_period
def _exec(self, identifier, clear=False):
identifier_h = hashlib.blake2s(
identifier.encode('utf-8'), digest_size=6).hexdigest()
token_count_key = "{}:{}:count".format(self.redis_key_prefix,
identifier_h)
token_last_add_key = "{}:{}:last-add".format(self.redis_key_prefix,
identifier_h)
keys = [token_count_key, token_last_add_key]
argv = [self.bucket_size, self.bucket_period, int(time.time())]
if clear:
argv += [True]
return self.script(keys, argv)
def hit(self, identifier):
return int(self._exec(identifier))
def clear(self, identifier):
self._exec(identifier, clear=True)
| true | true |
f724af6e5df81a66575ddc711157d44b2bd75cca | 190 | py | Python | optimus/version.py | Pcosmin/Optimus | ef3306d1b752bbfb1959ddb9103786acb8e9b9ba | [
"Apache-2.0"
] | 1 | 2020-09-22T13:04:37.000Z | 2020-09-22T13:04:37.000Z | optimus/version.py | rafaelang/Optimus | 809088f41588c968b2e30210f98a494a497b07ff | [
"Apache-2.0"
] | null | null | null | optimus/version.py | rafaelang/Optimus | 809088f41588c968b2e30210f98a494a497b07ff | [
"Apache-2.0"
] | null | null | null | def _safe_int(string):
try:
return int(string)
except ValueError:
return string
__version__ = '3.0.6'
VERSION = tuple(_safe_int(x) for x in __version__.split('.'))
| 19 | 61 | 0.642105 | def _safe_int(string):
try:
return int(string)
except ValueError:
return string
__version__ = '3.0.6'
VERSION = tuple(_safe_int(x) for x in __version__.split('.'))
| true | true |
f724af7352302d77818ce7630117090761337ead | 1,853 | py | Python | server/plato/test/test_user_model.py | zhlooking/plato | 9daf0dfd8b376603453eadf2d981c71d3adb2632 | [
"MIT"
] | null | null | null | server/plato/test/test_user_model.py | zhlooking/plato | 9daf0dfd8b376603453eadf2d981c71d3adb2632 | [
"MIT"
] | null | null | null | server/plato/test/test_user_model.py | zhlooking/plato | 9daf0dfd8b376603453eadf2d981c71d3adb2632 | [
"MIT"
] | null | null | null | from plato.test.base import BaseTestCase
from sqlalchemy.exc import IntegrityError
from plato import db
from plato.model.user import User
from plato.test.utils import add_user
class TestUserModel(BaseTestCase):
def test_user_model(self):
user = add_user('foo', 'foo@bar.com', 'test_pwd')
self.assertTrue(user.id)
self.assertEqual('foo', user.username)
self.assertEqual('foo@bar.com', user.email)
self.assertTrue(user.active)
self.assertTrue(user.created_at)
self.assertTrue(user.password)
self.assertTrue(user.admin == False)
def test_add_user_duplicate_username(self):
add_user('foo', 'foo@bar.com', 'test_pwd')
duplicate_user = User('foo', 'foo_1@bar.com', 'test_pwd')
db.session.add(duplicate_user)
self.assertRaises(IntegrityError, db.session.commit)
def test_add_user_duplicate_email(self):
add_user('foo', 'foo@bar.com', 'test_pwd')
duplicate_user = User('foo_1', 'foo@bar.com', 'test_pwd')
db.session.add(duplicate_user)
self.assertRaises(IntegrityError, db.session.commit)
def test_passwords_are_random(self):
user_foo = add_user('foo', 'foo@bar.com', 'test_pwd')
user_bar = add_user('bar', 'bar@bar.com', 'test_pwd')
self.assertNotEqual(user_foo.password, user_bar.password)
def test_encode_auth_token(self):
user = add_user('test@test.com', 'test@test.com', 'test')
auth_token = user.encode_auth_token(user.id)
self.assertTrue(isinstance(auth_token, bytes))
def test_decode_auth_token(self):
user = add_user('test@test.com', 'test@test.com', 'test')
auth_token = user.encode_auth_token(user.id)
self.assertTrue(isinstance(auth_token, bytes))
self.assertTrue(User.decode_auth_token(auth_token), user.id)
| 39.425532 | 68 | 0.683216 | from plato.test.base import BaseTestCase
from sqlalchemy.exc import IntegrityError
from plato import db
from plato.model.user import User
from plato.test.utils import add_user
class TestUserModel(BaseTestCase):
def test_user_model(self):
user = add_user('foo', 'foo@bar.com', 'test_pwd')
self.assertTrue(user.id)
self.assertEqual('foo', user.username)
self.assertEqual('foo@bar.com', user.email)
self.assertTrue(user.active)
self.assertTrue(user.created_at)
self.assertTrue(user.password)
self.assertTrue(user.admin == False)
def test_add_user_duplicate_username(self):
add_user('foo', 'foo@bar.com', 'test_pwd')
duplicate_user = User('foo', 'foo_1@bar.com', 'test_pwd')
db.session.add(duplicate_user)
self.assertRaises(IntegrityError, db.session.commit)
def test_add_user_duplicate_email(self):
add_user('foo', 'foo@bar.com', 'test_pwd')
duplicate_user = User('foo_1', 'foo@bar.com', 'test_pwd')
db.session.add(duplicate_user)
self.assertRaises(IntegrityError, db.session.commit)
def test_passwords_are_random(self):
user_foo = add_user('foo', 'foo@bar.com', 'test_pwd')
user_bar = add_user('bar', 'bar@bar.com', 'test_pwd')
self.assertNotEqual(user_foo.password, user_bar.password)
def test_encode_auth_token(self):
user = add_user('test@test.com', 'test@test.com', 'test')
auth_token = user.encode_auth_token(user.id)
self.assertTrue(isinstance(auth_token, bytes))
def test_decode_auth_token(self):
user = add_user('test@test.com', 'test@test.com', 'test')
auth_token = user.encode_auth_token(user.id)
self.assertTrue(isinstance(auth_token, bytes))
self.assertTrue(User.decode_auth_token(auth_token), user.id)
| true | true |
f724aff84f870ddcfdf9843c043f46bf8a185053 | 1,034 | py | Python | Activities Week 7 (social analytics)/Social_Analytics_Part3/Day3/ChatterBot.py | lraynes/ClassActivities | 920df2331f39c8a89477ab73e4393675a299d02d | [
"MIT"
] | null | null | null | Activities Week 7 (social analytics)/Social_Analytics_Part3/Day3/ChatterBot.py | lraynes/ClassActivities | 920df2331f39c8a89477ab73e4393675a299d02d | [
"MIT"
] | null | null | null | Activities Week 7 (social analytics)/Social_Analytics_Part3/Day3/ChatterBot.py | lraynes/ClassActivities | 920df2331f39c8a89477ab73e4393675a299d02d | [
"MIT"
] | null | null | null | # Dependencies
import tweepy
import time
import json
from config import consumer_key, consumer_secret, access_token, access_token_secret
# Twitter API Keys
consumer_key = consumer_key
consumer_secret = consumer_secret
access_token = access_token
access_token_secret = access_token_secret
# Setup Tweepy API Authentication
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())
# Create a function that tweets
def TweetOut(tweet_number):
api.update_status(
"Can't stop. Won't stop. Chatting! This is Tweet #%s!" %
tweet_number)
# Create a function that calls the TweetOut function every minute
counter = 0
# Infinite loop
while(True):
# Call the TweetQuotes function and specify the tweet number
TweetOut(counter)
# Once tweeted, wait 60 seconds before doing anything else
time.sleep(60)
# Add 1 to the counter prior to re-running the loop
counter = counter + 1 | 25.85 | 83 | 0.766925 |
import tweepy
import time
import json
from config import consumer_key, consumer_secret, access_token, access_token_secret
consumer_key = consumer_key
consumer_secret = consumer_secret
access_token = access_token
access_token_secret = access_token_secret
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth, parser=tweepy.parsers.JSONParser())
def TweetOut(tweet_number):
api.update_status(
"Can't stop. Won't stop. Chatting! This is Tweet #%s!" %
tweet_number)
counter = 0
while(True):
TweetOut(counter)
time.sleep(60)
counter = counter + 1 | true | true |
f724b011af2d3189f97b37e16bda3a9e70054f15 | 410 | py | Python | scripts/wk/cfg/log.py | 2Shirt/WizardK | 82a2e7f85c80a52f892c1553e7a45ec0174e7bc6 | [
"MIT"
] | null | null | null | scripts/wk/cfg/log.py | 2Shirt/WizardK | 82a2e7f85c80a52f892c1553e7a45ec0174e7bc6 | [
"MIT"
] | 178 | 2017-11-17T19:14:31.000Z | 2021-12-15T07:43:29.000Z | scripts/wk/cfg/log.py | 2Shirt/WizardK | 82a2e7f85c80a52f892c1553e7a45ec0174e7bc6 | [
"MIT"
] | 1 | 2017-11-17T19:32:36.000Z | 2017-11-17T19:32:36.000Z | """WizardKit: Config - Log"""
# vim: sts=2 sw=2 ts=2
# Verbose preset: includes the logger name and calling function, with
# second-precision timestamps. Intended for debug runs.
DEBUG = dict(
  level='DEBUG',
  format='[%(asctime)s %(levelname)s] [%(name)s.%(funcName)s] %(message)s',
  datefmt='%Y-%m-%d %H%M%S%z',
  )

# Standard preset: message-level detail only, minute-precision timestamps.
DEFAULT = dict(
  level='INFO',
  format='[%(asctime)s %(levelname)s] %(message)s',
  datefmt='%Y-%m-%d %H%M%z',
  )

if __name__ == '__main__':
  print("This file is not meant to be called directly.")
| 21.578947 | 78 | 0.55122 |
DEBUG = {
'level': 'DEBUG',
'format': '[%(asctime)s %(levelname)s] [%(name)s.%(funcName)s] %(message)s',
'datefmt': '%Y-%m-%d %H%M%S%z',
}
DEFAULT = {
'level': 'INFO',
'format': '[%(asctime)s %(levelname)s] %(message)s',
'datefmt': '%Y-%m-%d %H%M%z',
}
if __name__ == '__main__':
print("This file is not meant to be called directly.")
| true | true |
f724b08ac071745f08342d655971d0e5d1d90152 | 10,921 | py | Python | tests/system/test_integration.py | kaiyan-sheng/apm-server | fe1db82a1043508088e5db7c057b84b1e3ce474f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/system/test_integration.py | kaiyan-sheng/apm-server | fe1db82a1043508088e5db7c057b84b1e3ce474f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/system/test_integration.py | kaiyan-sheng/apm-server | fe1db82a1043508088e5db7c057b84b1e3ce474f | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import time
from apmserver import integration_test
from apmserver import ClientSideBaseTest, ElasticTest, ExpvarBaseTest, ProcStartupFailureTest
from helper import wait_until
from es_helper import index_metric, index_transaction, index_error, index_span, index_onboarding, index_name
@integration_test
class Test(ElasticTest):

    def test_template(self):
        """
        This test starts the beat and checks that the template has been loaded to ES
        """
        # The onboarding index only exists after the server has talked to ES,
        # so waiting for it ensures template setup has had a chance to run.
        wait_until(lambda: self.es.indices.exists(index_onboarding))
        templates = self.es.indices.get_template(index_name)
        assert len(templates) == 1
        t = templates[index_name]
        # The loaded template must raise the mapping field cap to 2000;
        # the ES default would indicate the wrong template was applied.
        total_fields_limit = t['settings']['index']['mapping']['total_fields']['limit']
        assert total_fields_limit == "2000", total_fields_limit

    def test_tags_type(self):
        """Check that `context.tags.*` fields are mapped by value type:
        boolean tags as `boolean`, numeric tags as `scaled_float`, and
        everything else as `keyword`.
        """
        self.load_docs_with_template(self.get_payload_path("transactions_spans.ndjson"),
                                     self.intake_url, 'transaction', 8)
        self.assert_no_logged_warnings()
        mappings = self.es.indices.get_field_mapping(index=index_transaction, fields="context.tags.*")
        # "-000001" is presumably the first rollover index behind the
        # transaction alias -- confirm against the index setup helpers.
        for name, metric in mappings["{}-000001".format(index_transaction)]["mappings"].items():
            fullname = metric["full_name"]
            for mapping in metric["mapping"].values():
                mtype = mapping["type"]
                if fullname.startswith("context.tags.bool"):
                    assert mtype == "boolean", name + " mapped as " + mtype + ", not boolean"
                elif fullname.startswith("context.tags.number"):
                    assert mtype == "scaled_float", name + " mapped as " + mtype + ", not scaled_float"
                else:
                    assert mtype == "keyword", name + " mapped as " + mtype + ", not keyword"

    def test_load_docs_with_template_and_add_transaction(self):
        """
        This test starts the beat with a loaded template and sends transaction data to elasticsearch.
        It verifies that all data make it into ES, means data is compatible with the template
        and data are in expected format.
        """
        self.load_docs_with_template(self.get_payload_path("transactions_spans.ndjson"),
                                     self.intake_url, 'transaction', 8)
        self.assert_no_logged_warnings()

        # compare existing ES documents for transactions with new ones
        transaction_docs = self.wait_for_events('transaction', 3, index=index_transaction)
        self.approve_docs('transaction', transaction_docs)

        # compare existing ES documents for spans with new ones
        span_docs = self.wait_for_events('transaction', 5, index=index_span)
        self.approve_docs('spans', span_docs)

    def test_load_docs_with_template_and_add_error(self):
        """
        This test starts the beat with a loaded template and sends error data to elasticsearch.
        It verifies that all data make it into ES means data is compatible with the template.
        """
        self.load_docs_with_template(self.get_error_payload_path(), self.intake_url, 'error', 4)
        self.assert_no_logged_warnings()

        # compare existing ES documents for errors with new ones
        error_docs = self.wait_for_events('error', 4, index=index_error)
        self.approve_docs('error', error_docs)
@integration_test
class EnrichEventIntegrationTest(ClientSideBaseTest, ElasticTest):
    """Checks how stacktrace-frame enrichment and error grouping differ
    between the RUM and the backend intake endpoints."""

    def test_backend_error(self):
        # for backend events library_frame information should not be changed,
        # as no regex pattern is defined.
        self.load_docs_with_template(self.get_backend_error_payload_path(),
                                     self.backend_intake_url,
                                     'error',
                                     4)
        self.check_library_frames({"true": 1, "false": 0, "empty": 3}, index_error)

    def test_rum_error(self):
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.intake_url,
                                     'error',
                                     1)
        self.check_library_frames({"true": 5, "false": 0, "empty": 1}, index_error)

    def test_rum_transaction(self):
        self.load_docs_with_template(self.get_transaction_payload_path(),
                                     self.intake_url,
                                     'transaction',
                                     2)
        self.check_library_frames({"true": 1, "false": 0, "empty": 1}, index_span)

    def test_grouping_key_for_error(self):
        # upload the same error, once via rum, once via backend endpoint
        # check they don't have the same grouping key, as the
        # `rum.exclude_from_grouping` should only be applied to the rum error.
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.intake_url,
                                     'error',
                                     1)
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.backend_intake_url,
                                     'error',
                                     2)

        rs = self.es.search(index=index_error)
        docs = rs['hits']['hits']
        grouping_key1 = docs[0]["_source"]["error"]["grouping_key"]
        grouping_key2 = docs[1]["_source"]["error"]["grouping_key"]
        assert grouping_key1 != grouping_key2

    def check_library_frames(self, library_frames, index_name):
        """Assert the distribution of `library_frame` values across all
        stacktrace frames in the given index.

        `library_frames` maps "true"/"false"/"empty" (flag true, flag false,
        flag absent) to the expected frame counts.
        """
        rs = self.es.search(index=index_name)
        l_frames = {"true": 0, "false": 0, "empty": 0}
        for doc in rs['hits']['hits']:
            if "error" in doc["_source"]:
                err = doc["_source"]["error"]
                for exception in err.get("exception", []):
                    self.count_library_frames(exception, l_frames)
                if "log" in err:
                    self.count_library_frames(err["log"], l_frames)
            elif "span" in doc["_source"]:
                span = doc["_source"]["span"]
                self.count_library_frames(span, l_frames)
        assert l_frames == library_frames, "found {}, expected {}".format(
            l_frames, library_frames)

    @staticmethod
    def count_library_frames(doc, lf):
        """Tally the `library_frame` flag of each stacktrace frame in `doc`
        into the accumulator dict `lf`."""
        if "stacktrace" not in doc:
            return
        for frame in doc["stacktrace"]:
            if "library_frame" in frame:
                k = "true" if frame["library_frame"] else "false"
                lf[k] += 1
            else:
                lf["empty"] += 1
@integration_test
class ILMDisabledIntegrationTest(ElasticTest):
    """With ILM disabled, events must be written to date-suffixed indices
    rather than ILM rollover indices."""

    config_overrides = {"ilm_enabled": "false"}

    def test_override_indices_config(self):
        # load error and transaction document to ES
        # Querying the date-suffixed index verifies the non-ILM naming scheme.
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.intake_url,
                                     'error',
                                     4,
                                     query_index="{}-2017.05.09".format(index_error))
class OverrideIndicesTest(ElasticTest):
    """Shared test config that routes all events into one custom index and
    template named after `index_name`."""

    def config(self):
        """Extend the parent configuration with index/template overrides."""
        cfg = super(OverrideIndicesTest, self).config()
        cfg["override_index"] = index_name
        cfg["override_template"] = index_name
        return cfg
@integration_test
class OverrideIndicesIntegrationTest(OverrideIndicesTest):
    """Every event type must land in the single configured override index."""

    # default ILM=auto disables ILM when custom indices given
    def test_override_indices_config(self):
        # load error and transaction document to ES
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.intake_url,
                                     'error',
                                     4,
                                     query_index=index_name)
        self.load_docs_with_template(self.get_payload_path("transactions_spans_rum.ndjson"),
                                     self.intake_url,
                                     'transaction',
                                     2,
                                     query_index=index_name)

        # check that every document is indexed once in the expected index (incl.1 onboarding doc)
        assert 4+2+1 == self.es.count(index=index_name)['count']
@integration_test
class OverrideIndicesILMFalseIntegrationTest(OverrideIndicesTest):
    """ILM explicitly disabled together with a custom index: documents
    (plus the onboarding doc) go straight to the override index."""

    config_overrides = {"ilm_enabled": "false"}

    def test_override_indices_config(self):
        # load error and transaction document to ES
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.intake_url,
                                     'error',
                                     4,
                                     query_index=index_name)
        assert 4+1 == self.es.count(index=index_name)['count']
@integration_test
class OverrideIndicesILMTrueIntegrationTest(OverrideIndicesTest):
    """ILM explicitly enabled takes precedence over the custom index:
    documents are written to the ILM index instead."""

    config_overrides = {"ilm_enabled": "true"}

    def test_override_indices_config(self):
        # load error and transaction document to ES
        self.load_docs_with_template(self.get_error_payload_path(),
                                     self.intake_url,
                                     'error',
                                     4,
                                     query_index=self.ilm_index(index_error))
        assert 4 == self.es.count(index=self.ilm_index(index_error))['count']
@integration_test
class OverrideIndicesFailureIntegrationTest(ProcStartupFailureTest):
    """The server must refuse to start when an index override is configured
    without `setup.template.name` / `setup.template.pattern`."""

    config_overrides = {
        "override_index": "apm-foo",
        "elasticsearch_host": "localhost:8200",
        "file_enabled": "false",
    }

    def test_template_setup_error(self):
        # The process should exit quickly with this exact error message.
        loaded_msg = "Exiting: `setup.template.name` and `setup.template.pattern` have to be set"
        wait_until(lambda: self.log_contains(loaded_msg), max_timeout=5)
@integration_test
class ExpvarDisabledIntegrationTest(ExpvarBaseTest):
    """With expvar disabled, the debug-vars endpoint must not be served."""

    config_overrides = {"expvar_enabled": "false"}

    def test_expvar_exists(self):
        """expvar disabled, should 404"""
        response = self.get_debug_vars()
        assert response.status_code == 404, response.status_code
@integration_test
class ExpvarEnabledIntegrationTest(ExpvarBaseTest):
    """With expvar enabled, the debug-vars endpoint must respond."""

    config_overrides = {"expvar_enabled": "true"}

    def test_expvar_exists(self):
        """expvar enabled, should 200"""
        response = self.get_debug_vars()
        assert response.status_code == 200, response.status_code
@integration_test
class ExpvarCustomUrlIntegrationTest(ExpvarBaseTest):
    """The debug-vars endpoint must be reachable under a configured path."""

    config_overrides = {"expvar_enabled": "true", "expvar_url": "/foo"}
    expvar_url = ExpvarBaseTest.expvar_url.replace("/debug/vars", "/foo")

    def test_expvar_exists(self):
        """expvar enabled, should 200"""
        response = self.get_debug_vars()
        assert response.status_code == 200, response.status_code
| 42.996063 | 108 | 0.597839 | import time
from apmserver import integration_test
from apmserver import ClientSideBaseTest, ElasticTest, ExpvarBaseTest, ProcStartupFailureTest
from helper import wait_until
from es_helper import index_metric, index_transaction, index_error, index_span, index_onboarding, index_name
@integration_test
class Test(ElasticTest):
def test_template(self):
wait_until(lambda: self.es.indices.exists(index_onboarding))
templates = self.es.indices.get_template(index_name)
assert len(templates) == 1
t = templates[index_name]
total_fields_limit = t['settings']['index']['mapping']['total_fields']['limit']
assert total_fields_limit == "2000", total_fields_limit
def test_tags_type(self):
self.load_docs_with_template(self.get_payload_path("transactions_spans.ndjson"),
self.intake_url, 'transaction', 8)
self.assert_no_logged_warnings()
mappings = self.es.indices.get_field_mapping(index=index_transaction, fields="context.tags.*")
for name, metric in mappings["{}-000001".format(index_transaction)]["mappings"].items():
fullname = metric["full_name"]
for mapping in metric["mapping"].values():
mtype = mapping["type"]
if fullname.startswith("context.tags.bool"):
assert mtype == "boolean", name + " mapped as " + mtype + ", not boolean"
elif fullname.startswith("context.tags.number"):
assert mtype == "scaled_float", name + " mapped as " + mtype + ", not scaled_float"
else:
assert mtype == "keyword", name + " mapped as " + mtype + ", not keyword"
def test_load_docs_with_template_and_add_transaction(self):
self.load_docs_with_template(self.get_payload_path("transactions_spans.ndjson"),
self.intake_url, 'transaction', 8)
self.assert_no_logged_warnings()
transaction_docs = self.wait_for_events('transaction', 3, index=index_transaction)
self.approve_docs('transaction', transaction_docs)
span_docs = self.wait_for_events('transaction', 5, index=index_span)
self.approve_docs('spans', span_docs)
def test_load_docs_with_template_and_add_error(self):
self.load_docs_with_template(self.get_error_payload_path(), self.intake_url, 'error', 4)
self.assert_no_logged_warnings()
error_docs = self.wait_for_events('error', 4, index=index_error)
self.approve_docs('error', error_docs)
@integration_test
class EnrichEventIntegrationTest(ClientSideBaseTest, ElasticTest):
def test_backend_error(self):
self.load_docs_with_template(self.get_backend_error_payload_path(),
self.backend_intake_url,
'error',
4)
self.check_library_frames({"true": 1, "false": 0, "empty": 3}, index_error)
def test_rum_error(self):
self.load_docs_with_template(self.get_error_payload_path(),
self.intake_url,
'error',
1)
self.check_library_frames({"true": 5, "false": 0, "empty": 1}, index_error)
def test_rum_transaction(self):
self.load_docs_with_template(self.get_transaction_payload_path(),
self.intake_url,
'transaction',
2)
self.check_library_frames({"true": 1, "false": 0, "empty": 1}, index_span)
def test_grouping_key_for_error(self):
# `rum.exclude_from_grouping` should only be applied to the rum error.
self.load_docs_with_template(self.get_error_payload_path(),
self.intake_url,
'error',
1)
self.load_docs_with_template(self.get_error_payload_path(),
self.backend_intake_url,
'error',
2)
rs = self.es.search(index=index_error)
docs = rs['hits']['hits']
grouping_key1 = docs[0]["_source"]["error"]["grouping_key"]
grouping_key2 = docs[1]["_source"]["error"]["grouping_key"]
assert grouping_key1 != grouping_key2
def check_library_frames(self, library_frames, index_name):
rs = self.es.search(index=index_name)
l_frames = {"true": 0, "false": 0, "empty": 0}
for doc in rs['hits']['hits']:
if "error" in doc["_source"]:
err = doc["_source"]["error"]
for exception in err.get("exception", []):
self.count_library_frames(exception, l_frames)
if "log" in err:
self.count_library_frames(err["log"], l_frames)
elif "span" in doc["_source"]:
span = doc["_source"]["span"]
self.count_library_frames(span, l_frames)
assert l_frames == library_frames, "found {}, expected {}".format(
l_frames, library_frames)
@staticmethod
def count_library_frames(doc, lf):
if "stacktrace" not in doc:
return
for frame in doc["stacktrace"]:
if "library_frame" in frame:
k = "true" if frame["library_frame"] else "false"
lf[k] += 1
else:
lf["empty"] += 1
@integration_test
class ILMDisabledIntegrationTest(ElasticTest):
config_overrides = {"ilm_enabled": "false"}
def test_override_indices_config(self):
# load error and transaction document to ES
self.load_docs_with_template(self.get_error_payload_path(),
self.intake_url,
'error',
4,
query_index="{}-2017.05.09".format(index_error))
class OverrideIndicesTest(ElasticTest):
def config(self):
cfg = super(OverrideIndicesTest, self).config()
cfg.update({"override_index": index_name,
"override_template": index_name})
return cfg
@integration_test
class OverrideIndicesIntegrationTest(OverrideIndicesTest):
# default ILM=auto disables ILM when custom indices given
def test_override_indices_config(self):
# load error and transaction document to ES
self.load_docs_with_template(self.get_error_payload_path(),
self.intake_url,
'error',
4,
query_index=index_name)
self.load_docs_with_template(self.get_payload_path("transactions_spans_rum.ndjson"),
self.intake_url,
'transaction',
2,
query_index=index_name)
# check that every document is indexed once in the expected index (incl.1 onboarding doc)
assert 4+2+1 == self.es.count(index=index_name)['count']
@integration_test
class OverrideIndicesILMFalseIntegrationTest(OverrideIndicesTest):
config_overrides = {"ilm_enabled": "false"}
def test_override_indices_config(self):
# load error and transaction document to ES
self.load_docs_with_template(self.get_error_payload_path(),
self.intake_url,
'error',
4,
query_index=index_name)
assert 4+1 == self.es.count(index=index_name)['count']
@integration_test
class OverrideIndicesILMTrueIntegrationTest(OverrideIndicesTest):
config_overrides = {"ilm_enabled": "true"}
def test_override_indices_config(self):
# load error and transaction document to ES
self.load_docs_with_template(self.get_error_payload_path(),
self.intake_url,
'error',
4,
query_index=self.ilm_index(index_error))
assert 4 == self.es.count(index=self.ilm_index(index_error))['count']
@integration_test
class OverrideIndicesFailureIntegrationTest(ProcStartupFailureTest):
config_overrides = {
"override_index": "apm-foo",
"elasticsearch_host": "localhost:8200",
"file_enabled": "false",
}
def test_template_setup_error(self):
loaded_msg = "Exiting: `setup.template.name` and `setup.template.pattern` have to be set"
wait_until(lambda: self.log_contains(loaded_msg), max_timeout=5)
@integration_test
class ExpvarDisabledIntegrationTest(ExpvarBaseTest):
config_overrides = {"expvar_enabled": "false"}
def test_expvar_exists(self):
r = self.get_debug_vars()
assert r.status_code == 404, r.status_code
@integration_test
class ExpvarEnabledIntegrationTest(ExpvarBaseTest):
config_overrides = {"expvar_enabled": "true"}
def test_expvar_exists(self):
r = self.get_debug_vars()
assert r.status_code == 200, r.status_code
@integration_test
class ExpvarCustomUrlIntegrationTest(ExpvarBaseTest):
config_overrides = {"expvar_enabled": "true", "expvar_url": "/foo"}
expvar_url = ExpvarBaseTest.expvar_url.replace("/debug/vars", "/foo")
def test_expvar_exists(self):
r = self.get_debug_vars()
assert r.status_code == 200, r.status_code
| true | true |
f724b0962b84b5c8a44fd19d50552d5624391c1e | 1,408 | py | Python | bcs-ui/backend/tests/container_service/observability/log_stream/test_utils.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 599 | 2019-06-25T03:20:46.000Z | 2022-03-31T12:14:33.000Z | bcs-ui/backend/tests/container_service/observability/log_stream/test_utils.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 537 | 2019-06-27T06:03:44.000Z | 2022-03-31T12:10:01.000Z | bcs-ui/backend/tests/container_service/observability/log_stream/test_utils.py | laodiu/bk-bcs | 2a956a42101ff6487ff521fb3ef429805bfa7e26 | [
"Apache-2.0"
] | 214 | 2019-06-25T03:26:05.000Z | 2022-03-31T07:52:03.000Z | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from backend.container_service.observability.log_stream import utils
def test_refine_k8s_logs(log_content):
    """Refining raw k8s log content yields one entry per line, timestamped."""
    refined = utils.refine_k8s_logs(log_content, None)
    assert len(refined) == 10
    assert refined[0].time == '2021-05-19T12:03:52.516011121Z'
def test_calc_since_time(log_content):
    """The since-time derived from first/last entries matches the fixture."""
    refined = utils.refine_k8s_logs(log_content, None)
    since = utils.calc_since_time(refined[0].time, refined[-1].time)
    assert since == '2021-05-19T12:03:10.125788125Z'
def test_calc_previous_page(log_content):
    """A non-empty previous-page marker is produced for refined logs."""
    refined = utils.refine_k8s_logs(log_content, None)
    page = utils.calc_previous_page(refined, {'container_name': "", "previous": ""}, "")
    assert page != ""
| 41.411765 | 115 | 0.757102 |
from backend.container_service.observability.log_stream import utils
def test_refine_k8s_logs(log_content):
logs = utils.refine_k8s_logs(log_content, None)
assert len(logs) == 10
assert logs[0].time == '2021-05-19T12:03:52.516011121Z'
def test_calc_since_time(log_content):
logs = utils.refine_k8s_logs(log_content, None)
sine_time = utils.calc_since_time(logs[0].time, logs[-1].time)
assert sine_time == '2021-05-19T12:03:10.125788125Z'
def test_calc_previous_page(log_content):
logs = utils.refine_k8s_logs(log_content, None)
page = utils.calc_previous_page(logs, {'container_name': "", "previous": ""}, "")
assert page != ""
| true | true |
f724b0ddd809230cda004541ff8a28dab8b18b75 | 1,462 | py | Python | python/logs/example-logging.py | jgordo04/housinginsights_temp | 588e912de31b7f50f7239af0bd4dfeaa693616bd | [
"MIT"
] | null | null | null | python/logs/example-logging.py | jgordo04/housinginsights_temp | 588e912de31b7f50f7239af0bd4dfeaa693616bd | [
"MIT"
] | null | null | null | python/logs/example-logging.py | jgordo04/housinginsights_temp | 588e912de31b7f50f7239af0bd4dfeaa693616bd | [
"MIT"
] | null | null | null | import logging
# Configure logging: everything at DEBUG level and above is written to the
# file below (created on first use, appended to afterwards).
logging_filename = "../logs/example.log"
logging.basicConfig(filename=logging_filename, level=logging.DEBUG)

# ----------------
# Example logging
# ----------------
# When writing code, instead of the 'print' statement (which is only shown on
# the command line), you can use logging to write messages to the log file.

# Benefit: easier to sort through complex output when debugging.
# Downside: remember to delete log files from time to time - they grow.
# They will be recreated the next time the program starts.

# To see logging in action, run this file and then look in example.log.
# Re-running this file *appends* messages to the log file; delete the file
# and re-run to start fresh.

logging.warning("--------------------starting module------------------")
logging.error("My error message")
logging.critical("My super bad error message")
logging.warning("This is a message that would always be written to logs")
logging.info("This message only comes through when level=logging.DEBUG")

# stack_info=True makes the log record also report where it was called from
# (e.g. line number), like a regular python traceback.
logging.debug("this is a debug message", stack_info=True)

print("Example logging complete! Open example.log to see what happened.")
| 44.30303 | 116 | 0.725718 | import logging
logging_filename = "../logs/example.log"
logging.basicConfig(filename=logging_filename, level=logging.DEBUG)
# They will be recreated next time you start the program.
# To see logging in action, run this file and then look in the newly created example.log file
# Every time you re-run this file, messages will be *added* to the log file
# Every time you delete the log file, and then re-run this file it will be created fresh.
logging.warning("--------------------starting module------------------")
logging.error("My error message")
logging.critical("My super bad error message")
logging.warning("This is a message that would always be written to logs")
logging.info("This message only comes through when level=logging.DEBUG")
#adding stack_info=True makes a log also report where it was called from (e.g. line 29), like a regular python error
logging.debug("this is a debug message", stack_info=True)
print("Example logging complete! Open example.log to see what happened.")
| true | true |
f724b12ab738483eea567de9687c75d90ba7a949 | 788 | py | Python | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/GLX/NV/multigpu_context.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/GLX/NV/multigpu_context.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | OpenGLWrapper_JE/venv/Lib/site-packages/OpenGL/GLX/NV/multigpu_context.py | JE-Chen/je_old_repo | a8b2f1ac2eec25758bd15b71c64b59b27e0bcda5 | [
"MIT"
] | null | null | null | '''OpenGL extension NV.multigpu_context
This module customises the behaviour of the
OpenGL.raw.GLX.NV.multigpu_context to provide a more
Python-friendly API
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/NV/multigpu_context.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLX import _types, _glgets
from OpenGL.raw.GLX.NV.multigpu_context import *
from OpenGL.raw.GLX.NV.multigpu_context import _EXTENSION_NAME
def glInitMultigpuContextNV():
    '''Return boolean indicating whether this extension is available'''
    # Local import matches the PyOpenGL autogeneration template used for all
    # extension-init helpers.
    from OpenGL import extensions
    return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GLX import _types, _glgets
from OpenGL.raw.GLX.NV.multigpu_context import *
from OpenGL.raw.GLX.NV.multigpu_context import _EXTENSION_NAME
def glInitMultigpuContextNV():
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
| true | true |
f724b18933f6ba44d5bf81c468e375a0ec86b207 | 2,314 | py | Python | creategroup.py | vnitinv/JChat | a6fee1b8cb07f8ed4bcda404e0519e0e580646da | [
"Apache-2.0"
] | 3 | 2016-04-20T01:33:32.000Z | 2016-07-19T12:24:27.000Z | creategroup.py | vnitinv/JChat | a6fee1b8cb07f8ed4bcda404e0519e0e580646da | [
"Apache-2.0"
] | null | null | null | creategroup.py | vnitinv/JChat | a6fee1b8cb07f8ed4bcda404e0519e0e580646da | [
"Apache-2.0"
] | null | null | null | from PyQt4 import QtCore, QtGui
from boxes import ConnectedDevice
# Qt4 / Python 2 compatibility shims: newer bindings drop QString and
# QApplication.UnicodeUTF8, so fall back to plain-string behaviour.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    # QString is absent (API v2 / Python 3): strings are already unicode.
    def _fromUtf8(s):
        return s

try:
    _encoding = QtGui.QApplication.UnicodeUTF8

    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    # UnicodeUTF8 flag removed: call translate() without an encoding argument.
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_CreateGroup(object):
    """Qt-Designer-style UI for the "create group" dialog: a group-name field
    plus a Create button that registers the group."""

    def setupUi(self, CreateGroup):
        """Build the widget hierarchy on `CreateGroup` and wire up signals."""
        # presumably a shared model of known groups - see boxes.ConnectedDevice
        self.cd_cg = ConnectedDevice()
        CreateGroup.setObjectName(_fromUtf8("CreateGroup"))
        CreateGroup.resize(405, 114)
        self.gridLayout = QtGui.QGridLayout(CreateGroup)
        self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
        self.horizontalLayout = QtGui.QHBoxLayout()
        self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
        self.grou_name_label = QtGui.QLabel(CreateGroup)
        self.grou_name_label.setObjectName(_fromUtf8("grou_name_label"))
        self.horizontalLayout.addWidget(self.grou_name_label)
        self.group_name_lineEdit = QtGui.QLineEdit(CreateGroup)
        self.group_name_lineEdit.setDragEnabled(True)
        self.group_name_lineEdit.setObjectName(_fromUtf8("group_name_lineEdit"))
        self.horizontalLayout.addWidget(self.group_name_lineEdit)
        self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 1)
        self.cg_pushButton = QtGui.QPushButton(CreateGroup)
        # NOTE(review): objectName "pushButton" does not match the attribute
        # name cg_pushButton -- confirm nothing relies on connectSlotsByName
        # auto-wiring for this widget.
        self.cg_pushButton.setObjectName(_fromUtf8("pushButton"))
        self.gridLayout.addWidget(self.cg_pushButton, 1, 0, 1, 1)

        self.retranslateUi(CreateGroup)
        QtCore.QObject.connect(self.cg_pushButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.updateGroups)
        QtCore.QMetaObject.connectSlotsByName(CreateGroup)

    def updateGroups(self):
        """Register the entered group name with an empty member list,
        skipping names that already exist."""
        host = str(self.group_name_lineEdit.text())
        if host not in self.cd_cg.groups:
            self.cd_cg.groups[host] = []

    def retranslateUi(self, CreateGroup):
        """Apply translatable texts to the dialog widgets."""
        CreateGroup.setWindowTitle(_translate("CreateGroup", "Group", None))
        self.grou_name_label.setText(_translate("CreateGroup", "Group Name:", None))
        self.cg_pushButton.setText(_translate("CreateGroup", "Create", None))
| 43.660377 | 108 | 0.723423 | from PyQt4 import QtCore, QtGui
from boxes import ConnectedDevice
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_CreateGroup(object):
def setupUi(self, CreateGroup):
self.cd_cg = ConnectedDevice()
CreateGroup.setObjectName(_fromUtf8("CreateGroup"))
CreateGroup.resize(405, 114)
self.gridLayout = QtGui.QGridLayout(CreateGroup)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.horizontalLayout = QtGui.QHBoxLayout()
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.grou_name_label = QtGui.QLabel(CreateGroup)
self.grou_name_label.setObjectName(_fromUtf8("grou_name_label"))
self.horizontalLayout.addWidget(self.grou_name_label)
self.group_name_lineEdit = QtGui.QLineEdit(CreateGroup)
self.group_name_lineEdit.setDragEnabled(True)
self.group_name_lineEdit.setObjectName(_fromUtf8("group_name_lineEdit"))
self.horizontalLayout.addWidget(self.group_name_lineEdit)
self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 1)
self.cg_pushButton = QtGui.QPushButton(CreateGroup)
self.cg_pushButton.setObjectName(_fromUtf8("pushButton"))
self.gridLayout.addWidget(self.cg_pushButton, 1, 0, 1, 1)
self.retranslateUi(CreateGroup)
QtCore.QObject.connect(self.cg_pushButton, QtCore.SIGNAL(_fromUtf8("clicked()")), self.updateGroups)
QtCore.QMetaObject.connectSlotsByName(CreateGroup)
def updateGroups(self):
host = str(self.group_name_lineEdit.text())
if host not in self.cd_cg.groups:
self.cd_cg.groups[host] = []
def retranslateUi(self, CreateGroup):
CreateGroup.setWindowTitle(_translate("CreateGroup", "Group", None))
self.grou_name_label.setText(_translate("CreateGroup", "Group Name:", None))
self.cg_pushButton.setText(_translate("CreateGroup", "Create", None))
| true | true |
f724b33b9c2842b58bfecbe90baa6d08d858641f | 201 | py | Python | test.py | komireddys/Python_flask | c2e9747064f4b1a4b6d79f729d8ab1ec62e0d706 | [
"MIT"
] | null | null | null | test.py | komireddys/Python_flask | c2e9747064f4b1a4b6d79f729d8ab1ec62e0d706 | [
"MIT"
] | null | null | null | test.py | komireddys/Python_flask | c2e9747064f4b1a4b6d79f729d8ab1ec62e0d706 | [
"MIT"
] | null | null | null | from flask import Flask
# Minimal Flask demo application exposing a single landing page.
app = Flask(__name__)


@app.route("/")
def hello():
    """Serve the landing page at the site root."""
    # NOTE(review): <p1> is not a standard HTML tag -- probably meant <p>.
    return "<h1>Welcome to Python Flask App!</h1> <p1>hello this sample page</p1>"


if __name__ == "__main__":
    # Run the built-in development server (not for production use).
    app.run()
| 20.1 | 82 | 0.656716 | from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
return "<h1>Welcome to Python Flask App!</h1> <p1>hello this sample page</p1>"
if __name__ == "__main__":
app.run()
| true | true |
f724b3e43741d780155776413bbee552df6dfdf4 | 3,183 | py | Python | optax/_src/constrain.py | VE-FORBRYDERNE/optax | 0d5421240cc7d4bf18fbed44fc5e5e2382a6e884 | [
"Apache-2.0"
] | null | null | null | optax/_src/constrain.py | VE-FORBRYDERNE/optax | 0d5421240cc7d4bf18fbed44fc5e5e2382a6e884 | [
"Apache-2.0"
] | null | null | null | optax/_src/constrain.py | VE-FORBRYDERNE/optax | 0d5421240cc7d4bf18fbed44fc5e5e2382a6e884 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 DeepMind Technologies Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradient transformations used to enforce specific constraints."""
from typing import Any, NamedTuple
import jax
import jax.numpy as jnp
from optax._src import base
# pylint:disable=no-value-for-parameter
NonNegativeParamsState = base.EmptyState
def keep_params_nonnegative() -> base.GradientTransformation:
  """Modifies the updates to keep parameters non-negative, i.e. >= 0.

  This transformation ensures that parameters after the update will be
  larger than or equal to zero.
  In a chain of transformations, this should be the last one.

  WARNING: the transformation expects input params to be non-negative.
  When params is negative the transformed update will move them to 0.

  Returns:
    An (init_fn, update_fn) tuple.
  """

  def init_fn(_):
    return NonNegativeParamsState()

  def update_fn(updates, state, params):
    if params is None:
      raise ValueError(base.NO_PARAMS_MSG)

    # Clamp each update so that p + u >= 0: when the raw update would push a
    # parameter below zero, replace it with -p, which lands exactly on 0.
    # `jax.tree_map` accepts multiple trees; the `jax.tree_multimap` alias is
    # deprecated and removed in recent JAX releases (matches `zero_nans` below).
    updates = jax.tree_map(
        lambda p, u: jnp.where((p + u) < 0., -p, u), params, updates)
    return updates, state

  return base.GradientTransformation(init_fn, update_fn)
class ZeroNansState(NamedTuple):
  """State holding a tree of per-leaf NaN-detection flags.

  The entry `found_nan` has the same tree structure as that of the parameters.
  Each leaf is a single boolean which contains True iff a NaN was detected in
  the corresponding parameter array at the last call to `update`.
  """
  # Pytree of scalar booleans, one per parameter leaf.
  found_nan: Any
def zero_nans() -> base.GradientTransformation:
  """A transformation which replaces NaNs with 0.

  Zeroing values in gradients is guaranteed to produce a direction of
  non-increasing loss.

  The state of the transformation has the same tree structure as that of the
  parameters. Each leaf is a single boolean which contains True iff a NaN was
  detected in the corresponding parameter array at the last call to `update`.
  This state is not used by the transformation internally, but lets users be
  aware when NaNs have been zeroed out.

  Returns:
    A `GradientTransformation`.
  """

  def _false_flag(leaf):
    # Scalar False flag, one per parameter leaf; the leaf value is ignored.
    del leaf
    return jnp.array(False, dtype=jnp.bool_)

  def _leaf_has_nan(leaf):
    return jnp.any(jnp.isnan(leaf))

  def _replace_nans(leaf):
    return jnp.where(jnp.isnan(leaf), jnp.zeros_like(leaf), leaf)

  def _init(params):
    # No NaNs observed yet.
    return ZeroNansState(jax.tree_map(_false_flag, params))

  def _update(updates, opt_state, params=None):
    del params  # Unused by this transformation.
    new_state = ZeroNansState(jax.tree_map(_leaf_has_nan, updates))
    cleaned = jax.tree_map(_replace_nans, updates)
    return cleaned, new_state

  return base.GradientTransformation(init=_init, update=_update)
| 32.814433 | 80 | 0.723217 |
from typing import Any, NamedTuple
import jax
import jax.numpy as jnp
from optax._src import base
NonNegativeParamsState = base.EmptyState
def keep_params_nonnegative() -> base.GradientTransformation:
def init_fn(_):
return NonNegativeParamsState()
def update_fn(updates, state, params):
if params is None:
raise ValueError(base.NO_PARAMS_MSG)
updates = jax.tree_multimap(
lambda p, u: jnp.where((p + u) < 0., -p, u), params, updates)
return updates, state
return base.GradientTransformation(init_fn, update_fn)
class ZeroNansState(NamedTuple):
found_nan: Any
def zero_nans() -> base.GradientTransformation:
def init_fn(params):
return ZeroNansState(
jax.tree_map(lambda p: jnp.array(False, dtype=jnp.bool_), params))
def update_fn(updates, opt_state, params=None):
del params
opt_state = ZeroNansState(
jax.tree_map(lambda p: jnp.any(jnp.isnan(p)), updates))
updates = jax.tree_map(
lambda p: jnp.where(jnp.isnan(p), jnp.zeros_like(p), p), updates)
return updates, opt_state
return base.GradientTransformation(init=init_fn, update=update_fn)
| true | true |
f724b40a17c5a178efc2d867f04369de536589ee | 98 | py | Python | lib/python3.7/copyreg.py | Abhishek5101/Django-REST-API-User-authentication | 264ba261826fc11180e6452e92aeb6a52201ff9b | [
"Apache-1.1"
] | 1 | 2020-06-12T21:19:40.000Z | 2020-06-12T21:19:40.000Z | Config/lib/python3.7/copyreg.py | ThisGirlSaraCyber/MicroMasters-MITx-DEDP | 5ad39fdf0d1516b06eb0385fd48a3cc7435fcd13 | [
"MIT"
] | 6 | 2021-03-19T01:55:58.000Z | 2021-09-22T18:52:59.000Z | env/lib/python3.7/copyreg.py | OnboardFlow/api-docs | 90200e936386b12314a5e0dadb155273bbfd5e12 | [
"Apache-2.0"
] | null | null | null | /usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/copyreg.py | 98 | 98 | 0.816327 | /usr/local/Cellar/python/3.7.6_1/Frameworks/Python.framework/Versions/3.7/lib/python3.7/copyreg.py | false | true |
f724b40a71f16bb470b2cc5786eac0d4c6031f53 | 20,284 | py | Python | tests/auth/test_init.py | JariInc/home-assistant | 24a8d60566fb5cf62942d042d38965a705d1bc65 | [
"Apache-2.0"
] | null | null | null | tests/auth/test_init.py | JariInc/home-assistant | 24a8d60566fb5cf62942d042d38965a705d1bc65 | [
"Apache-2.0"
] | null | null | null | tests/auth/test_init.py | JariInc/home-assistant | 24a8d60566fb5cf62942d042d38965a705d1bc65 | [
"Apache-2.0"
] | 3 | 2018-09-14T07:34:09.000Z | 2018-09-29T12:57:10.000Z | """Tests for the Home Assistant auth module."""
from datetime import timedelta
from unittest.mock import Mock, patch
import pytest
from homeassistant import auth, data_entry_flow
from homeassistant.auth import (
models as auth_models, auth_store, const as auth_const)
from homeassistant.auth.mfa_modules import SESSION_EXPIRATION
from homeassistant.util import dt as dt_util
from tests.common import (
MockUser, ensure_auth_manager_loaded, flush_store, CLIENT_ID)
@pytest.fixture
def mock_hass(loop):
    """Hass mock with minimum amount of data set to make it work with auth.

    The ``loop`` fixture is requested only so an event loop exists for the
    duration of the test; it is not used directly.
    """
    hass = Mock()
    # presumably prevents auth providers from installing requirements via
    # pip during tests -- confirm against homeassistant.config semantics.
    hass.config.skip_pip = True
    return hass
async def test_auth_manager_from_config_validates_config_and_id(mock_hass):
    """Test get auth providers.

    Provider entries with invalid config (missing required keys) or a
    duplicate ``id`` are dropped; only the valid, uniquely-identified
    providers survive.
    """
    manager = await auth.auth_manager_from_config(mock_hass, [{
        'name': 'Test Name',
        'type': 'insecure_example',
        'users': [],
    }, {
        'name': 'Invalid config because no users',
        'type': 'insecure_example',
        'id': 'invalid_config',
    }, {
        'name': 'Test Name 2',
        'type': 'insecure_example',
        'id': 'another',
        'users': [],
    }, {
        'name': 'Wrong because duplicate ID',
        'type': 'insecure_example',
        'id': 'another',
        'users': [],
    }], [])
    providers = [{
        'name': provider.name,
        'id': provider.id,
        'type': provider.type,
    } for provider in manager.auth_providers]
    # Only the two valid entries remain; the config-invalid and duplicate-id
    # ones were rejected during manager construction.
    assert providers == [{
        'name': 'Test Name',
        'type': 'insecure_example',
        'id': None,
    }, {
        'name': 'Test Name 2',
        'type': 'insecure_example',
        'id': 'another',
    }]
async def test_auth_manager_from_config_auth_modules(mock_hass):
    """Test get auth modules.

    MFA modules follow the same validation rules as providers: a module with
    a duplicate ``id`` is dropped, and a module without an explicit ``id``
    defaults its id to its type.
    """
    manager = await auth.auth_manager_from_config(mock_hass, [{
        'name': 'Test Name',
        'type': 'insecure_example',
        'users': [],
    }, {
        'name': 'Test Name 2',
        'type': 'insecure_example',
        'id': 'another',
        'users': [],
    }], [{
        'name': 'Module 1',
        'type': 'insecure_example',
        'data': [],
    }, {
        'name': 'Module 2',
        'type': 'insecure_example',
        'id': 'another',
        'data': [],
    }, {
        'name': 'Duplicate ID',
        'type': 'insecure_example',
        'id': 'another',
        'data': [],
    }])
    providers = [{
        'name': provider.name,
        'type': provider.type,
        'id': provider.id,
    } for provider in manager.auth_providers]
    assert providers == [{
        'name': 'Test Name',
        'type': 'insecure_example',
        'id': None,
    }, {
        'name': 'Test Name 2',
        'type': 'insecure_example',
        'id': 'another',
    }]
    modules = [{
        'name': module.name,
        'type': module.type,
        'id': module.id,
    } for module in manager.auth_mfa_modules]
    # 'Module 1' had no id so its id defaults to its type; the duplicate-id
    # module was dropped.
    assert modules == [{
        'name': 'Module 1',
        'type': 'insecure_example',
        'id': 'insecure_example',
    }, {
        'name': 'Module 2',
        'type': 'insecure_example',
        'id': 'another',
    }]
async def test_create_new_user(hass):
    """Test creating new user.

    Completing the login flow with credentials no existing user owns
    produces a fresh, non-owner user named after the provider metadata.
    """
    manager = await auth.auth_manager_from_config(hass, [{
        'type': 'insecure_example',
        'users': [{
            'username': 'test-user',
            'password': 'test-pass',
            'name': 'Test Name'
        }]
    }], [])
    # First step presents the provider's credential form.
    step = await manager.login_flow.async_init(('insecure_example', None))
    assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
    # Submitting valid credentials completes the flow and creates the user.
    step = await manager.login_flow.async_configure(step['flow_id'], {
        'username': 'test-user',
        'password': 'test-pass',
    })
    assert step['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    user = step['result']
    assert user is not None
    assert user.is_owner is False
    assert user.name == 'Test Name'
async def test_login_as_existing_user(mock_hass):
    """Test login as existing user.

    When a credential in the store already belongs to a user, completing the
    login flow returns that exact user (with its stored attributes) rather
    than creating a new one.
    """
    manager = await auth.auth_manager_from_config(mock_hass, [{
        'type': 'insecure_example',
        'users': [{
            'username': 'test-user',
            'password': 'test-pass',
            'name': 'Test Name'
        }]
    }], [])
    mock_hass.auth = manager
    ensure_auth_manager_loaded(manager)
    # Add a fake user that we're not going to log in with
    user = MockUser(
        id='mock-user2',
        is_owner=False,
        is_active=False,
        name='Not user',
    ).add_to_auth_manager(manager)
    user.credentials.append(auth_models.Credentials(
        id='mock-id2',
        auth_provider_type='insecure_example',
        auth_provider_id=None,
        data={'username': 'other-user'},
        is_new=False,
    ))
    # Add fake user with credentials for example auth provider.
    user = MockUser(
        id='mock-user',
        is_owner=False,
        is_active=False,
        name='Paulus',
    ).add_to_auth_manager(manager)
    user.credentials.append(auth_models.Credentials(
        id='mock-id',
        auth_provider_type='insecure_example',
        auth_provider_id=None,
        data={'username': 'test-user'},
        is_new=False,
    ))
    step = await manager.login_flow.async_init(('insecure_example', None))
    assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
    step = await manager.login_flow.async_configure(step['flow_id'], {
        'username': 'test-user',
        'password': 'test-pass',
    })
    assert step['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    # The flow resolved to the pre-existing 'mock-user', not 'mock-user2'
    # and not a newly created user.
    user = step['result']
    assert user is not None
    assert user.id == 'mock-user'
    assert user.is_owner is False
    assert user.is_active is False
    assert user.name == 'Paulus'
async def test_linking_user_to_two_auth_providers(hass, hass_storage):
    """Test linking user to two auth providers.

    A user created via one provider can have a credential from a second
    provider linked onto the same account.
    """
    manager = await auth.auth_manager_from_config(hass, [{
        'type': 'insecure_example',
        'users': [{
            'username': 'test-user',
            'password': 'test-pass',
        }]
    }, {
        'type': 'insecure_example',
        'id': 'another-provider',
        'users': [{
            'username': 'another-user',
            'password': 'another-password',
        }]
    }], [])
    # Log in via the first provider to create the user.
    step = await manager.login_flow.async_init(('insecure_example', None))
    step = await manager.login_flow.async_configure(step['flow_id'], {
        'username': 'test-user',
        'password': 'test-pass',
    })
    user = step['result']
    assert user is not None
    # Run the second provider's flow in credential-only mode: it yields a
    # credential instead of logging a user in.
    step = await manager.login_flow.async_init(
        ('insecure_example', 'another-provider'),
        context={'credential_only': True})
    step = await manager.login_flow.async_configure(step['flow_id'], {
        'username': 'another-user',
        'password': 'another-password',
    })
    new_credential = step['result']
    await manager.async_link_user(user, new_credential)
    assert len(user.credentials) == 2
async def test_saving_loading(hass, hass_storage):
    """Test storing and saving data.

    Creates one of each type that we store to test we restore correctly.
    """
    manager = await auth.auth_manager_from_config(hass, [{
        'type': 'insecure_example',
        'users': [{
            'username': 'test-user',
            'password': 'test-pass',
        }]
    }], [])
    step = await manager.login_flow.async_init(('insecure_example', None))
    step = await manager.login_flow.async_configure(step['flow_id'], {
        'username': 'test-user',
        'password': 'test-pass',
    })
    user = step['result']
    await manager.async_activate_user(user)
    await manager.async_create_refresh_token(user, CLIENT_ID)
    # Force the debounced store to write everything out to hass_storage.
    await flush_store(manager._store._store)
    # A brand-new store reading the same storage must round-trip the user
    # (including credentials and refresh tokens, via User equality).
    store2 = auth_store.AuthStore(hass)
    users = await store2.async_get_users()
    assert len(users) == 1
    assert users[0] == user
async def test_cannot_retrieve_expired_access_token(hass):
    """Ensure validation rejects access tokens that have already expired."""
    auth_mgr = await auth.auth_manager_from_config(hass, [], [])
    user = MockUser().add_to_auth_manager(auth_mgr)
    refresh = await auth_mgr.async_create_refresh_token(user, CLIENT_ID)
    assert refresh.user.id is user.id
    assert refresh.client_id == CLIENT_ID
    # A freshly minted access token validates back to its refresh token.
    token = auth_mgr.async_create_access_token(refresh)
    validated = await auth_mgr.async_validate_access_token(token)
    assert validated is refresh
    # Mint a token "in the past", just beyond the expiration window, by
    # patching the clock the token machinery reads.
    backdated = (dt_util.utcnow()
                 - auth_const.ACCESS_TOKEN_EXPIRATION
                 - timedelta(seconds=11))
    with patch('homeassistant.util.dt.utcnow', return_value=backdated):
        token = auth_mgr.async_create_access_token(refresh)
    assert await auth_mgr.async_validate_access_token(token) is None
async def test_generating_system_user(hass):
    """Verify a system user can be created and given a client-less token."""
    auth_mgr = await auth.auth_manager_from_config(hass, [], [])
    sys_user = await auth_mgr.async_create_system_user('Hass.io')
    refresh = await auth_mgr.async_create_refresh_token(sys_user)
    assert sys_user.system_generated
    assert refresh is not None
    # System-user refresh tokens carry no client id.
    assert refresh.client_id is None
async def test_refresh_token_requires_client_for_user(hass):
    """Refresh tokens for regular users must be tied to a client id."""
    auth_mgr = await auth.auth_manager_from_config(hass, [], [])
    regular_user = MockUser().add_to_auth_manager(auth_mgr)
    assert regular_user.system_generated is False
    # Omitting the client id is an error for non-system users.
    with pytest.raises(ValueError):
        await auth_mgr.async_create_refresh_token(regular_user)
    refresh = await auth_mgr.async_create_refresh_token(
        regular_user, CLIENT_ID)
    assert refresh is not None
    assert refresh.client_id == CLIENT_ID
async def test_refresh_token_not_requires_client_for_system_user(hass):
    """System users get refresh tokens without (and never with) a client."""
    auth_mgr = await auth.auth_manager_from_config(hass, [], [])
    sys_user = await auth_mgr.async_create_system_user('Hass.io')
    assert sys_user.system_generated is True
    # Supplying a client id for a system user is an error.
    with pytest.raises(ValueError):
        await auth_mgr.async_create_refresh_token(sys_user, CLIENT_ID)
    refresh = await auth_mgr.async_create_refresh_token(sys_user)
    assert refresh is not None
    assert refresh.client_id is None
async def test_cannot_deactive_owner(mock_hass):
    """Deactivating the owner account must raise."""
    auth_mgr = await auth.auth_manager_from_config(mock_hass, [], [])
    owner = MockUser(is_owner=True).add_to_auth_manager(auth_mgr)
    with pytest.raises(ValueError):
        await auth_mgr.async_deactivate_user(owner)
async def test_remove_refresh_token(mock_hass):
    """Removing a refresh token also invalidates its access tokens."""
    auth_mgr = await auth.auth_manager_from_config(mock_hass, [], [])
    user = MockUser().add_to_auth_manager(auth_mgr)
    refresh = await auth_mgr.async_create_refresh_token(user, CLIENT_ID)
    access = auth_mgr.async_create_access_token(refresh)
    await auth_mgr.async_remove_refresh_token(refresh)
    # Both the refresh token and any access token minted from it are gone.
    assert await auth_mgr.async_get_refresh_token(refresh.id) is None
    assert await auth_mgr.async_validate_access_token(access) is None
async def test_login_with_auth_module(mock_hass):
    """Test login as existing user with auth module.

    With an MFA module enabled for the user, the login flow requires a
    second (pin) step after the provider credentials; a wrong pin re-shows
    the mfa form with an error, a correct pin completes the login.
    """
    manager = await auth.auth_manager_from_config(mock_hass, [{
        'type': 'insecure_example',
        'users': [{
            'username': 'test-user',
            'password': 'test-pass',
            'name': 'Test Name'
        }],
    }], [{
        'type': 'insecure_example',
        'data': [{
            'user_id': 'mock-user',
            'pin': 'test-pin'
        }]
    }])
    mock_hass.auth = manager
    ensure_auth_manager_loaded(manager)
    # Add fake user with credentials for example auth provider.
    user = MockUser(
        id='mock-user',
        is_owner=False,
        is_active=False,
        name='Paulus',
    ).add_to_auth_manager(manager)
    user.credentials.append(auth_models.Credentials(
        id='mock-id',
        auth_provider_type='insecure_example',
        auth_provider_id=None,
        data={'username': 'test-user'},
        is_new=False,
    ))
    step = await manager.login_flow.async_init(('insecure_example', None))
    assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
    step = await manager.login_flow.async_configure(step['flow_id'], {
        'username': 'test-user',
        'password': 'test-pass',
    })
    # After auth_provider validated, request auth module input form
    assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert step['step_id'] == 'mfa'
    step = await manager.login_flow.async_configure(step['flow_id'], {
        'pin': 'invalid-pin',
    })
    # Invalid auth error
    assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert step['step_id'] == 'mfa'
    assert step['errors'] == {'base': 'invalid_auth'}
    step = await manager.login_flow.async_configure(step['flow_id'], {
        'pin': 'test-pin',
    })
    # Finally passed, get user
    assert step['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    user = step['result']
    assert user is not None
    assert user.id == 'mock-user'
    assert user.is_owner is False
    assert user.is_active is False
    assert user.name == 'Paulus'
async def test_login_with_multi_auth_module(mock_hass):
    """Test login as existing user with multiple auth modules.

    When the user has more than one MFA module enabled, the flow inserts a
    module-selection step before the mfa pin step.
    """
    manager = await auth.auth_manager_from_config(mock_hass, [{
        'type': 'insecure_example',
        'users': [{
            'username': 'test-user',
            'password': 'test-pass',
            'name': 'Test Name'
        }],
    }], [{
        'type': 'insecure_example',
        'data': [{
            'user_id': 'mock-user',
            'pin': 'test-pin'
        }]
    }, {
        'type': 'insecure_example',
        'id': 'module2',
        'data': [{
            'user_id': 'mock-user',
            'pin': 'test-pin2'
        }]
    }])
    mock_hass.auth = manager
    ensure_auth_manager_loaded(manager)
    # Add fake user with credentials for example auth provider.
    user = MockUser(
        id='mock-user',
        is_owner=False,
        is_active=False,
        name='Paulus',
    ).add_to_auth_manager(manager)
    user.credentials.append(auth_models.Credentials(
        id='mock-id',
        auth_provider_type='insecure_example',
        auth_provider_id=None,
        data={'username': 'test-user'},
        is_new=False,
    ))
    step = await manager.login_flow.async_init(('insecure_example', None))
    assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
    step = await manager.login_flow.async_configure(step['flow_id'], {
        'username': 'test-user',
        'password': 'test-pass',
    })
    # After auth_provider validated, request select auth module
    assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert step['step_id'] == 'select_mfa_module'
    step = await manager.login_flow.async_configure(step['flow_id'], {
        'multi_factor_auth_module': 'module2',
    })
    assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert step['step_id'] == 'mfa'
    # module2's pin is required, since that module was selected.
    step = await manager.login_flow.async_configure(step['flow_id'], {
        'pin': 'test-pin2',
    })
    # Finally passed, get user
    assert step['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
    user = step['result']
    assert user is not None
    assert user.id == 'mock-user'
    assert user.is_owner is False
    assert user.is_active is False
    assert user.name == 'Paulus'
async def test_auth_module_expired_session(mock_hass):
    """Test that the MFA step fails permanently once the session expires.

    NOTE: the original docstring ("Test login as existing user.") was a
    copy-paste from the test above; this test actually covers session
    expiration during the mfa step.
    """
    manager = await auth.auth_manager_from_config(mock_hass, [{
        'type': 'insecure_example',
        'users': [{
            'username': 'test-user',
            'password': 'test-pass',
            'name': 'Test Name'
        }],
    }], [{
        'type': 'insecure_example',
        'data': [{
            'user_id': 'mock-user',
            'pin': 'test-pin'
        }]
    }])
    mock_hass.auth = manager
    ensure_auth_manager_loaded(manager)
    # Add fake user with credentials for example auth provider.
    user = MockUser(
        id='mock-user',
        is_owner=False,
        is_active=False,
        name='Paulus',
    ).add_to_auth_manager(manager)
    user.credentials.append(auth_models.Credentials(
        id='mock-id',
        auth_provider_type='insecure_example',
        auth_provider_id=None,
        data={'username': 'test-user'},
        is_new=False,
    ))
    step = await manager.login_flow.async_init(('insecure_example', None))
    assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
    step = await manager.login_flow.async_configure(step['flow_id'], {
        'username': 'test-user',
        'password': 'test-pass',
    })
    assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert step['step_id'] == 'mfa'
    # Advance the clock past the mfa session window before submitting the pin.
    with patch('homeassistant.util.dt.utcnow',
               return_value=dt_util.utcnow() + SESSION_EXPIRATION):
        step = await manager.login_flow.async_configure(step['flow_id'], {
            'pin': 'test-pin',
        })
    # Invalid auth due session timeout
    assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert step['step_id'] == 'mfa'
    assert step['errors']['base'] == 'login_expired'
    # The second try will fail as well
    step = await manager.login_flow.async_configure(step['flow_id'], {
        'pin': 'test-pin',
    })
    assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
    assert step['step_id'] == 'mfa'
    assert step['errors']['base'] == 'login_expired'
async def test_enable_mfa_for_user(hass, hass_storage):
    """Test enable mfa module for user.

    Covers the full lifecycle: enable, query, re-enable (override),
    system-user rejection, disable, and disabling when not enabled.
    """
    manager = await auth.auth_manager_from_config(hass, [{
        'type': 'insecure_example',
        'users': [{
            'username': 'test-user',
            'password': 'test-pass',
        }]
    }], [{
        'type': 'insecure_example',
        'data': [],
    }])
    step = await manager.login_flow.async_init(('insecure_example', None))
    step = await manager.login_flow.async_configure(step['flow_id'], {
        'username': 'test-user',
        'password': 'test-pass',
    })
    user = step['result']
    assert user is not None
    # new user don't have mfa enabled
    modules = await manager.async_get_enabled_mfa(user)
    assert len(modules) == 0
    module = manager.get_auth_mfa_module('insecure_example')
    # mfa module don't have data
    assert bool(module._data) is False
    # test enable mfa for user
    await manager.async_enable_user_mfa(user, 'insecure_example',
                                        {'pin': 'test-pin'})
    assert len(module._data) == 1
    assert module._data[0] == {'user_id': user.id, 'pin': 'test-pin'}
    # test get enabled mfa
    modules = await manager.async_get_enabled_mfa(user)
    assert len(modules) == 1
    assert 'insecure_example' in modules
    # re-enable mfa for user will override
    await manager.async_enable_user_mfa(user, 'insecure_example',
                                        {'pin': 'test-pin-new'})
    assert len(module._data) == 1
    assert module._data[0] == {'user_id': user.id, 'pin': 'test-pin-new'}
    modules = await manager.async_get_enabled_mfa(user)
    assert len(modules) == 1
    assert 'insecure_example' in modules
    # system user cannot enable mfa
    system_user = await manager.async_create_system_user('system-user')
    with pytest.raises(ValueError):
        await manager.async_enable_user_mfa(system_user, 'insecure_example',
                                            {'pin': 'test-pin'})
    assert len(module._data) == 1
    modules = await manager.async_get_enabled_mfa(system_user)
    assert len(modules) == 0
    # disable mfa for user
    await manager.async_disable_user_mfa(user, 'insecure_example')
    assert bool(module._data) is False
    # test get enabled mfa
    modules = await manager.async_get_enabled_mfa(user)
    assert len(modules) == 0
    # disable mfa for user don't enabled just silent fail
    await manager.async_disable_user_mfa(user, 'insecure_example')
| 31.793103 | 78 | 0.622215 | from datetime import timedelta
from unittest.mock import Mock, patch
import pytest
from homeassistant import auth, data_entry_flow
from homeassistant.auth import (
models as auth_models, auth_store, const as auth_const)
from homeassistant.auth.mfa_modules import SESSION_EXPIRATION
from homeassistant.util import dt as dt_util
from tests.common import (
MockUser, ensure_auth_manager_loaded, flush_store, CLIENT_ID)
@pytest.fixture
def mock_hass(loop):
hass = Mock()
hass.config.skip_pip = True
return hass
async def test_auth_manager_from_config_validates_config_and_id(mock_hass):
manager = await auth.auth_manager_from_config(mock_hass, [{
'name': 'Test Name',
'type': 'insecure_example',
'users': [],
}, {
'name': 'Invalid config because no users',
'type': 'insecure_example',
'id': 'invalid_config',
}, {
'name': 'Test Name 2',
'type': 'insecure_example',
'id': 'another',
'users': [],
}, {
'name': 'Wrong because duplicate ID',
'type': 'insecure_example',
'id': 'another',
'users': [],
}], [])
providers = [{
'name': provider.name,
'id': provider.id,
'type': provider.type,
} for provider in manager.auth_providers]
assert providers == [{
'name': 'Test Name',
'type': 'insecure_example',
'id': None,
}, {
'name': 'Test Name 2',
'type': 'insecure_example',
'id': 'another',
}]
async def test_auth_manager_from_config_auth_modules(mock_hass):
manager = await auth.auth_manager_from_config(mock_hass, [{
'name': 'Test Name',
'type': 'insecure_example',
'users': [],
}, {
'name': 'Test Name 2',
'type': 'insecure_example',
'id': 'another',
'users': [],
}], [{
'name': 'Module 1',
'type': 'insecure_example',
'data': [],
}, {
'name': 'Module 2',
'type': 'insecure_example',
'id': 'another',
'data': [],
}, {
'name': 'Duplicate ID',
'type': 'insecure_example',
'id': 'another',
'data': [],
}])
providers = [{
'name': provider.name,
'type': provider.type,
'id': provider.id,
} for provider in manager.auth_providers]
assert providers == [{
'name': 'Test Name',
'type': 'insecure_example',
'id': None,
}, {
'name': 'Test Name 2',
'type': 'insecure_example',
'id': 'another',
}]
modules = [{
'name': module.name,
'type': module.type,
'id': module.id,
} for module in manager.auth_mfa_modules]
assert modules == [{
'name': 'Module 1',
'type': 'insecure_example',
'id': 'insecure_example',
}, {
'name': 'Module 2',
'type': 'insecure_example',
'id': 'another',
}]
async def test_create_new_user(hass):
manager = await auth.auth_manager_from_config(hass, [{
'type': 'insecure_example',
'users': [{
'username': 'test-user',
'password': 'test-pass',
'name': 'Test Name'
}]
}], [])
step = await manager.login_flow.async_init(('insecure_example', None))
assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(step['flow_id'], {
'username': 'test-user',
'password': 'test-pass',
})
assert step['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
user = step['result']
assert user is not None
assert user.is_owner is False
assert user.name == 'Test Name'
async def test_login_as_existing_user(mock_hass):
manager = await auth.auth_manager_from_config(mock_hass, [{
'type': 'insecure_example',
'users': [{
'username': 'test-user',
'password': 'test-pass',
'name': 'Test Name'
}]
}], [])
mock_hass.auth = manager
ensure_auth_manager_loaded(manager)
user = MockUser(
id='mock-user2',
is_owner=False,
is_active=False,
name='Not user',
).add_to_auth_manager(manager)
user.credentials.append(auth_models.Credentials(
id='mock-id2',
auth_provider_type='insecure_example',
auth_provider_id=None,
data={'username': 'other-user'},
is_new=False,
))
# Add fake user with credentials for example auth provider.
user = MockUser(
id='mock-user',
is_owner=False,
is_active=False,
name='Paulus',
).add_to_auth_manager(manager)
user.credentials.append(auth_models.Credentials(
id='mock-id',
auth_provider_type='insecure_example',
auth_provider_id=None,
data={'username': 'test-user'},
is_new=False,
))
step = await manager.login_flow.async_init(('insecure_example', None))
assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(step['flow_id'], {
'username': 'test-user',
'password': 'test-pass',
})
assert step['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
user = step['result']
assert user is not None
assert user.id == 'mock-user'
assert user.is_owner is False
assert user.is_active is False
assert user.name == 'Paulus'
async def test_linking_user_to_two_auth_providers(hass, hass_storage):
manager = await auth.auth_manager_from_config(hass, [{
'type': 'insecure_example',
'users': [{
'username': 'test-user',
'password': 'test-pass',
}]
}, {
'type': 'insecure_example',
'id': 'another-provider',
'users': [{
'username': 'another-user',
'password': 'another-password',
}]
}], [])
step = await manager.login_flow.async_init(('insecure_example', None))
step = await manager.login_flow.async_configure(step['flow_id'], {
'username': 'test-user',
'password': 'test-pass',
})
user = step['result']
assert user is not None
step = await manager.login_flow.async_init(
('insecure_example', 'another-provider'),
context={'credential_only': True})
step = await manager.login_flow.async_configure(step['flow_id'], {
'username': 'another-user',
'password': 'another-password',
})
new_credential = step['result']
await manager.async_link_user(user, new_credential)
assert len(user.credentials) == 2
async def test_saving_loading(hass, hass_storage):
manager = await auth.auth_manager_from_config(hass, [{
'type': 'insecure_example',
'users': [{
'username': 'test-user',
'password': 'test-pass',
}]
}], [])
step = await manager.login_flow.async_init(('insecure_example', None))
step = await manager.login_flow.async_configure(step['flow_id'], {
'username': 'test-user',
'password': 'test-pass',
})
user = step['result']
await manager.async_activate_user(user)
await manager.async_create_refresh_token(user, CLIENT_ID)
await flush_store(manager._store._store)
store2 = auth_store.AuthStore(hass)
users = await store2.async_get_users()
assert len(users) == 1
assert users[0] == user
async def test_cannot_retrieve_expired_access_token(hass):
manager = await auth.auth_manager_from_config(hass, [], [])
user = MockUser().add_to_auth_manager(manager)
refresh_token = await manager.async_create_refresh_token(user, CLIENT_ID)
assert refresh_token.user.id is user.id
assert refresh_token.client_id == CLIENT_ID
access_token = manager.async_create_access_token(refresh_token)
assert (
await manager.async_validate_access_token(access_token)
is refresh_token
)
with patch('homeassistant.util.dt.utcnow',
return_value=dt_util.utcnow() -
auth_const.ACCESS_TOKEN_EXPIRATION - timedelta(seconds=11)):
access_token = manager.async_create_access_token(refresh_token)
assert (
await manager.async_validate_access_token(access_token)
is None
)
async def test_generating_system_user(hass):
manager = await auth.auth_manager_from_config(hass, [], [])
user = await manager.async_create_system_user('Hass.io')
token = await manager.async_create_refresh_token(user)
assert user.system_generated
assert token is not None
assert token.client_id is None
async def test_refresh_token_requires_client_for_user(hass):
manager = await auth.auth_manager_from_config(hass, [], [])
user = MockUser().add_to_auth_manager(manager)
assert user.system_generated is False
with pytest.raises(ValueError):
await manager.async_create_refresh_token(user)
token = await manager.async_create_refresh_token(user, CLIENT_ID)
assert token is not None
assert token.client_id == CLIENT_ID
async def test_refresh_token_not_requires_client_for_system_user(hass):
manager = await auth.auth_manager_from_config(hass, [], [])
user = await manager.async_create_system_user('Hass.io')
assert user.system_generated is True
with pytest.raises(ValueError):
await manager.async_create_refresh_token(user, CLIENT_ID)
token = await manager.async_create_refresh_token(user)
assert token is not None
assert token.client_id is None
async def test_cannot_deactive_owner(mock_hass):
manager = await auth.auth_manager_from_config(mock_hass, [], [])
owner = MockUser(
is_owner=True,
).add_to_auth_manager(manager)
with pytest.raises(ValueError):
await manager.async_deactivate_user(owner)
async def test_remove_refresh_token(mock_hass):
manager = await auth.auth_manager_from_config(mock_hass, [], [])
user = MockUser().add_to_auth_manager(manager)
refresh_token = await manager.async_create_refresh_token(user, CLIENT_ID)
access_token = manager.async_create_access_token(refresh_token)
await manager.async_remove_refresh_token(refresh_token)
assert (
await manager.async_get_refresh_token(refresh_token.id) is None
)
assert (
await manager.async_validate_access_token(access_token) is None
)
async def test_login_with_auth_module(mock_hass):
manager = await auth.auth_manager_from_config(mock_hass, [{
'type': 'insecure_example',
'users': [{
'username': 'test-user',
'password': 'test-pass',
'name': 'Test Name'
}],
}], [{
'type': 'insecure_example',
'data': [{
'user_id': 'mock-user',
'pin': 'test-pin'
}]
}])
mock_hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add fake user with credentials for example auth provider.
user = MockUser(
id='mock-user',
is_owner=False,
is_active=False,
name='Paulus',
).add_to_auth_manager(manager)
user.credentials.append(auth_models.Credentials(
id='mock-id',
auth_provider_type='insecure_example',
auth_provider_id=None,
data={'username': 'test-user'},
is_new=False,
))
step = await manager.login_flow.async_init(('insecure_example', None))
assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(step['flow_id'], {
'username': 'test-user',
'password': 'test-pass',
})
# After auth_provider validated, request auth module input form
assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
assert step['step_id'] == 'mfa'
step = await manager.login_flow.async_configure(step['flow_id'], {
'pin': 'invalid-pin',
})
# Invalid auth error
assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
assert step['step_id'] == 'mfa'
assert step['errors'] == {'base': 'invalid_auth'}
step = await manager.login_flow.async_configure(step['flow_id'], {
'pin': 'test-pin',
})
# Finally passed, get user
assert step['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
user = step['result']
assert user is not None
assert user.id == 'mock-user'
assert user.is_owner is False
assert user.is_active is False
assert user.name == 'Paulus'
async def test_login_with_multi_auth_module(mock_hass):
manager = await auth.auth_manager_from_config(mock_hass, [{
'type': 'insecure_example',
'users': [{
'username': 'test-user',
'password': 'test-pass',
'name': 'Test Name'
}],
}], [{
'type': 'insecure_example',
'data': [{
'user_id': 'mock-user',
'pin': 'test-pin'
}]
}, {
'type': 'insecure_example',
'id': 'module2',
'data': [{
'user_id': 'mock-user',
'pin': 'test-pin2'
}]
}])
mock_hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add fake user with credentials for example auth provider.
user = MockUser(
id='mock-user',
is_owner=False,
is_active=False,
name='Paulus',
).add_to_auth_manager(manager)
user.credentials.append(auth_models.Credentials(
id='mock-id',
auth_provider_type='insecure_example',
auth_provider_id=None,
data={'username': 'test-user'},
is_new=False,
))
step = await manager.login_flow.async_init(('insecure_example', None))
assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(step['flow_id'], {
'username': 'test-user',
'password': 'test-pass',
})
# After auth_provider validated, request select auth module
assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
assert step['step_id'] == 'select_mfa_module'
step = await manager.login_flow.async_configure(step['flow_id'], {
'multi_factor_auth_module': 'module2',
})
assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
assert step['step_id'] == 'mfa'
step = await manager.login_flow.async_configure(step['flow_id'], {
'pin': 'test-pin2',
})
# Finally passed, get user
assert step['type'] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
user = step['result']
assert user is not None
assert user.id == 'mock-user'
assert user.is_owner is False
assert user.is_active is False
assert user.name == 'Paulus'
async def test_auth_module_expired_session(mock_hass):
manager = await auth.auth_manager_from_config(mock_hass, [{
'type': 'insecure_example',
'users': [{
'username': 'test-user',
'password': 'test-pass',
'name': 'Test Name'
}],
}], [{
'type': 'insecure_example',
'data': [{
'user_id': 'mock-user',
'pin': 'test-pin'
}]
}])
mock_hass.auth = manager
ensure_auth_manager_loaded(manager)
# Add fake user with credentials for example auth provider.
user = MockUser(
id='mock-user',
is_owner=False,
is_active=False,
name='Paulus',
).add_to_auth_manager(manager)
user.credentials.append(auth_models.Credentials(
id='mock-id',
auth_provider_type='insecure_example',
auth_provider_id=None,
data={'username': 'test-user'},
is_new=False,
))
step = await manager.login_flow.async_init(('insecure_example', None))
assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
step = await manager.login_flow.async_configure(step['flow_id'], {
'username': 'test-user',
'password': 'test-pass',
})
assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
assert step['step_id'] == 'mfa'
with patch('homeassistant.util.dt.utcnow',
return_value=dt_util.utcnow() + SESSION_EXPIRATION):
step = await manager.login_flow.async_configure(step['flow_id'], {
'pin': 'test-pin',
})
# Invalid auth due session timeout
assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
assert step['step_id'] == 'mfa'
assert step['errors']['base'] == 'login_expired'
# The second try will fail as well
step = await manager.login_flow.async_configure(step['flow_id'], {
'pin': 'test-pin',
})
assert step['type'] == data_entry_flow.RESULT_TYPE_FORM
assert step['step_id'] == 'mfa'
assert step['errors']['base'] == 'login_expired'
async def test_enable_mfa_for_user(hass, hass_storage):
manager = await auth.auth_manager_from_config(hass, [{
'type': 'insecure_example',
'users': [{
'username': 'test-user',
'password': 'test-pass',
}]
}], [{
'type': 'insecure_example',
'data': [],
}])
step = await manager.login_flow.async_init(('insecure_example', None))
step = await manager.login_flow.async_configure(step['flow_id'], {
'username': 'test-user',
'password': 'test-pass',
})
user = step['result']
assert user is not None
# new user don't have mfa enabled
modules = await manager.async_get_enabled_mfa(user)
assert len(modules) == 0
module = manager.get_auth_mfa_module('insecure_example')
assert bool(module._data) is False
# test enable mfa for user
await manager.async_enable_user_mfa(user, 'insecure_example',
{'pin': 'test-pin'})
assert len(module._data) == 1
assert module._data[0] == {'user_id': user.id, 'pin': 'test-pin'}
# test get enabled mfa
modules = await manager.async_get_enabled_mfa(user)
assert len(modules) == 1
assert 'insecure_example' in modules
# re-enable mfa for user will override
await manager.async_enable_user_mfa(user, 'insecure_example',
{'pin': 'test-pin-new'})
assert len(module._data) == 1
assert module._data[0] == {'user_id': user.id, 'pin': 'test-pin-new'}
modules = await manager.async_get_enabled_mfa(user)
assert len(modules) == 1
assert 'insecure_example' in modules
# system user cannot enable mfa
system_user = await manager.async_create_system_user('system-user')
with pytest.raises(ValueError):
await manager.async_enable_user_mfa(system_user, 'insecure_example',
{'pin': 'test-pin'})
assert len(module._data) == 1
modules = await manager.async_get_enabled_mfa(system_user)
assert len(modules) == 0
# disable mfa for user
await manager.async_disable_user_mfa(user, 'insecure_example')
assert bool(module._data) is False
# test get enabled mfa
modules = await manager.async_get_enabled_mfa(user)
assert len(modules) == 0
# disable mfa for user don't enabled just silent fail
await manager.async_disable_user_mfa(user, 'insecure_example')
| true | true |
f724b4c6733fdd2f61ff3c2a9eaddf0e2852a32c | 1,262 | py | Python | 1200-1300q/1266.py | rampup01/Leetcode | 8450a95a966ef83b24ffe6450f06ce8de92b3efb | [
"MIT"
] | 990 | 2018-06-05T11:49:22.000Z | 2022-03-31T08:59:17.000Z | 1200-1300q/1266.py | rampup01/Leetcode | 8450a95a966ef83b24ffe6450f06ce8de92b3efb | [
"MIT"
] | 1 | 2021-11-01T01:29:38.000Z | 2021-11-01T01:29:38.000Z | 1200-1300q/1266.py | rampup01/Leetcode | 8450a95a966ef83b24ffe6450f06ce8de92b3efb | [
"MIT"
] | 482 | 2018-06-12T22:16:53.000Z | 2022-03-29T00:23:29.000Z | '''
On a plane there are n points with integer coordinates points[i] = [xi, yi]. Your task is to find the minimum time in seconds to visit all points.
You can move according to the next rules:
In one second always you can either move vertically, horizontally by one unit or diagonally (it means to move one unit vertically and one unit horizontally in one second).
You have to visit the points in the same order as they appear in the array.
Input: points = [[1,1],[3,4],[-1,0]]
Output: 7
Explanation: One optimal path is [1,1] -> [2,2] -> [3,3] -> [3,4] -> [2,3] -> [1,2] -> [0,1] -> [-1,0]
Time from [1,1] to [3,4] = 3 seconds
Time from [3,4] to [-1,0] = 4 seconds
Total time = 7 seconds
Example 2:
Input: points = [[3,2],[-2,2]]
Output: 5
Constraints:
points.length == n
1 <= n <= 100
points[i].length == 2
-1000 <= points[i][0], points[i][1] <= 1000
'''
class Solution(object):
def minTimeToVisitAllPoints(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
if not points:
return 0
result = 0
for index in range(1, len(points)):
result += max(abs(points[index][0]-points[index-1][0]), abs(points[index][1]-points[index-1][1]))
return result
| 30.780488 | 171 | 0.616482 |
class Solution(object):
def minTimeToVisitAllPoints(self, points):
if not points:
return 0
result = 0
for index in range(1, len(points)):
result += max(abs(points[index][0]-points[index-1][0]), abs(points[index][1]-points[index-1][1]))
return result
| true | true |
f724b5246de70e1ed75d10855245cd87b8034692 | 2,628 | py | Python | app/selenium_ui/jira_ui.py | Unleash/jira-performance-tests | 3c84d65cbf70e2db5ea4c1af303d6dab4a218771 | [
"Apache-2.0"
] | null | null | null | app/selenium_ui/jira_ui.py | Unleash/jira-performance-tests | 3c84d65cbf70e2db5ea4c1af303d6dab4a218771 | [
"Apache-2.0"
] | null | null | null | app/selenium_ui/jira_ui.py | Unleash/jira-performance-tests | 3c84d65cbf70e2db5ea4c1af303d6dab4a218771 | [
"Apache-2.0"
] | null | null | null | from selenium_ui.jira import modules
from extension.jira import extension_ui # noqa F401
# this action should be the first one
def test_0_selenium_a_login(jira_webdriver, jira_datasets, jira_screen_shots):
modules.login(jira_webdriver, jira_datasets)
def test_1_selenium_browse_projects_list(jira_webdriver, jira_datasets, jira_screen_shots):
modules.browse_projects_list(jira_webdriver, jira_datasets)
def test_1_selenium_browse_boards_list(jira_webdriver, jira_datasets, jira_screen_shots):
modules.browse_boards_list(jira_webdriver, jira_datasets)
def test_1_selenium_create_issue(jira_webdriver, jira_datasets, jira_screen_shots):
modules.create_issue(jira_webdriver, jira_datasets)
def test_1_selenium_edit_issue(jira_webdriver, jira_datasets, jira_screen_shots):
modules.edit_issue(jira_webdriver, jira_datasets)
def test_1_selenium_save_comment(jira_webdriver, jira_datasets, jira_screen_shots):
modules.save_comment(jira_webdriver, jira_datasets)
def test_1_selenium_search_jql(jira_webdriver, jira_datasets, jira_screen_shots):
modules.search_jql(jira_webdriver, jira_datasets)
def test_1_selenium_view_backlog_for_scrum_board(jira_webdriver, jira_datasets, jira_screen_shots):
modules.view_backlog_for_scrum_board(jira_webdriver, jira_datasets)
def test_1_selenium_view_scrum_board(jira_webdriver, jira_datasets, jira_screen_shots):
modules.view_scrum_board(jira_webdriver, jira_datasets)
def test_1_selenium_view_kanban_board(jira_webdriver, jira_datasets, jira_screen_shots):
modules.view_kanban_board(jira_webdriver, jira_datasets)
def test_1_selenium_view_dashboard(jira_webdriver, jira_datasets, jira_screen_shots):
modules.view_dashboard(jira_webdriver, jira_datasets)
def test_1_selenium_view_issue(jira_webdriver, jira_datasets, jira_screen_shots):
modules.view_issue(jira_webdriver, jira_datasets)
def test_1_selenium_view_project_summary(jira_webdriver, jira_datasets, jira_screen_shots):
modules.view_project_summary(jira_webdriver, jira_datasets)
"""
Add custom actions anywhere between login and log out action. Move this to a different line as needed.
Write your custom selenium scripts in `app/extension/jira/extension_ui.py`.
Refer to `app/selenium_ui/jira/modules.py` for examples.
"""
def test_1_selenium_view_bind_unleash_toggle(jira_webdriver, jira_datasets, jira_screen_shots):
extension_ui.app_specific_action(jira_webdriver, jira_datasets)
# this action should be the last one
def test_2_selenium_z_log_out(jira_webdriver, jira_datasets, jira_screen_shots):
modules.log_out(jira_webdriver, jira_datasets)
| 38.086957 | 102 | 0.847412 | from selenium_ui.jira import modules
from extension.jira import extension_ui
def test_0_selenium_a_login(jira_webdriver, jira_datasets, jira_screen_shots):
modules.login(jira_webdriver, jira_datasets)
def test_1_selenium_browse_projects_list(jira_webdriver, jira_datasets, jira_screen_shots):
modules.browse_projects_list(jira_webdriver, jira_datasets)
def test_1_selenium_browse_boards_list(jira_webdriver, jira_datasets, jira_screen_shots):
modules.browse_boards_list(jira_webdriver, jira_datasets)
def test_1_selenium_create_issue(jira_webdriver, jira_datasets, jira_screen_shots):
modules.create_issue(jira_webdriver, jira_datasets)
def test_1_selenium_edit_issue(jira_webdriver, jira_datasets, jira_screen_shots):
modules.edit_issue(jira_webdriver, jira_datasets)
def test_1_selenium_save_comment(jira_webdriver, jira_datasets, jira_screen_shots):
modules.save_comment(jira_webdriver, jira_datasets)
def test_1_selenium_search_jql(jira_webdriver, jira_datasets, jira_screen_shots):
modules.search_jql(jira_webdriver, jira_datasets)
def test_1_selenium_view_backlog_for_scrum_board(jira_webdriver, jira_datasets, jira_screen_shots):
modules.view_backlog_for_scrum_board(jira_webdriver, jira_datasets)
def test_1_selenium_view_scrum_board(jira_webdriver, jira_datasets, jira_screen_shots):
modules.view_scrum_board(jira_webdriver, jira_datasets)
def test_1_selenium_view_kanban_board(jira_webdriver, jira_datasets, jira_screen_shots):
modules.view_kanban_board(jira_webdriver, jira_datasets)
def test_1_selenium_view_dashboard(jira_webdriver, jira_datasets, jira_screen_shots):
modules.view_dashboard(jira_webdriver, jira_datasets)
def test_1_selenium_view_issue(jira_webdriver, jira_datasets, jira_screen_shots):
modules.view_issue(jira_webdriver, jira_datasets)
def test_1_selenium_view_project_summary(jira_webdriver, jira_datasets, jira_screen_shots):
modules.view_project_summary(jira_webdriver, jira_datasets)
def test_1_selenium_view_bind_unleash_toggle(jira_webdriver, jira_datasets, jira_screen_shots):
extension_ui.app_specific_action(jira_webdriver, jira_datasets)
def test_2_selenium_z_log_out(jira_webdriver, jira_datasets, jira_screen_shots):
modules.log_out(jira_webdriver, jira_datasets)
| true | true |
f724b5fcc381b20213023849c42e3eab574ce8f1 | 8,163 | py | Python | pgoapi/pgoapi.py | tetsunosuke/pgoapi | faeab3668b9efcd4955fe382ef735a9ee31a1784 | [
"MIT"
] | 1 | 2019-05-29T06:17:22.000Z | 2019-05-29T06:17:22.000Z | pgoapi/pgoapi.py | tetsunosuke/pgoapi | faeab3668b9efcd4955fe382ef735a9ee31a1784 | [
"MIT"
] | null | null | null | pgoapi/pgoapi.py | tetsunosuke/pgoapi | faeab3668b9efcd4955fe382ef735a9ee31a1784 | [
"MIT"
] | null | null | null | """
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
from __future__ import absolute_import
import re
import six
import logging
import requests
from . import __title__, __version__, __copyright__
from pgoapi.rpc_api import RpcApi
from pgoapi.auth_ptc import AuthPtc
from pgoapi.auth_google import AuthGoogle
from pgoapi.exceptions import AuthException, NotLoggedInException, ServerBusyOrOfflineException, NoPlayerPositionSetException, EmptySubrequestChainException
from . import protos
from POGOProtos.Networking.Requests_pb2 import RequestType
logger = logging.getLogger(__name__)
class PGoApi:
def __init__(self):
self.set_logger()
self._auth_provider = None
self._api_endpoint = 'https://pgorelease.nianticlabs.com/plfe/rpc'
self._position_lat = None
self._position_lng = None
self._position_alt = None
self.log.info('%s v%s - %s', __title__, __version__, __copyright__ )
def set_logger(self, logger = None):
self.log = logger or logging.getLogger(__name__)
def get_api_endpoint(self):
return self._api_endpoint
def get_position(self):
return (self._position_lat, self._position_lng, self._position_alt)
def set_position(self, lat, lng, alt):
self.log.debug('Set Position - Lat: %s Long: %s Alt: %s', lat, lng, alt)
self._position_lat = lat
self._position_lng = lng
self._position_alt = alt
def create_request(self):
request = PGoApiRequest(self._api_endpoint, self._auth_provider, self._position_lat, self._position_lng, self._position_alt)
return request
def __getattr__(self, func):
def function(**kwargs):
request = self.create_request()
getattr(request, func)( _call_direct = True, **kwargs )
return request.call()
if func.upper() in RequestType.keys():
return function
else:
print func.upper()
print RequestType.keys()
raise AttributeError
def login(self, provider, username, password, lat = None, lng = None, alt = None, app_simulation = True):
if (lat is not None) and (lng is not None) and (alt is not None):
self._position_lat = lat
self._position_lng = lng
self._position_alt = alt
if not isinstance(username, six.string_types) or not isinstance(password, six.string_types):
raise AuthException("Username/password not correctly specified")
if provider == 'ptc':
self._auth_provider = AuthPtc()
elif provider == 'google':
self._auth_provider = AuthGoogle()
else:
raise AuthException("Invalid authentication provider - only ptc/google available.")
self.log.debug('Auth provider: %s', provider)
if not self._auth_provider.login(username, password):
self.log.info('Login process failed')
return False
if app_simulation:
self.log.info('Starting RPC login sequence (app simulation)')
# making a standard call, like it is also done by the client
request = self.create_request()
request.get_player()
request.get_hatched_eggs()
request.get_inventory()
request.check_awarded_badges()
request.download_settings(hash="05daf51635c82611d1aac95c0b051d3ec088a930")
response = request.call()
else:
self.log.info('Starting minimal RPC login sequence')
response = self.get_player()
if not response:
self.log.info('Login failed!')
return False
if 'api_url' in response:
self._api_endpoint = ('https://{}/rpc'.format(response['api_url']))
self.log.debug('Setting API endpoint to: %s', self._api_endpoint)
else:
self.log.error('Login failed - unexpected server response!')
return False
if app_simulation:
self.log.info('Finished RPC login sequence (app simulation)')
else:
self.log.info('Finished minimal RPC login sequence')
self.log.info('Login process completed')
return True
class PGoApiRequest:
def __init__(self, api_endpoint, auth_provider, position_lat, position_lng, position_alt):
self.log = logging.getLogger(__name__)
""" Inherit necessary parameters """
self._api_endpoint = api_endpoint
self._auth_provider = auth_provider
self._position_lat = position_lat
self._position_lng = position_lng
self._position_alt = position_alt
self._req_method_list = []
def call(self):
if not self._req_method_list:
raise EmptySubrequestChainException()
if (self._position_lat is None) or (self._position_lng is None) or (self._position_alt is None):
raise NoPlayerPositionSetException()
if self._auth_provider is None or not self._auth_provider.is_login():
self.log.info('Not logged in')
return NotLoggedInException()
request = RpcApi(self._auth_provider)
self.log.info('Execution of RPC')
response = None
try:
response = request.request(self._api_endpoint, self._req_method_list, self.get_position())
except ServerBusyOrOfflineException as e:
self.log.info('Server seems to be busy or offline - try again!')
# cleanup after call execution
self.log.info('Cleanup of request!')
self._req_method_list = []
return response
def list_curr_methods(self):
for i in self._req_method_list:
print("{} ({})".format(RequestType.Name(i), i))
def get_position(self):
return (self._position_lat, self._position_lng, self._position_alt)
def set_position(self, lat, lng, alt):
self.log.debug('Set Position - Lat: %s Long: %s Alt: %s', lat, lng, alt)
self._position_lat = lat
self._position_lng = lng
self._position_alt = alt
def __getattr__(self, func):
def function(**kwargs):
if '_call_direct' in kwargs:
del kwargs['_call_direct']
self.log.info('Creating a new direct request...')
elif not self._req_method_list:
self.log.info('Creating a new request...')
name = func.upper()
if kwargs:
self._req_method_list.append({RequestType.Value(name): kwargs})
self.log.info("Adding '%s' to RPC request including arguments", name)
self.log.debug("Arguments of '%s': \n\r%s", name, kwargs)
else:
self._req_method_list.append(RequestType.Value(name))
self.log.info("Adding '%s' to RPC request", name)
return self
if func.upper() in RequestType.keys():
return function
else:
raise AttributeError
| 35.185345 | 156 | 0.651599 | """
pgoapi - Pokemon Go API
Copyright (c) 2016 tjado <https://github.com/tejado>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
OR OTHER DEALINGS IN THE SOFTWARE.
Author: tjado <https://github.com/tejado>
"""
from __future__ import absolute_import
import re
import six
import logging
import requests
from . import __title__, __version__, __copyright__
from pgoapi.rpc_api import RpcApi
from pgoapi.auth_ptc import AuthPtc
from pgoapi.auth_google import AuthGoogle
from pgoapi.exceptions import AuthException, NotLoggedInException, ServerBusyOrOfflineException, NoPlayerPositionSetException, EmptySubrequestChainException
from . import protos
from POGOProtos.Networking.Requests_pb2 import RequestType
logger = logging.getLogger(__name__)
class PGoApi:
def __init__(self):
self.set_logger()
self._auth_provider = None
self._api_endpoint = 'https://pgorelease.nianticlabs.com/plfe/rpc'
self._position_lat = None
self._position_lng = None
self._position_alt = None
self.log.info('%s v%s - %s', __title__, __version__, __copyright__ )
def set_logger(self, logger = None):
self.log = logger or logging.getLogger(__name__)
def get_api_endpoint(self):
return self._api_endpoint
def get_position(self):
return (self._position_lat, self._position_lng, self._position_alt)
def set_position(self, lat, lng, alt):
self.log.debug('Set Position - Lat: %s Long: %s Alt: %s', lat, lng, alt)
self._position_lat = lat
self._position_lng = lng
self._position_alt = alt
def create_request(self):
request = PGoApiRequest(self._api_endpoint, self._auth_provider, self._position_lat, self._position_lng, self._position_alt)
return request
def __getattr__(self, func):
def function(**kwargs):
request = self.create_request()
getattr(request, func)( _call_direct = True, **kwargs )
return request.call()
if func.upper() in RequestType.keys():
return function
else:
print func.upper()
print RequestType.keys()
raise AttributeError
def login(self, provider, username, password, lat = None, lng = None, alt = None, app_simulation = True):
if (lat is not None) and (lng is not None) and (alt is not None):
self._position_lat = lat
self._position_lng = lng
self._position_alt = alt
if not isinstance(username, six.string_types) or not isinstance(password, six.string_types):
raise AuthException("Username/password not correctly specified")
if provider == 'ptc':
self._auth_provider = AuthPtc()
elif provider == 'google':
self._auth_provider = AuthGoogle()
else:
raise AuthException("Invalid authentication provider - only ptc/google available.")
self.log.debug('Auth provider: %s', provider)
if not self._auth_provider.login(username, password):
self.log.info('Login process failed')
return False
if app_simulation:
self.log.info('Starting RPC login sequence (app simulation)')
request = self.create_request()
request.get_player()
request.get_hatched_eggs()
request.get_inventory()
request.check_awarded_badges()
request.download_settings(hash="05daf51635c82611d1aac95c0b051d3ec088a930")
response = request.call()
else:
self.log.info('Starting minimal RPC login sequence')
response = self.get_player()
if not response:
self.log.info('Login failed!')
return False
if 'api_url' in response:
self._api_endpoint = ('https://{}/rpc'.format(response['api_url']))
self.log.debug('Setting API endpoint to: %s', self._api_endpoint)
else:
self.log.error('Login failed - unexpected server response!')
return False
if app_simulation:
self.log.info('Finished RPC login sequence (app simulation)')
else:
self.log.info('Finished minimal RPC login sequence')
self.log.info('Login process completed')
return True
class PGoApiRequest:
def __init__(self, api_endpoint, auth_provider, position_lat, position_lng, position_alt):
self.log = logging.getLogger(__name__)
""" Inherit necessary parameters """
self._api_endpoint = api_endpoint
self._auth_provider = auth_provider
self._position_lat = position_lat
self._position_lng = position_lng
self._position_alt = position_alt
self._req_method_list = []
def call(self):
if not self._req_method_list:
raise EmptySubrequestChainException()
if (self._position_lat is None) or (self._position_lng is None) or (self._position_alt is None):
raise NoPlayerPositionSetException()
if self._auth_provider is None or not self._auth_provider.is_login():
self.log.info('Not logged in')
return NotLoggedInException()
request = RpcApi(self._auth_provider)
self.log.info('Execution of RPC')
response = None
try:
response = request.request(self._api_endpoint, self._req_method_list, self.get_position())
except ServerBusyOrOfflineException as e:
self.log.info('Server seems to be busy or offline - try again!')
self.log.info('Cleanup of request!')
self._req_method_list = []
return response
def list_curr_methods(self):
for i in self._req_method_list:
print("{} ({})".format(RequestType.Name(i), i))
def get_position(self):
return (self._position_lat, self._position_lng, self._position_alt)
def set_position(self, lat, lng, alt):
self.log.debug('Set Position - Lat: %s Long: %s Alt: %s', lat, lng, alt)
self._position_lat = lat
self._position_lng = lng
self._position_alt = alt
def __getattr__(self, func):
def function(**kwargs):
if '_call_direct' in kwargs:
del kwargs['_call_direct']
self.log.info('Creating a new direct request...')
elif not self._req_method_list:
self.log.info('Creating a new request...')
name = func.upper()
if kwargs:
self._req_method_list.append({RequestType.Value(name): kwargs})
self.log.info("Adding '%s' to RPC request including arguments", name)
self.log.debug("Arguments of '%s': \n\r%s", name, kwargs)
else:
self._req_method_list.append(RequestType.Value(name))
self.log.info("Adding '%s' to RPC request", name)
return self
if func.upper() in RequestType.keys():
return function
else:
raise AttributeError
| false | true |
f724b67a605f31ce4eecd96e689074ae628c81ea | 12,486 | py | Python | mailchimp3/entities/lists.py | MyMusicTaste/async-python-mailchimp | 10a53dc6f1406fa23abca89da142a51e123dd966 | [
"MIT"
] | 9 | 2018-05-15T06:49:26.000Z | 2020-12-20T13:43:56.000Z | mailchimp3/entities/lists.py | MyMusicTaste/async-python-mailchimp | 10a53dc6f1406fa23abca89da142a51e123dd966 | [
"MIT"
] | null | null | null | mailchimp3/entities/lists.py | MyMusicTaste/async-python-mailchimp | 10a53dc6f1406fa23abca89da142a51e123dd966 | [
"MIT"
] | null | null | null | # coding=utf-8
"""
The Lists API endpoint
Documentation: http://developer.mailchimp.com/documentation/mailchimp/reference/lists/
Schema: https://api.mailchimp.com/schema/3.0/Lists/Instance.json
"""
from __future__ import unicode_literals
from mailchimp3.baseapi import BaseApi
from mailchimp3.entities.listabusereports import ListAbuseReports
from mailchimp3.entities.listactivity import ListActivity
from mailchimp3.entities.listclients import ListClients
from mailchimp3.entities.listgrowthhistory import ListGrowthHistory
from mailchimp3.entities.listinterestcategories import ListInterestCategories
from mailchimp3.entities.listmembers import ListMembers
from mailchimp3.entities.listmergefields import ListMergeFields
from mailchimp3.entities.listsegments import ListSegments
from mailchimp3.entities.listsignupforms import ListSignupForms
from mailchimp3.entities.listwebhooks import ListWebhooks
from mailchimp3.helpers import check_email
class Lists(BaseApi):
"""
A MailChimp list is a powerful and flexible tool that helps you manage your contacts.
"""
def __init__(self, *args, **kwargs):
"""
Initialize the endpoint
"""
super(Lists, self).__init__(*args, **kwargs)
self.endpoint = 'lists'
self.list_id = None
self.abuse_reports = ListAbuseReports(self)
self.activity = ListActivity(self)
self.clients = ListClients(self)
self.growth_history = ListGrowthHistory(self)
self.interest_categories = ListInterestCategories(self)
self.members = ListMembers(self)
self.merge_fields = ListMergeFields(self)
self.segments = ListSegments(self)
self.signup_forms = ListSignupForms(self)
self.webhooks = ListWebhooks(self)
async def create(self, data):
"""
Create a new list in your MailChimp account.
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"name": string*,
"contact": object*
{
"company": string*,
"address1": string*,
"city": string*,
"state": string*,
"zip": string*,
"country": string*
},
"permission_reminder": string*,
"campaign_defaults": object*
{
"from_name": string*,
"from_email": string*,
"subject": string*,
"language": string*
},
"email_type_option": boolean
}
"""
if 'name' not in data:
raise KeyError('The list must have a name')
if 'contact' not in data:
raise KeyError('The list must have a contact')
if 'company' not in data['contact']:
raise KeyError('The list contact must have a company')
if 'address1' not in data['contact']:
raise KeyError('The list contact must have a address1')
if 'city' not in data['contact']:
raise KeyError('The list contact must have a city')
if 'state' not in data['contact']:
raise KeyError('The list contact must have a state')
if 'zip' not in data['contact']:
raise KeyError('The list contact must have a zip')
if 'country' not in data['contact']:
raise KeyError('The list contact must have a country')
if 'permission_reminder' not in data:
raise KeyError('The list must have a permission_reminder')
if 'campaign_defaults' not in data:
raise KeyError('The list must have a campaign_defaults')
if 'from_name' not in data['campaign_defaults']:
raise KeyError('The list campaign_defaults must have a from_name')
if 'from_email' not in data['campaign_defaults']:
raise KeyError('The list campaign_defaults must have a from_email')
check_email(data['campaign_defaults']['from_email'])
if 'subject' not in data['campaign_defaults']:
raise KeyError('The list campaign_defaults must have a subject')
if 'language' not in data['campaign_defaults']:
raise KeyError('The list campaign_defaults must have a language')
if 'email_type_option' not in data:
raise KeyError('The list must have an email_type_option')
if data['email_type_option'] not in [True, False]:
raise TypeError('The list email_type_option must be True or False')
response = await self._mc_client._post(url=self._build_path(), data=data)
if response is not None:
self.list_id = response['id']
else:
self.list_id = None
return response
async def update_members(self, list_id, data):
"""
Batch subscribe or unsubscribe list members.
Only the members array is required in the request body parameters.
Within the members array, each member requires an email_address
and either a status or status_if_new. The update_existing parameter
will also be considered required to help prevent accidental updates
to existing members and will default to false if not present.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"members": array*
[
{
"email_address": string*,
"status": string* (Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'pending'),
"status_if_new": string* (Must be one of 'subscribed', 'unsubscribed', 'cleaned', or 'pending')
}
],
"update_existing": boolean*
}
"""
self.list_id = list_id
if 'members' not in data:
raise KeyError('The update must have at least one member')
else:
if not len(data['members']) <= 500:
raise ValueError('You may only batch sub/unsub 500 members at a time')
for member in data['members']:
if 'email_address' not in member:
raise KeyError('Each list member must have an email_address')
check_email(member['email_address'])
if 'status' not in member and 'status_if_new' not in member:
raise KeyError('Each list member must have either a status or a status_if_new')
valid_statuses = ['subscribed', 'unsubscribed', 'cleaned', 'pending']
if 'status' in member and member['status'] not in valid_statuses:
raise ValueError('The list member status must be one of "subscribed", "unsubscribed", "cleaned", or '
'"pending"')
if 'status_if_new' in member and member['status_if_new'] not in valid_statuses:
raise ValueError('The list member status_if_new must be one of "subscribed", "unsubscribed", '
'"cleaned", or "pending"')
if 'update_existing' not in data:
data['update_existing'] = False
return await self._mc_client._post(url=self._build_path(list_id), data=data)
async def all(self, get_all=False, **queryparams):
"""
Get information about all lists in the account.
:param get_all: Should the query get all results
:type get_all: :py:class:`bool`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
queryparams['count'] = integer
queryparams['offset'] = integer
queryparams['before_date_created'] = string
queryparams['since_date_created'] = string
queryparams['before_campaign_last_sent'] = string
queryparams['since_campaign_last_sent'] = string
queryparams['email'] = string
queryparams['sort_field'] = string (Must be 'date_created')
queryparams['sort_dir'] = string (Must be one of 'ASC' or 'DESC')
"""
self.list_id = None
if get_all:
return await self._iterate(url=self._build_path(), **queryparams)
else:
return await self._mc_client._get(url=self._build_path(), **queryparams)
async def get(self, list_id, **queryparams):
"""
Get information about a specific list in your MailChimp account.
Results include list members who have signed up but haven’t confirmed
their subscription yet and unsubscribed or cleaned.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
"""
self.list_id = list_id
return await self._mc_client._get(url=self._build_path(list_id), **queryparams)
async def update(self, list_id, data):
"""
Update the settings for a specific list.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
:param data: The request body parameters
:type data: :py:class:`dict`
data = {
"name": string*,
"contact": object*
{
"company": string*,
"address1": string*,
"city": string*,
"state": string*,
"zip": string*,
"country": string*
},
"permission_reminder": string*,
"campaign_defaults": object*
{
"from_name": string*,
"from_email": string*,
"subject": string*,
"language": string*
},
"email_type_option": boolean
}
"""
self.list_id = list_id
if 'name' not in data:
raise KeyError('The list must have a name')
if 'contact' not in data:
raise KeyError('The list must have a contact')
if 'company' not in data['contact']:
raise KeyError('The list contact must have a company')
if 'address1' not in data['contact']:
raise KeyError('The list contact must have a address1')
if 'city' not in data['contact']:
raise KeyError('The list contact must have a city')
if 'state' not in data['contact']:
raise KeyError('The list contact must have a state')
if 'zip' not in data['contact']:
raise KeyError('The list contact must have a zip')
if 'country' not in data['contact']:
raise KeyError('The list contact must have a country')
if 'permission_reminder' not in data:
raise KeyError('The list must have a permission_reminder')
if 'campaign_defaults' not in data:
raise KeyError('The list must have a campaign_defaults')
if 'from_name' not in data['campaign_defaults']:
raise KeyError('The list campaign_defaults must have a from_name')
if 'from_email' not in data['campaign_defaults']:
raise KeyError('The list campaign_defaults must have a from_email')
check_email(data['campaign_defaults']['from_email'])
if 'subject' not in data['campaign_defaults']:
raise KeyError('The list campaign_defaults must have a subject')
if 'language' not in data['campaign_defaults']:
raise KeyError('The list campaign_defaults must have a language')
if 'email_type_option' not in data:
raise KeyError('The list must have an email_type_option')
if data['email_type_option'] not in [True, False]:
raise TypeError('The list email_type_option must be True or False')
return await self._mc_client._patch(url=self._build_path(list_id), data=data)
async def delete(self, list_id):
"""
Delete a list from your MailChimp account. If you delete a list,
you’ll lose the list history—including subscriber activity,
unsubscribes, complaints, and bounces. You’ll also lose subscribers’
email addresses, unless you exported and backed up your list.
:param list_id: The unique id for the list.
:type list_id: :py:class:`str`
"""
self.list_id = list_id
return await self._mc_client._delete(url=self._build_path(list_id))
| 44.276596 | 117 | 0.614448 |
from __future__ import unicode_literals
from mailchimp3.baseapi import BaseApi
from mailchimp3.entities.listabusereports import ListAbuseReports
from mailchimp3.entities.listactivity import ListActivity
from mailchimp3.entities.listclients import ListClients
from mailchimp3.entities.listgrowthhistory import ListGrowthHistory
from mailchimp3.entities.listinterestcategories import ListInterestCategories
from mailchimp3.entities.listmembers import ListMembers
from mailchimp3.entities.listmergefields import ListMergeFields
from mailchimp3.entities.listsegments import ListSegments
from mailchimp3.entities.listsignupforms import ListSignupForms
from mailchimp3.entities.listwebhooks import ListWebhooks
from mailchimp3.helpers import check_email
class Lists(BaseApi):
    """MailChimp Lists endpoint: CRUD for audiences plus batch member updates.

    Exposes the nested list sub-entities (abuse reports, activity, members,
    segments, webhooks, ...) as attributes created in ``__init__``.
    """
    def __init__(self, *args, **kwargs):
        # Initialize the endpoint and attach every list sub-entity wrapper.
        super(Lists, self).__init__(*args, **kwargs)
        self.endpoint = 'lists'
        self.list_id = None
        self.abuse_reports = ListAbuseReports(self)
        self.activity = ListActivity(self)
        self.clients = ListClients(self)
        self.growth_history = ListGrowthHistory(self)
        self.interest_categories = ListInterestCategories(self)
        self.members = ListMembers(self)
        self.merge_fields = ListMergeFields(self)
        self.segments = ListSegments(self)
        self.signup_forms = ListSignupForms(self)
        self.webhooks = ListWebhooks(self)
    async def create(self, data):
        """Create a new list; validates required fields, stores the new id."""
        if 'name' not in data:
            raise KeyError('The list must have a name')
        if 'contact' not in data:
            raise KeyError('The list must have a contact')
        if 'company' not in data['contact']:
            raise KeyError('The list contact must have a company')
        if 'address1' not in data['contact']:
            raise KeyError('The list contact must have a address1')
        if 'city' not in data['contact']:
            raise KeyError('The list contact must have a city')
        if 'state' not in data['contact']:
            raise KeyError('The list contact must have a state')
        if 'zip' not in data['contact']:
            raise KeyError('The list contact must have a zip')
        if 'country' not in data['contact']:
            raise KeyError('The list contact must have a country')
        if 'permission_reminder' not in data:
            raise KeyError('The list must have a permission_reminder')
        if 'campaign_defaults' not in data:
            raise KeyError('The list must have a campaign_defaults')
        if 'from_name' not in data['campaign_defaults']:
            raise KeyError('The list campaign_defaults must have a from_name')
        if 'from_email' not in data['campaign_defaults']:
            raise KeyError('The list campaign_defaults must have a from_email')
        check_email(data['campaign_defaults']['from_email'])
        if 'subject' not in data['campaign_defaults']:
            raise KeyError('The list campaign_defaults must have a subject')
        if 'language' not in data['campaign_defaults']:
            raise KeyError('The list campaign_defaults must have a language')
        if 'email_type_option' not in data:
            raise KeyError('The list must have an email_type_option')
        if data['email_type_option'] not in [True, False]:
            raise TypeError('The list email_type_option must be True or False')
        response = await self._mc_client._post(url=self._build_path(), data=data)
        # Remember the id of the list just created (None if the POST failed).
        if response is not None:
            self.list_id = response['id']
        else:
            self.list_id = None
        return response
    async def update_members(self, list_id, data):
        """Batch subscribe/unsubscribe up to 500 members of a list."""
        self.list_id = list_id
        if 'members' not in data:
            raise KeyError('The update must have at least one member')
        else:
            if not len(data['members']) <= 500:
                raise ValueError('You may only batch sub/unsub 500 members at a time')
        for member in data['members']:
            if 'email_address' not in member:
                raise KeyError('Each list member must have an email_address')
            check_email(member['email_address'])
            if 'status' not in member and 'status_if_new' not in member:
                raise KeyError('Each list member must have either a status or a status_if_new')
            valid_statuses = ['subscribed', 'unsubscribed', 'cleaned', 'pending']
            if 'status' in member and member['status'] not in valid_statuses:
                raise ValueError('The list member status must be one of "subscribed", "unsubscribed", "cleaned", or '
                                 '"pending"')
            if 'status_if_new' in member and member['status_if_new'] not in valid_statuses:
                raise ValueError('The list member status_if_new must be one of "subscribed", "unsubscribed", '
                                 '"cleaned", or "pending"')
        # Default to not updating existing members to prevent accidents.
        if 'update_existing' not in data:
            data['update_existing'] = False
        return await self._mc_client._post(url=self._build_path(list_id), data=data)
    async def all(self, get_all=False, **queryparams):
        """Get information about all lists; iterate all pages when get_all."""
        self.list_id = None
        if get_all:
            return await self._iterate(url=self._build_path(), **queryparams)
        else:
            return await self._mc_client._get(url=self._build_path(), **queryparams)
    async def get(self, list_id, **queryparams):
        """Get information about one specific list."""
        self.list_id = list_id
        return await self._mc_client._get(url=self._build_path(list_id), **queryparams)
    async def update(self, list_id, data):
        """Update the settings of a specific list; validates required fields."""
        self.list_id = list_id
        if 'name' not in data:
            raise KeyError('The list must have a name')
        if 'contact' not in data:
            raise KeyError('The list must have a contact')
        if 'company' not in data['contact']:
            raise KeyError('The list contact must have a company')
        if 'address1' not in data['contact']:
            raise KeyError('The list contact must have a address1')
        if 'city' not in data['contact']:
            raise KeyError('The list contact must have a city')
        if 'state' not in data['contact']:
            raise KeyError('The list contact must have a state')
        if 'zip' not in data['contact']:
            raise KeyError('The list contact must have a zip')
        if 'country' not in data['contact']:
            raise KeyError('The list contact must have a country')
        if 'permission_reminder' not in data:
            raise KeyError('The list must have a permission_reminder')
        if 'campaign_defaults' not in data:
            raise KeyError('The list must have a campaign_defaults')
        if 'from_name' not in data['campaign_defaults']:
            raise KeyError('The list campaign_defaults must have a from_name')
        if 'from_email' not in data['campaign_defaults']:
            raise KeyError('The list campaign_defaults must have a from_email')
        check_email(data['campaign_defaults']['from_email'])
        if 'subject' not in data['campaign_defaults']:
            raise KeyError('The list campaign_defaults must have a subject')
        if 'language' not in data['campaign_defaults']:
            raise KeyError('The list campaign_defaults must have a language')
        if 'email_type_option' not in data:
            raise KeyError('The list must have an email_type_option')
        if data['email_type_option'] not in [True, False]:
            raise TypeError('The list email_type_option must be True or False')
        return await self._mc_client._patch(url=self._build_path(list_id), data=data)
    async def delete(self, list_id):
        """Delete a list (history and subscriber addresses are lost)."""
        self.list_id = list_id
        return await self._mc_client._delete(url=self._build_path(list_id))
| true | true |
f724b6f57b0beae0774790afba29b953900b5714 | 58,559 | py | Python | pilotnet/viz_gamepy.py | Pronton2001/carla_pilotnet | 813ca14e04eccd405fde5fff350fe23c6ada5657 | [
"MIT"
] | null | null | null | pilotnet/viz_gamepy.py | Pronton2001/carla_pilotnet | 813ca14e04eccd405fde5fff350fe23c6ada5657 | [
"MIT"
] | null | null | null | pilotnet/viz_gamepy.py | Pronton2001/carla_pilotnet | 813ca14e04eccd405fde5fff350fe23c6ada5657 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
# Allows controlling a vehicle with a keyboard. For a simpler and more
# documented example, please take a look at tutorial.py.
"""
Welcome to CARLA manual control.
Use ARROWS or WASD keys for control.
W : throttle
S : brake
A/D : steer left/right
Q : toggle reverse
Space : hand-brake
P : toggle autopilot
M : toggle manual transmission
,/. : gear up/down
CTRL + W : toggle constant velocity mode at 60 km/h
L : toggle next light type
SHIFT + L : toggle high beam
Z/X : toggle right/left blinker
I : toggle interior light
TAB : change sensor position
` or N : next sensor
[1-9] : change to sensor [1-9]
G : toggle radar visualization
C : change weather (Shift+C reverse)
Backspace : change vehicle
V : Select next map layer (Shift+V reverse)
B : Load current selected map layer (Shift+B to unload)
R : toggle recording images to disk
T : toggle vehicle's telemetry
CTRL + R : toggle recording of simulation (replacing any previous)
CTRL + P : start replaying last recorded simulation
CTRL + + : increments the start time of the replay by 1 second (+SHIFT = 10 seconds)
CTRL + - : decrements the start time of the replay by 1 second (+SHIFT = 10 seconds)
F1 : toggle HUD
H/? : toggle help
ESC : quit
"""
from __future__ import print_function
# ==============================================================================
# -- find carla module ---------------------------------------------------------
# ==============================================================================
import glob
import os
import sys
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
# ==============================================================================
# -- imports -------------------------------------------------------------------
# ==============================================================================
import carla
from carla import ColorConverter as cc
import argparse
import collections
import datetime
import logging
import math
import random
import re
import weakref
##################### Added by me #####################
import tensorflow as tf
from tensorflow import expand_dims
from tensorflow.image import resize
import cv2
import model as md
# Build the PilotNet network and restore its trained weights from disk so
# the client can run steering inference.  NOTE(review): this runs at import
# time; a missing weights file aborts the whole script before pygame starts.
model = md.getPilotNetModel()
model.load_weights('model/model-weights.h5')
try:
import pygame
from pygame.locals import KMOD_CTRL
from pygame.locals import KMOD_SHIFT
from pygame.locals import K_0
from pygame.locals import K_9
from pygame.locals import K_BACKQUOTE
from pygame.locals import K_BACKSPACE
from pygame.locals import K_COMMA
from pygame.locals import K_DOWN
from pygame.locals import K_ESCAPE
from pygame.locals import K_F1
from pygame.locals import K_LEFT
from pygame.locals import K_PERIOD
from pygame.locals import K_RIGHT
from pygame.locals import K_SLASH
from pygame.locals import K_SPACE
from pygame.locals import K_TAB
from pygame.locals import K_UP
from pygame.locals import K_a
from pygame.locals import K_b
from pygame.locals import K_c
from pygame.locals import K_d
from pygame.locals import K_g
from pygame.locals import K_h
from pygame.locals import K_i
from pygame.locals import K_l
from pygame.locals import K_m
from pygame.locals import K_n
from pygame.locals import K_p
from pygame.locals import K_q
from pygame.locals import K_r
from pygame.locals import K_s
from pygame.locals import K_t
from pygame.locals import K_v
from pygame.locals import K_w
from pygame.locals import K_x
from pygame.locals import K_z
from pygame.locals import K_MINUS
from pygame.locals import K_EQUALS
except ImportError:
raise RuntimeError('cannot import pygame, make sure pygame package is installed')
try:
import numpy as np
except ImportError:
raise RuntimeError('cannot import numpy, make sure numpy package is installed')
# ==============================================================================
# -- Global functions ----------------------------------------------------------
# ==============================================================================
def find_weather_presets():
    """Collect every CARLA weather preset as (WeatherParameters, display name)."""
    word_pattern = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')

    def pretty(ident):
        # Split a CamelCase identifier into space-separated words.
        return ' '.join(m.group(0) for m in word_pattern.finditer(ident))

    preset_names = [n for n in dir(carla.WeatherParameters) if re.match('[A-Z].+', n)]
    return [(getattr(carla.WeatherParameters, n), pretty(n)) for n in preset_names]
def get_actor_display_name(actor, truncate=250):
    """Derive a readable name from an actor's type_id, ellipsized past *truncate*."""
    words = actor.type_id.replace('_', '.').title().split('.')[1:]
    label = ' '.join(words)
    if len(label) > truncate:
        return label[:truncate - 1] + u'\u2026'
    return label
def get_actor_blueprints(world, filter, generation):
    """Return blueprints matching *filter*, optionally restricted to a generation.

    :param world: carla.World whose blueprint library is queried.
    :param filter: wildcard pattern passed to the blueprint library.
    :param generation: "all" (no restriction) or a string integer ("1"/"2").
        Anything unparseable or out of range yields [] with a warning.
    """
    bps = world.get_blueprint_library().filter(filter)

    if generation.lower() == "all":
        return bps

    # If the filter returns only one bp, we assume that this one needed
    # and therefore, we ignore the generation
    if len(bps) == 1:
        return bps

    try:
        int_generation = int(generation)
        # Check if generation is in available generations
        if int_generation in [1, 2]:
            bps = [x for x in bps if int(x.get_attribute('generation')) == int_generation]
            return bps
        else:
            print("   Warning! Actor Generation is not valid. No actor will be spawned.")
            return []
    except (ValueError, TypeError):
        # Narrowed from a bare `except:` so unrelated errors (KeyboardInterrupt,
        # attribute typos inside the loop) are no longer silently swallowed.
        print("   Warning! Actor Generation is not valid. No actor will be spawned.")
        return []
# ==============================================================================
# -- World ---------------------------------------------------------------------
# ==============================================================================
class World(object):
    """Owns the CARLA world: the player actor, its sensors, weather/map state
    and the HUD.  All lifecycle (spawn, respawn, teardown) goes through here.
    """
    def __init__(self, carla_world, hud, args):
        # Wire up the simulator handle, CLI options and HUD, then spawn the player.
        self.world = carla_world
        self.sync = args.sync
        self.actor_role_name = args.rolename
        try:
            self.map = self.world.get_map()
        except RuntimeError as error:
            print('RuntimeError: {}'.format(error))
            print('  The server could not send the OpenDRIVE (.xodr) file:')
            print('  Make sure it exists, has the same name of your town, and is correct.')
            sys.exit(1)
        self.hud = hud
        self.player = None
        self.collision_sensor = None
        self.lane_invasion_sensor = None
        self.gnss_sensor = None
        self.imu_sensor = None
        self.radar_sensor = None
        self.camera_manager = None
        self._weather_presets = find_weather_presets()
        self._weather_index = 0
        self._actor_filter = args.filter
        self._actor_generation = args.generation
        self._gamma = args.gamma
        self.restart()
        self.world.on_tick(hud.on_world_tick)
        self.recording_enabled = False
        self.recording_start = 0
        self.constant_velocity_enabled = False
        self.show_vehicle_telemetry = False
        self.current_map_layer = 0
        # Layers cycled by next_map_layer()/load_map_layer().
        self.map_layer_names = [
            carla.MapLayer.NONE,
            carla.MapLayer.Buildings,
            carla.MapLayer.Decals,
            carla.MapLayer.Foliage,
            carla.MapLayer.Ground,
            carla.MapLayer.ParkedVehicles,
            carla.MapLayer.Particles,
            carla.MapLayer.Props,
            carla.MapLayer.StreetLights,
            carla.MapLayer.Walls,
            carla.MapLayer.All
        ]

    def restart(self):
        """(Re)spawn the player with a random blueprint and fresh sensors."""
        self.player_max_speed = 1.589
        self.player_max_speed_fast = 3.713
        # Keep same camera config if the camera manager exists.
        cam_index = self.camera_manager.index if self.camera_manager is not None else 0
        cam_pos_index = self.camera_manager.transform_index if self.camera_manager is not None else 0
        # Get a random blueprint.
        blueprint = random.choice(get_actor_blueprints(self.world, self._actor_filter, self._actor_generation))
        blueprint.set_attribute('role_name', self.actor_role_name)
        if blueprint.has_attribute('color'):
            color = random.choice(blueprint.get_attribute('color').recommended_values)
            blueprint.set_attribute('color', color)
        if blueprint.has_attribute('driver_id'):
            driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
            blueprint.set_attribute('driver_id', driver_id)
        if blueprint.has_attribute('is_invincible'):
            blueprint.set_attribute('is_invincible', 'true')
        # set the max speed
        if blueprint.has_attribute('speed'):
            self.player_max_speed = float(blueprint.get_attribute('speed').recommended_values[1])
            self.player_max_speed_fast = float(blueprint.get_attribute('speed').recommended_values[2])
        # Spawn the player.
        if self.player is not None:
            # Respawn slightly above the old transform with level orientation.
            spawn_point = self.player.get_transform()
            spawn_point.location.z += 2.0
            spawn_point.rotation.roll = 0.0
            spawn_point.rotation.pitch = 0.0
            self.destroy()
            self.player = self.world.try_spawn_actor(blueprint, spawn_point)
            self.modify_vehicle_physics(self.player)
        while self.player is None:
            # Keep trying random spawn points until one is free.
            if not self.map.get_spawn_points():
                print('There are no spawn points available in your map/town.')
                print('Please add some Vehicle Spawn Point to your UE4 scene.')
                sys.exit(1)
            spawn_points = self.map.get_spawn_points()
            spawn_point = random.choice(spawn_points) if spawn_points else carla.Transform()
            self.player = self.world.try_spawn_actor(blueprint, spawn_point)
            self.modify_vehicle_physics(self.player)
        # Set up the sensors.
        self.collision_sensor = CollisionSensor(self.player, self.hud)
        self.lane_invasion_sensor = LaneInvasionSensor(self.player, self.hud)
        self.gnss_sensor = GnssSensor(self.player)
        self.imu_sensor = IMUSensor(self.player)
        self.camera_manager = CameraManager(self.player, self.hud, self._gamma)
        self.camera_manager.transform_index = cam_pos_index
        self.camera_manager.set_sensor(cam_index, notify=False)
        actor_type = get_actor_display_name(self.player)
        self.hud.notification(actor_type)
        if self.sync:
            self.world.tick()
        else:
            self.world.wait_for_tick()

    def next_weather(self, reverse=False):
        """Cycle to the next (or previous) weather preset and apply it."""
        self._weather_index += -1 if reverse else 1
        self._weather_index %= len(self._weather_presets)
        preset = self._weather_presets[self._weather_index]
        self.hud.notification('Weather: %s' % preset[1])
        self.player.get_world().set_weather(preset[0])

    def next_map_layer(self, reverse=False):
        """Cycle the selected map layer (selection only; load/unload is separate)."""
        self.current_map_layer += -1 if reverse else 1
        self.current_map_layer %= len(self.map_layer_names)
        selected = self.map_layer_names[self.current_map_layer]
        self.hud.notification('LayerMap selected: %s' % selected)

    def load_map_layer(self, unload=False):
        """Load the currently selected map layer, or unload it if *unload*."""
        selected = self.map_layer_names[self.current_map_layer]
        if unload:
            self.hud.notification('Unloading map layer: %s' % selected)
            self.world.unload_map_layer(selected)
        else:
            self.hud.notification('Loading map layer: %s' % selected)
            self.world.load_map_layer(selected)

    def toggle_radar(self):
        """Create the radar sensor on first call, destroy it on the next."""
        if self.radar_sensor is None:
            self.radar_sensor = RadarSensor(self.player)
        elif self.radar_sensor.sensor is not None:
            self.radar_sensor.sensor.destroy()
            self.radar_sensor = None

    def modify_vehicle_physics(self, actor):
        # If actor is not a vehicle, we cannot use the physics control;
        # best-effort only, so any failure is deliberately ignored.
        try:
            physics_control = actor.get_physics_control()
            physics_control.use_sweep_wheel_collision = True
            actor.apply_physics_control(physics_control)
        except Exception:
            pass

    def tick(self, clock):
        """Per-frame update: forward the tick to the HUD."""
        self.hud.tick(self, clock)

    def render(self, display):
        """Draw the camera view first, then the HUD overlay on top."""
        self.camera_manager.render(display)
        self.hud.render(display)

    def destroy_sensors(self):
        """Tear down only the camera sensor (used when switching during replay)."""
        self.camera_manager.sensor.destroy()
        self.camera_manager.sensor = None
        self.camera_manager.index = None

    def destroy(self):
        """Destroy the player and every attached sensor."""
        if self.radar_sensor is not None:
            self.toggle_radar()
        sensors = [
            self.camera_manager.sensor,
            self.collision_sensor.sensor,
            self.lane_invasion_sensor.sensor,
            self.gnss_sensor.sensor,
            self.imu_sensor.sensor]
        for sensor in sensors:
            if sensor is not None:
                sensor.stop()
                sensor.destroy()
        if self.player is not None:
            self.player.destroy()
# ==============================================================================
# -- KeyboardControl -----------------------------------------------------------
# ==============================================================================
class KeyboardControl(object):
    """Class that handles keyboard input.

    Translates pygame key events into carla.VehicleControl or
    carla.WalkerControl commands and toggles world-level features
    (autopilot, recording, weather, map layers, lights, ...).
    """
    def __init__(self, world, start_in_autopilot):
        # Pick the control type matching the spawned player actor.
        self._autopilot_enabled = start_in_autopilot
        if isinstance(world.player, carla.Vehicle):
            self._control = carla.VehicleControl()
            self._lights = carla.VehicleLightState.NONE
            world.player.set_autopilot(self._autopilot_enabled)
            world.player.set_light_state(self._lights)
        elif isinstance(world.player, carla.Walker):
            self._control = carla.WalkerControl()
            self._autopilot_enabled = False
            self._rotation = world.player.get_transform().rotation
        else:
            raise NotImplementedError("Actor type not supported")
        self._steer_cache = 0.0
        world.hud.notification("Press 'H' or '?' for help.", seconds=4.0)

    def parse_events(self, client, world, clock, sync_mode):
        """Process one frame of pygame events; returns True when quitting."""
        if isinstance(self._control, carla.VehicleControl):
            current_lights = self._lights
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return True
            elif event.type == pygame.KEYUP:
                if self._is_quit_shortcut(event.key):
                    return True
                elif event.key == K_BACKSPACE:
                    # Respawn the vehicle, preserving the autopilot state.
                    if self._autopilot_enabled:
                        world.player.set_autopilot(False)
                        world.restart()
                        world.player.set_autopilot(True)
                    else:
                        world.restart()
                elif event.key == K_F1:
                    world.hud.toggle_info()
                elif event.key == K_v and pygame.key.get_mods() & KMOD_SHIFT:
                    world.next_map_layer(reverse=True)
                elif event.key == K_v:
                    world.next_map_layer()
                elif event.key == K_b and pygame.key.get_mods() & KMOD_SHIFT:
                    world.load_map_layer(unload=True)
                elif event.key == K_b:
                    # NOTE(review): upstream manual_control passes unload=False here
                    # (plain B loads the selected layer, per the module help text).
                    # The author deliberately flipped it ("i changed False -> True"),
                    # so B now unloads exactly like Shift+B — confirm this is intended.
                    world.load_map_layer(unload = True)
                elif event.key == K_h or (event.key == K_SLASH and pygame.key.get_mods() & KMOD_SHIFT):
                    world.hud.help.toggle()
                elif event.key == K_TAB:
                    world.camera_manager.toggle_camera()
                elif event.key == K_c and pygame.key.get_mods() & KMOD_SHIFT:
                    world.next_weather(reverse=True)
                elif event.key == K_c:
                    world.next_weather()
                elif event.key == K_g:
                    world.toggle_radar()
                elif event.key == K_BACKQUOTE:
                    world.camera_manager.next_sensor()
                elif event.key == K_n:
                    world.camera_manager.next_sensor()
                elif event.key == K_w and (pygame.key.get_mods() & KMOD_CTRL):
                    # CTRL+W: toggle constant velocity mode (17 m/s = ~60 km/h).
                    if world.constant_velocity_enabled:
                        world.player.disable_constant_velocity()
                        world.constant_velocity_enabled = False
                        world.hud.notification("Disabled Constant Velocity Mode")
                    else:
                        world.player.enable_constant_velocity(carla.Vector3D(17, 0, 0))
                        world.constant_velocity_enabled = True
                        world.hud.notification("Enabled Constant Velocity Mode at 60 km/h")
                elif event.key == K_t:
                    if world.show_vehicle_telemetry:
                        world.player.show_debug_telemetry(False)
                        world.show_vehicle_telemetry = False
                        world.hud.notification("Disabled Vehicle Telemetry")
                    else:
                        try:
                            world.player.show_debug_telemetry(True)
                            world.show_vehicle_telemetry = True
                            world.hud.notification("Enabled Vehicle Telemetry")
                        except Exception:
                            pass
                elif event.key > K_0 and event.key <= K_9:
                    # Number keys select a sensor; CTRL offsets into the second bank.
                    index_ctrl = 0
                    if pygame.key.get_mods() & KMOD_CTRL:
                        index_ctrl = 9
                    world.camera_manager.set_sensor(event.key - 1 - K_0 + index_ctrl)
                elif event.key == K_r and not (pygame.key.get_mods() & KMOD_CTRL):
                    world.camera_manager.toggle_recording()
                elif event.key == K_r and (pygame.key.get_mods() & KMOD_CTRL):
                    # CTRL+R: toggle server-side simulation recording.
                    if (world.recording_enabled):
                        client.stop_recorder()
                        world.recording_enabled = False
                        world.hud.notification("Recorder is OFF")
                    else:
                        client.start_recorder("manual_recording.rec")
                        world.recording_enabled = True
                        world.hud.notification("Recorder is ON")
                elif event.key == K_p and (pygame.key.get_mods() & KMOD_CTRL):
                    # stop recorder
                    client.stop_recorder()
                    world.recording_enabled = False
                    # work around to fix camera at start of replaying
                    current_index = world.camera_manager.index
                    world.destroy_sensors()
                    # disable autopilot
                    self._autopilot_enabled = False
                    world.player.set_autopilot(self._autopilot_enabled)
                    world.hud.notification("Replaying file 'manual_recording.rec'")
                    # replayer
                    client.replay_file("manual_recording.rec", world.recording_start, 0, 0)
                    world.camera_manager.set_sensor(current_index)
                elif event.key == K_MINUS and (pygame.key.get_mods() & KMOD_CTRL):
                    # CTRL+-: shift the replay start time back (SHIFT = 10s steps).
                    if pygame.key.get_mods() & KMOD_SHIFT:
                        world.recording_start -= 10
                    else:
                        world.recording_start -= 1
                    world.hud.notification("Recording start time is %d" % (world.recording_start))
                elif event.key == K_EQUALS and (pygame.key.get_mods() & KMOD_CTRL):
                    if pygame.key.get_mods() & KMOD_SHIFT:
                        world.recording_start += 10
                    else:
                        world.recording_start += 1
                    world.hud.notification("Recording start time is %d" % (world.recording_start))
                if isinstance(self._control, carla.VehicleControl):
                    # Vehicle-only shortcuts: gears, autopilot and light states.
                    if event.key == K_q:
                        self._control.gear = 1 if self._control.reverse else -1
                    elif event.key == K_m:
                        self._control.manual_gear_shift = not self._control.manual_gear_shift
                        self._control.gear = world.player.get_control().gear
                        world.hud.notification('%s Transmission' %
                                               ('Manual' if self._control.manual_gear_shift else 'Automatic'))
                    elif self._control.manual_gear_shift and event.key == K_COMMA:
                        self._control.gear = max(-1, self._control.gear - 1)
                    elif self._control.manual_gear_shift and event.key == K_PERIOD:
                        self._control.gear = self._control.gear + 1
                    elif event.key == K_p and not pygame.key.get_mods() & KMOD_CTRL:
                        if not self._autopilot_enabled and not sync_mode:
                            print("WARNING: You are currently in asynchronous mode and could "
                                  "experience some issues with the traffic simulation")
                        self._autopilot_enabled = not self._autopilot_enabled
                        world.player.set_autopilot(self._autopilot_enabled)
                        world.hud.notification(
                            'Autopilot %s' % ('On' if self._autopilot_enabled else 'Off'))
                    elif event.key == K_l and pygame.key.get_mods() & KMOD_CTRL:
                        current_lights ^= carla.VehicleLightState.Special1
                    elif event.key == K_l and pygame.key.get_mods() & KMOD_SHIFT:
                        current_lights ^= carla.VehicleLightState.HighBeam
                    elif event.key == K_l:
                        # Use 'L' key to switch between lights:
                        # closed -> position -> low beam -> fog
                        if not self._lights & carla.VehicleLightState.Position:
                            world.hud.notification("Position lights")
                            current_lights |= carla.VehicleLightState.Position
                        else:
                            world.hud.notification("Low beam lights")
                            current_lights |= carla.VehicleLightState.LowBeam
                        if self._lights & carla.VehicleLightState.LowBeam:
                            world.hud.notification("Fog lights")
                            current_lights |= carla.VehicleLightState.Fog
                        if self._lights & carla.VehicleLightState.Fog:
                            world.hud.notification("Lights off")
                            current_lights ^= carla.VehicleLightState.Position
                            current_lights ^= carla.VehicleLightState.LowBeam
                            current_lights ^= carla.VehicleLightState.Fog
                    elif event.key == K_i:
                        current_lights ^= carla.VehicleLightState.Interior
                    elif event.key == K_z:
                        current_lights ^= carla.VehicleLightState.LeftBlinker
                    elif event.key == K_x:
                        current_lights ^= carla.VehicleLightState.RightBlinker

        if not self._autopilot_enabled:
            if isinstance(self._control, carla.VehicleControl):
                self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())
                self._control.reverse = self._control.gear < 0
                # Set automatic control-related vehicle lights
                if self._control.brake:
                    current_lights |= carla.VehicleLightState.Brake
                else: # Remove the Brake flag
                    current_lights &= ~carla.VehicleLightState.Brake
                if self._control.reverse:
                    current_lights |= carla.VehicleLightState.Reverse
                else: # Remove the Reverse flag
                    current_lights &= ~carla.VehicleLightState.Reverse
                if current_lights != self._lights: # Change the light state only if necessary
                    self._lights = current_lights
                    world.player.set_light_state(carla.VehicleLightState(self._lights))
            elif isinstance(self._control, carla.WalkerControl):
                self._parse_walker_keys(pygame.key.get_pressed(), clock.get_time(), world)
            world.player.apply_control(self._control)

    def _parse_vehicle_keys(self, keys, milliseconds):
        """Map held keys to throttle/brake/steer; steering is time-smoothed."""
        if keys[K_UP] or keys[K_w]:
            self._control.throttle = min(self._control.throttle + 0.01, 1.00)
        else:
            self._control.throttle = 0.0
        if keys[K_DOWN] or keys[K_s]:
            self._control.brake = min(self._control.brake + 0.2, 1)
        else:
            self._control.brake = 0
        # Steering ramps with frame time; reversing direction resets the cache.
        steer_increment = 5e-4 * milliseconds
        if keys[K_LEFT] or keys[K_a]:
            if self._steer_cache > 0:
                self._steer_cache = 0
            else:
                self._steer_cache -= steer_increment
        elif keys[K_RIGHT] or keys[K_d]:
            if self._steer_cache < 0:
                self._steer_cache = 0
            else:
                self._steer_cache += steer_increment
        else:
            self._steer_cache = 0.0
        self._steer_cache = min(0.7, max(-0.7, self._steer_cache))
        self._control.steer = round(self._steer_cache, 1)
        self._control.hand_brake = keys[K_SPACE]

    def _parse_walker_keys(self, keys, milliseconds, world):
        """Map held keys to walker speed, yaw rotation and jump."""
        self._control.speed = 0.0
        if keys[K_DOWN] or keys[K_s]:
            self._control.speed = 0.0
        if keys[K_LEFT] or keys[K_a]:
            self._control.speed = .01
            self._rotation.yaw -= 0.08 * milliseconds
        if keys[K_RIGHT] or keys[K_d]:
            self._control.speed = .01
            self._rotation.yaw += 0.08 * milliseconds
        if keys[K_UP] or keys[K_w]:
            self._control.speed = world.player_max_speed_fast if pygame.key.get_mods() & KMOD_SHIFT else world.player_max_speed
        self._control.jump = keys[K_SPACE]
        self._rotation.yaw = round(self._rotation.yaw, 1)
        self._control.direction = self._rotation.get_forward_vector()

    @staticmethod
    def _is_quit_shortcut(key):
        """True for ESC or CTRL+Q."""
        return (key == K_ESCAPE) or (key == K_q and pygame.key.get_mods() & KMOD_CTRL)
# ==============================================================================
# -- HUD -----------------------------------------------------------------------
# ==============================================================================
class HUD(object):
    """Heads-up display: telemetry panel, fading notifications and help overlay."""

    def __init__(self, width, height):
        # Window size in pixels; drives layout of the info panel.
        self.dim = (width, height)
        font = pygame.font.Font(pygame.font.get_default_font(), 20)
        font_name = 'courier' if os.name == 'nt' else 'mono'
        fonts = [x for x in pygame.font.get_fonts() if font_name in x]
        default_font = 'ubuntumono'
        # Prefer ubuntumono; otherwise fall back to the first monospace match.
        # NOTE(review): fonts[0] raises IndexError if no mono font exists — confirm acceptable.
        mono = default_font if default_font in fonts else fonts[0]
        mono = pygame.font.match_font(mono)
        self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt' else 14)
        self._notifications = FadingText(font, (width, 40), (0, height - 40))
        self.help = HelpText(pygame.font.Font(mono, 16), width, height)
        self.server_fps = 0
        self.frame = 0
        self.simulation_time = 0
        self._show_info = True
        self._info_text = []
        # Dedicated clock: server FPS is measured from world ticks,
        # independently of the client's render loop.
        self._server_clock = pygame.time.Clock()

    def on_world_tick(self, timestamp):
        """World on_tick callback: track server FPS and simulation time."""
        self._server_clock.tick()
        self.server_fps = self._server_clock.get_fps()
        self.frame = timestamp.frame
        self.simulation_time = timestamp.elapsed_seconds

    def tick(self, world, clock):
        """Rebuild the per-frame info text from the current world state."""
        self._notifications.tick(world, clock)
        if not self._show_info:
            return
        t = world.player.get_transform()
        v = world.player.get_velocity()
        c = world.player.get_control()
        compass = world.imu_sensor.compass
        # Translate compass degrees into a cardinal-direction label.
        heading = 'N' if compass > 270.5 or compass < 89.5 else ''
        heading += 'S' if 90.5 < compass < 269.5 else ''
        heading += 'E' if 0.5 < compass < 179.5 else ''
        heading += 'W' if 180.5 < compass < 359.5 else ''
        colhist = world.collision_sensor.get_collision_history()
        # Last 200 frames of collision intensity, normalised to [0, 1].
        collision = [colhist[x + self.frame - 200] for x in range(0, 200)]
        max_col = max(1.0, max(collision))
        collision = [x / max_col for x in collision]
        vehicles = world.world.get_actors().filter('vehicle.*')
        self._info_text = [
            'Server: % 16.0f FPS' % self.server_fps,
            'Client: % 16.0f FPS' % clock.get_fps(),
            '',
            'Vehicle: % 20s' % get_actor_display_name(world.player, truncate=20),
            'Map: % 20s' % world.map.name.split('/')[-1],
            'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)),
            '',
            'Speed: % 15.0f km/h' % (3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)),
            u'Compass:% 17.0f\N{DEGREE SIGN} % 2s' % (compass, heading),
            'Accelero: (%5.1f,%5.1f,%5.1f)' % (world.imu_sensor.accelerometer),
            'Gyroscop: (%5.1f,%5.1f,%5.1f)' % (world.imu_sensor.gyroscope),
            'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (t.location.x, t.location.y)),
            'GNSS:% 24s' % ('(% 2.6f, % 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)),
            'Height: % 18.0f m' % t.location.z,
            '']
        if isinstance(c, carla.VehicleControl):
            # (label, value, min, max) tuples render as bars; (label, bool) as checkboxes.
            self._info_text += [
                ('Throttle:', c.throttle, 0.0, 1.0),
                ('Steer:', c.steer, -1.0, 1.0),
                ('Brake:', c.brake, 0.0, 1.0),
                ('Reverse:', c.reverse),
                ('Hand brake:', c.hand_brake),
                ('Manual:', c.manual_gear_shift),
                'Gear: %s' % {-1: 'R', 0: 'N'}.get(c.gear, c.gear)]
        elif isinstance(c, carla.WalkerControl):
            self._info_text += [
                ('Speed:', c.speed, 0.0, 5.556),
                ('Jump:', c.jump)]
        self._info_text += [
            '',
            'Collision:',
            collision,
            '',
            'Number of vehicles: % 8d' % len(vehicles)]
        if len(vehicles) > 1:
            self._info_text += ['Nearby vehicles:']
            distance = lambda l: math.sqrt((l.x - t.location.x)**2 + (l.y - t.location.y)**2 + (l.z - t.location.z)**2)
            vehicles = [(distance(x.get_location()), x) for x in vehicles if x.id != world.player.id]
            for d, vehicle in sorted(vehicles, key=lambda vehicles: vehicles[0]):
                if d > 200.0:
                    break
                vehicle_type = get_actor_display_name(vehicle, truncate=22)
                self._info_text.append('% 4dm %s' % (d, vehicle_type))

    def toggle_info(self):
        """Show/hide the telemetry panel (F1)."""
        self._show_info = not self._show_info

    def notification(self, text, seconds=2.0):
        """Display a fading notification for `seconds`."""
        self._notifications.set_text(text, seconds=seconds)

    def error(self, text):
        """Display an error notification in red."""
        self._notifications.set_text('Error: %s' % text, (255, 0, 0))

    def render(self, display):
        """Draw the info panel, then notifications and the help overlay."""
        if self._show_info:
            info_surface = pygame.Surface((220, self.dim[1]))
            info_surface.set_alpha(100)
            display.blit(info_surface, (0, 0))
            v_offset = 4
            bar_h_offset = 100
            bar_width = 106
            for item in self._info_text:
                if v_offset + 18 > self.dim[1]:
                    break
                if isinstance(item, list):
                    # List of floats: render as a line graph (collision history).
                    if len(item) > 1:
                        points = [(x + 8, v_offset + 8 + (1.0 - y) * 30) for x, y in enumerate(item)]
                        pygame.draw.lines(display, (255, 136, 0), False, points, 2)
                    item = None
                    v_offset += 18
                elif isinstance(item, tuple):
                    if isinstance(item[1], bool):
                        # (label, bool): checkbox, filled when True.
                        rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6))
                        pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1)
                    else:
                        # (label, value, min, max): horizontal bar.
                        rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6))
                        pygame.draw.rect(display, (255, 255, 255), rect_border, 1)
                        f = (item[1] - item[2]) / (item[3] - item[2])
                        if item[2] < 0.0:
                            # Signed range: draw a marker at the value position.
                            rect = pygame.Rect((bar_h_offset + f * (bar_width - 6), v_offset + 8), (6, 6))
                        else:
                            rect = pygame.Rect((bar_h_offset, v_offset + 8), (f * bar_width, 6))
                        pygame.draw.rect(display, (255, 255, 255), rect)
                    item = item[0]
                if item: # At this point has to be a str.
                    surface = self._font_mono.render(item, True, (255, 255, 255))
                    display.blit(surface, (8, v_offset))
                v_offset += 18
        self._notifications.render(display)
        self.help.render(display)
# ==============================================================================
# -- FadingText ----------------------------------------------------------------
# ==============================================================================
class FadingText(object):
    """Bottom-of-screen notification text that fades out over time."""

    def __init__(self, font, dim, pos):
        self.font = font
        self.dim = dim
        self.pos = pos
        self.seconds_left = 0
        self.surface = pygame.Surface(self.dim)

    def set_text(self, text, color=(255, 255, 255), seconds=2.0):
        """Replace the current message and restart the fade timer."""
        rendered = self.font.render(text, True, color)
        self.surface = pygame.Surface(self.dim)
        self.seconds_left = seconds
        self.surface.fill((0, 0, 0, 0))
        self.surface.blit(rendered, (10, 11))

    def tick(self, _, clock):
        """Advance the fade; alpha is proportional to the time remaining.

        Values above 255 are clamped by pygame, so the text stays fully
        opaque for most of its lifetime and only fades near the end.
        """
        elapsed = 1e-3 * clock.get_time()
        self.seconds_left = max(0.0, self.seconds_left - elapsed)
        self.surface.set_alpha(500.0 * self.seconds_left)

    def render(self, display):
        """Blit the (possibly faded) message onto the display."""
        display.blit(self.surface, self.pos)
# ==============================================================================
# -- HelpText ------------------------------------------------------------------
# ==============================================================================
class HelpText(object):
    """Renders the module docstring as a toggleable on-screen help overlay."""

    def __init__(self, font, width, height):
        lines = __doc__.split('\n')
        self.font = font
        self.line_space = 18
        self.dim = (780, len(lines) * self.line_space + 12)
        # Center the overlay on the window.
        self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])
        self.seconds_left = 0
        self.surface = pygame.Surface(self.dim)
        self.surface.fill((0, 0, 0, 0))
        # Pre-render every docstring line once; the surface never changes.
        for row, text in enumerate(lines):
            rendered = self.font.render(text, True, (255, 255, 255))
            self.surface.blit(rendered, (22, row * self.line_space))
        self._render = False
        self.surface.set_alpha(220)

    def toggle(self):
        """Show/hide the help overlay."""
        self._render = not self._render

    def render(self, display):
        """Blit the overlay when enabled."""
        if self._render:
            display.blit(self.surface, self.pos)
# ==============================================================================
# -- CollisionSensor -----------------------------------------------------------
# ==============================================================================
class CollisionSensor(object):
    """Attaches a collision sensor to the player and records impact intensities."""

    def __init__(self, parent_actor, hud):
        self.sensor = None
        # Bounded history of (frame, intensity) pairs. A deque with maxlen
        # evicts the oldest entry in O(1), replacing the original
        # list + pop(0) trimming which was O(n) per collision event.
        self.history = collections.deque(maxlen=4000)
        self._parent = parent_actor
        self.hud = hud
        world = self._parent.get_world()
        bp = world.get_blueprint_library().find('sensor.other.collision')
        self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent)
        # We need to pass the lambda a weak reference to self to avoid circular
        # reference.
        weak_self = weakref.ref(self)
        self.sensor.listen(lambda event: CollisionSensor._on_collision(weak_self, event))

    def get_collision_history(self):
        """Return a defaultdict mapping frame -> summed impact intensity."""
        history = collections.defaultdict(int)
        for frame, intensity in self.history:
            history[frame] += intensity
        return history

    @staticmethod
    def _on_collision(weak_self, event):
        """Sensor callback: notify the HUD and append the impact magnitude."""
        self = weak_self()
        if not self:
            return
        actor_type = get_actor_display_name(event.other_actor)
        self.hud.notification('Collision with %r' % actor_type)
        impulse = event.normal_impulse
        # Scalar impact strength: magnitude of the impulse vector.
        intensity = math.sqrt(impulse.x**2 + impulse.y**2 + impulse.z**2)
        # maxlen on the deque keeps the history bounded automatically.
        self.history.append((event.frame, intensity))
# ==============================================================================
# -- LaneInvasionSensor --------------------------------------------------------
# ==============================================================================
class LaneInvasionSensor(object):
    """Notifies through the HUD whenever the player crosses a lane marking."""

    def __init__(self, parent_actor, hud):
        self.sensor = None
        # Lane invasion is only defined for vehicles; other actors get no sensor.
        if parent_actor.type_id.startswith("vehicle."):
            self._parent = parent_actor
            self.hud = hud
            world = self._parent.get_world()
            blueprint = world.get_blueprint_library().find('sensor.other.lane_invasion')
            self.sensor = world.spawn_actor(blueprint, carla.Transform(), attach_to=self._parent)
            # Weak reference so the sensor callback cannot keep this wrapper alive.
            weak_self = weakref.ref(self)
            self.sensor.listen(lambda event: LaneInvasionSensor._on_invasion(weak_self, event))

    @staticmethod
    def _on_invasion(weak_self, event):
        """Sensor callback: list the crossed marking types on the HUD."""
        self = weak_self()
        if not self:
            return
        crossed = set(marking.type for marking in event.crossed_lane_markings)
        names = ['%r' % str(kind).split()[-1] for kind in crossed]
        self.hud.notification('Crossed line %s' % ' and '.join(names))
# ==============================================================================
# -- GnssSensor ----------------------------------------------------------------
# ==============================================================================
class GnssSensor(object):
    """Wraps a GNSS sensor and caches the most recent latitude/longitude fix."""

    def __init__(self, parent_actor):
        self.sensor = None
        self._parent = parent_actor
        self.lat = 0.0
        self.lon = 0.0
        world = self._parent.get_world()
        blueprint = world.get_blueprint_library().find('sensor.other.gnss')
        self.sensor = world.spawn_actor(
            blueprint,
            carla.Transform(carla.Location(x=1.0, z=2.8)),
            attach_to=self._parent)
        # Weak reference avoids a self-keeping cycle through the callback.
        weak_self = weakref.ref(self)
        self.sensor.listen(lambda event: GnssSensor._on_gnss_event(weak_self, event))

    @staticmethod
    def _on_gnss_event(weak_self, event):
        """Record the fix, unless the wrapper has already been collected."""
        self = weak_self()
        if not self:
            return
        self.lat = event.latitude
        self.lon = event.longitude
# ==============================================================================
# -- IMUSensor -----------------------------------------------------------------
# ==============================================================================
class IMUSensor(object):
    """Caches the latest accelerometer/gyroscope/compass readings."""

    def __init__(self, parent_actor):
        self.sensor = None
        self._parent = parent_actor
        self.accelerometer = (0.0, 0.0, 0.0)
        self.gyroscope = (0.0, 0.0, 0.0)
        self.compass = 0.0
        world = self._parent.get_world()
        blueprint = world.get_blueprint_library().find('sensor.other.imu')
        self.sensor = world.spawn_actor(
            blueprint, carla.Transform(), attach_to=self._parent)
        # Weak reference keeps the callback from creating a reference cycle.
        weak_self = weakref.ref(self)
        self.sensor.listen(
            lambda sensor_data: IMUSensor._IMU_callback(weak_self, sensor_data))

    @staticmethod
    def _IMU_callback(weak_self, sensor_data):
        """Clamp readings to +/-99.9 (HUD column width) and store degrees."""
        self = weak_self()
        if not self:
            return

        def clamp(value):
            return max(-99.9, min(99.9, value))

        acc = sensor_data.accelerometer
        self.accelerometer = (clamp(acc.x), clamp(acc.y), clamp(acc.z))
        gyro = sensor_data.gyroscope
        self.gyroscope = (
            clamp(math.degrees(gyro.x)),
            clamp(math.degrees(gyro.y)),
            clamp(math.degrees(gyro.z)))
        self.compass = math.degrees(sensor_data.compass)
# ==============================================================================
# -- RadarSensor ---------------------------------------------------------------
# ==============================================================================
class RadarSensor(object):
    """Attaches a radar and draws each detection as a velocity-colored debug dot."""

    def __init__(self, parent_actor):
        self.sensor = None
        self._parent = parent_actor
        bound_x = 0.5 + self._parent.bounding_box.extent.x
        bound_y = 0.5 + self._parent.bounding_box.extent.y
        bound_z = 0.5 + self._parent.bounding_box.extent.z
        self.velocity_range = 7.5 # m/s
        world = self._parent.get_world()
        self.debug = world.debug
        bp = world.get_blueprint_library().find('sensor.other.radar')
        bp.set_attribute('horizontal_fov', str(35))
        bp.set_attribute('vertical_fov', str(20))
        # Mounted just ahead of the bounding box, pitched slightly up.
        self.sensor = world.spawn_actor(
            bp,
            carla.Transform(
                carla.Location(x=bound_x + 0.05, z=bound_z+0.05),
                carla.Rotation(pitch=5)),
            attach_to=self._parent)
        # We need a weak reference to self to avoid circular reference.
        weak_self = weakref.ref(self)
        self.sensor.listen(
            lambda radar_data: RadarSensor._Radar_callback(weak_self, radar_data))

    @staticmethod
    def _Radar_callback(weak_self, radar_data):
        """Sensor callback: draw one short-lived debug point per detection."""
        self = weak_self()
        if not self:
            return
        # To get a numpy [[vel, altitude, azimuth, depth],...[,,,]]:
        # points = np.frombuffer(radar_data.raw_data, dtype=np.dtype('f4'))
        # points = np.reshape(points, (len(radar_data), 4))
        current_rot = radar_data.transform.rotation
        for detect in radar_data:
            azi = math.degrees(detect.azimuth)
            alt = math.degrees(detect.altitude)
            # The 0.25 adjusts a bit the distance so the dots can
            # be properly seen
            fw_vec = carla.Vector3D(x=detect.depth - 0.25)
            # Rotate the forward vector into the detection's direction
            # (transform mutates fw_vec in place).
            carla.Transform(
                carla.Location(),
                carla.Rotation(
                    pitch=current_rot.pitch + alt,
                    yaw=current_rot.yaw + azi,
                    roll=current_rot.roll)).transform(fw_vec)

            def clamp(min_v, max_v, value):
                return max(min_v, min(value, max_v))

            # Color encodes normalized radial velocity: red approaching,
            # green stationary, blue receding.
            norm_velocity = detect.velocity / self.velocity_range # range [-1, 1]
            r = int(clamp(0.0, 1.0, 1.0 - norm_velocity) * 255.0)
            g = int(clamp(0.0, 1.0, 1.0 - abs(norm_velocity)) * 255.0)
            b = int(abs(clamp(- 1.0, 0.0, - 1.0 - norm_velocity)) * 255.0)
            self.debug.draw_point(
                radar_data.transform.location + fw_vec,
                size=0.075,
                life_time=0.06,
                persistent_lines=False,
                color=carla.Color(r, g, b))
# ==============================================================================
# -- CameraManager -------------------------------------------------------------
# ==============================================================================
class CameraManager(object):
    """Owns the hero's display sensors (cameras/lidar/radar variants),
    converts frames for the pygame display, and feeds RGB frames through
    the steering-prediction model to drive the vehicle."""

    def __init__(self, parent_actor, hud, gamma_correction):
        self.sensor = None
        self.surface = None
        self._parent = parent_actor
        self.hud = hud
        self.recording = False
        bound_x = 0.5 + self._parent.bounding_box.extent.x
        bound_y = 0.5 + self._parent.bounding_box.extent.y
        bound_z = 0.5 + self._parent.bounding_box.extent.z
        Attachment = carla.AttachmentType

        # Camera rigs: vehicles use bounding-box-relative mounts, walkers fixed ones.
        if not self._parent.type_id.startswith("walker.pedestrian"):
            self._camera_transforms = [
                (carla.Transform(carla.Location(x=-2.0*bound_x, y=+0.0*bound_y, z=2.0*bound_z), carla.Rotation(pitch=8.0)), Attachment.SpringArm),
                (carla.Transform(carla.Location(x=+0.8*bound_x, y=+0.0*bound_y, z=1.3*bound_z)), Attachment.Rigid),
                (carla.Transform(carla.Location(x=+1.9*bound_x, y=+1.0*bound_y, z=1.2*bound_z)), Attachment.SpringArm),
                (carla.Transform(carla.Location(x=-2.8*bound_x, y=+0.0*bound_y, z=4.6*bound_z), carla.Rotation(pitch=6.0)), Attachment.SpringArm),
                (carla.Transform(carla.Location(x=-1.0, y=-1.0*bound_y, z=0.4*bound_z)), Attachment.Rigid)]
        else:
            self._camera_transforms = [
                (carla.Transform(carla.Location(x=-2.5, z=0.0), carla.Rotation(pitch=-8.0)), Attachment.SpringArm),
                (carla.Transform(carla.Location(x=1.6, z=1.7)), Attachment.Rigid),
                (carla.Transform(carla.Location(x=2.5, y=0.5, z=0.0), carla.Rotation(pitch=-8.0)), Attachment.SpringArm),
                (carla.Transform(carla.Location(x=-4.0, z=2.0), carla.Rotation(pitch=6.0)), Attachment.SpringArm),
                (carla.Transform(carla.Location(x=0, y=-2.5, z=-0.0), carla.Rotation(yaw=90.0)), Attachment.Rigid)]

        self.transform_index = 1
        # [blueprint id, color converter, display name, extra blueprint attributes]
        self.sensors = [
            ['sensor.camera.rgb', cc.Raw, 'Camera RGB', {}],
            ['sensor.camera.depth', cc.Raw, 'Camera Depth (Raw)', {}],
            ['sensor.camera.depth', cc.Depth, 'Camera Depth (Gray Scale)', {}],
            ['sensor.camera.depth', cc.LogarithmicDepth, 'Camera Depth (Logarithmic Gray Scale)', {}],
            ['sensor.camera.semantic_segmentation', cc.Raw, 'Camera Semantic Segmentation (Raw)', {}],
            ['sensor.camera.semantic_segmentation', cc.CityScapesPalette,
                'Camera Semantic Segmentation (CityScapes Palette)', {}],
            ['sensor.lidar.ray_cast', None, 'Lidar (Ray-Cast)', {'range': '50'}],
            ['sensor.camera.dvs', cc.Raw, 'Dynamic Vision Sensor', {}],
            ['sensor.camera.rgb', cc.Raw, 'Camera RGB Distorted',
                {'lens_circle_multiplier': '3.0',
                'lens_circle_falloff': '3.0',
                'chromatic_aberration_intensity': '0.5',
                'chromatic_aberration_offset': '0'}],
            ['sensor.camera.optical_flow', cc.Raw, 'Optical Flow', {}],
        ]
        world = self._parent.get_world()
        bp_library = world.get_blueprint_library()
        # Pre-configure one blueprint per sensor entry and append it to the item.
        for item in self.sensors:
            bp = bp_library.find(item[0])
            if item[0].startswith('sensor.camera'):
                bp.set_attribute('image_size_x', str(hud.dim[0]))
                bp.set_attribute('image_size_y', str(hud.dim[1]))
                if bp.has_attribute('gamma'):
                    bp.set_attribute('gamma', str(gamma_correction))
                for attr_name, attr_value in item[3].items():
                    bp.set_attribute(attr_name, attr_value)
            elif item[0].startswith('sensor.lidar'):
                self.lidar_range = 50
                for attr_name, attr_value in item[3].items():
                    bp.set_attribute(attr_name, attr_value)
                    if attr_name == 'range':
                        self.lidar_range = float(attr_value)
            item.append(bp)
        self.index = None

    def toggle_camera(self):
        """Cycle through the camera mount positions (respawns the sensor)."""
        self.transform_index = (self.transform_index + 1) % len(self._camera_transforms)
        self.set_sensor(self.index, notify=False, force_respawn=True)

    def set_sensor(self, index, notify=True, force_respawn=False):
        """Switch to sensor `index`, respawning only when the type changes."""
        index = index % len(self.sensors)
        needs_respawn = True if self.index is None else \
            (force_respawn or (self.sensors[index][2] != self.sensors[self.index][2]))
        if needs_respawn:
            if self.sensor is not None:
                self.sensor.destroy()
                self.surface = None
            self.sensor = self._parent.get_world().spawn_actor(
                self.sensors[index][-1],
                self._camera_transforms[self.transform_index][0],
                attach_to=self._parent,
                attachment_type=self._camera_transforms[self.transform_index][1])
            # We need to pass the lambda a weak reference to self to avoid
            # circular reference.
            weak_self = weakref.ref(self)
            self.sensor.listen(lambda image: CameraManager._parse_image(weak_self, image))
        if notify:
            self.hud.notification(self.sensors[index][2])
        self.index = index

    def next_sensor(self):
        """Switch to the next sensor in the list (wraps around)."""
        self.set_sensor(self.index + 1)

    def toggle_recording(self):
        """Start/stop saving every received frame to disk."""
        self.recording = not self.recording
        self.hud.notification('Recording %s' % ('On' if self.recording else 'Off'))

    def render(self, display):
        """Blit the latest converted sensor frame, if any."""
        if self.surface is not None:
            display.blit(self.surface, (0, 0))

    @staticmethod
    def scale_steer(steer):
        """Clamp a steering value to the valid [-1, 1] control range."""
        return max(-1, min(1, steer))

    @staticmethod
    def predict_steering(img_rgb):
        """Run the steering model on one RGB frame; return the raw prediction.

        NOTE(review): `resize`, `expand_dims` and `model` are not imported in
        this chunk — presumably provided by keras/skimage imports elsewhere
        in the file; confirm.
        """
        img_size = (66, 200, 3)  # model input shape (H, W, C) -- TODO confirm
        input_img = resize(img_rgb, img_size[:2])
        input_img = expand_dims(input_img, 0)  # Create batch axis
        steering_pred = model.predict(input_img)[0][0]
        return steering_pred

    @staticmethod
    def _parse_image(weak_self, image):
        """Sensor callback: convert the frame for display and, for RGB
        frames, drive the vehicle from the model's steering prediction."""
        self = weak_self()
        if not self:
            return
        if self.sensors[self.index][0].startswith('sensor.lidar'):
            # Project lidar points to a top-down 2D intensity image.
            points = np.frombuffer(image.raw_data, dtype=np.dtype('f4'))
            points = np.reshape(points, (int(points.shape[0] / 4), 4))
            lidar_data = np.array(points[:, :2])
            lidar_data *= min(self.hud.dim) / (2.0 * self.lidar_range)
            lidar_data += (0.5 * self.hud.dim[0], 0.5 * self.hud.dim[1])
            lidar_data = np.fabs(lidar_data)  # pylint: disable=E1111
            lidar_data = lidar_data.astype(np.int32)
            lidar_data = np.reshape(lidar_data, (-1, 2))
            lidar_img_size = (self.hud.dim[0], self.hud.dim[1], 3)
            lidar_img = np.zeros((lidar_img_size), dtype=np.uint8)
            lidar_img[tuple(lidar_data.T)] = (255, 255, 255)
            self.surface = pygame.surfarray.make_surface(lidar_img)
        elif self.sensors[self.index][0].startswith('sensor.camera.dvs'):
            # Example of converting the raw_data from a carla.DVSEventArray
            # sensor into a NumPy array and using it as an image.
            # FIX: `np.bool` (removed in NumPy 1.24) replaced by builtin `bool`.
            dvs_events = np.frombuffer(image.raw_data, dtype=np.dtype([
                ('x', np.uint16), ('y', np.uint16), ('t', np.int64), ('pol', bool)]))
            dvs_img = np.zeros((image.height, image.width, 3), dtype=np.uint8)
            # Blue is positive, red is negative
            dvs_img[dvs_events[:]['y'], dvs_events[:]['x'], dvs_events[:]['pol'] * 2] = 255
            self.surface = pygame.surfarray.make_surface(dvs_img.swapaxes(0, 1))
        elif self.sensors[self.index][0].startswith('sensor.camera.optical_flow'):
            image = image.get_color_coded_flow()
            array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
            array = np.reshape(array, (image.height, image.width, 4))
            array = array[:, :, :3]
            array = array[:, :, ::-1]
            self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
        else:
            image.convert(self.sensors[self.index][1])
            array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
            array = np.reshape(array, (image.height, image.width, 4))  # RGBA
            array = array[:, :, :3]  # RGBA -> RGB
            # array = array[:, :, ::-1]
            # Model inference: downscale, predict, rescale and clamp steering.
            img_rgb = cv2.resize(np.float32(array), (320, 180))
            pred_steering = CameraManager.predict_steering(img_rgb)
            print('before scale', pred_steering)
            pred_steering /= 70  # empirical scale factor -- TODO confirm
            print('after scale', pred_steering)
            pred_steering = CameraManager.scale_steer(pred_steering)
            print("Predicted steering: ", pred_steering)
            # BUG FIX: steer=1 was hard-coded here, which silently discarded
            # the model's prediction; apply the predicted steering instead.
            self._parent.apply_control(carla.VehicleControl(throttle=0.9, steer=float(pred_steering)))
            self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
        if self.recording:
            image.save_to_disk('_out/%08d' % image.frame)
# ==============================================================================
# -- game_loop() ---------------------------------------------------------------
# ==============================================================================
def game_loop(args):
    """Create the pygame window and CARLA world, then run the render loop.

    Restores the simulator's original settings and destroys the player's
    actors on exit, even when the loop terminates with an exception.
    """
    pygame.init()
    pygame.font.init()
    world = None
    original_settings = None
    try:
        client = carla.Client(args.host, args.port)
        client.set_timeout(20.0)
        sim_world = client.get_world()
        if args.sync:
            # Synchronous mode: the client drives the simulation ticks at a
            # fixed 20 Hz time-step; remember the settings so we can restore them.
            original_settings = sim_world.get_settings()
            settings = sim_world.get_settings()
            if not settings.synchronous_mode:
                settings.synchronous_mode = True
                settings.fixed_delta_seconds = 0.05
            sim_world.apply_settings(settings)
            traffic_manager = client.get_trafficmanager()
            traffic_manager.set_synchronous_mode(True)
        if args.autopilot and not sim_world.get_settings().synchronous_mode:
            print("WARNING: You are currently in asynchronous mode and could "
                  "experience some issues with the traffic simulation")
        # Remove all layer for fast rendering
        sim_world.unload_map_layer(carla.MapLayer.All)
        # settings = sim_world.get_settings()
        # settings.fixed_delta_seconds = None # Set a variable time-step
        # sim_world.apply_settings(settings)
        display = pygame.display.set_mode(
            (args.width, args.height),
            pygame.HWSURFACE | pygame.DOUBLEBUF)
        display.fill((0,0,0))
        pygame.display.flip()
        hud = HUD(args.width, args.height)
        world = World(sim_world, hud, args)
        controller = KeyboardControl(world, args.autopilot)
        # Prime the world once before entering the loop.
        if args.sync:
            sim_world.tick()
        else:
            sim_world.wait_for_tick()
        clock = pygame.time.Clock()
        while True:
            if args.sync:
                sim_world.tick()
            clock.tick_busy_loop(60)  # cap the client at 60 FPS
            # parse_events returns True on quit.
            if controller.parse_events(client, world, clock, args.sync):
                return
            world.tick(clock)
            world.render(display)
            pygame.display.flip()
    finally:
        # Undo the synchronous-mode settings before disconnecting.
        if original_settings:
            sim_world.apply_settings(original_settings)
        if (world and world.recording_enabled):
            client.stop_recorder()
        if world is not None:
            world.destroy()
        pygame.quit()
# ==============================================================================
# -- main() --------------------------------------------------------------------
# ==============================================================================
def main():
    """Parse command-line arguments, configure logging, run the game loop."""
    parser = argparse.ArgumentParser(
        description='CARLA Manual Control Client')
    parser.add_argument('-v', '--verbose', action='store_true', dest='debug',
                        help='print debug information')
    parser.add_argument('--host', metavar='H', default='127.0.0.1',
                        help='IP of the host server (default: 127.0.0.1)')
    parser.add_argument('-p', '--port', metavar='P', default=2000, type=int,
                        help='TCP port to listen to (default: 2000)')
    parser.add_argument('-a', '--autopilot', action='store_true',
                        help='enable autopilot')
    parser.add_argument('--res', metavar='WIDTHxHEIGHT', default='1280x720',
                        help='window resolution (default: 1280x720)')
    parser.add_argument('--filter', metavar='PATTERN', default='vehicle.*',
                        help='actor filter (default: "vehicle.*")')
    parser.add_argument('--generation', metavar='G', default='2',
                        help='restrict to certain actor generation (values: "1","2","All" - default: "2")')
    parser.add_argument('--rolename', metavar='NAME', default='hero',
                        help='actor role name (default: "hero")')
    parser.add_argument('--gamma', default=2.2, type=float,
                        help='Gamma correction of the camera (default: 2.2)')
    parser.add_argument('--sync', action='store_true',
                        help='Activate synchronous mode execution')
    args = parser.parse_args()

    # Split "WIDTHxHEIGHT" into two integer attributes.
    args.width, args.height = (int(v) for v in args.res.split('x'))

    logging.basicConfig(format='%(levelname)s: %(message)s',
                        level=logging.DEBUG if args.debug else logging.INFO)
    logging.info('listening to server %s:%s', args.host, args.port)

    print(__doc__)

    try:
        game_loop(args)
    except KeyboardInterrupt:
        print('\nCancelled by user. Bye!')
# Entry point when executed as a script.
if __name__ == '__main__':
    main()
| 43.409192 | 146 | 0.54878 |
"""CARLA manual control client (model-driven steering variant)."""

from __future__ import print_function

import glob
import os
import sys

# Make the CARLA egg that matches this interpreter importable.
try:
    sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
        sys.version_info.major,
        sys.version_info.minor,
        'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
    pass

import carla
from carla import ColorConverter as cc

import argparse
import collections
import datetime
import logging
import math
import random
import re
import weakref

# NOTE(review): this import block was truncated (the "try:/import pygame"
# head of the pygame.locals imports was missing). Restored from the
# canonical CARLA manual_control.py key-constant list; confirm against
# the full file.
try:
    import pygame
    from pygame.locals import KMOD_CTRL
    from pygame.locals import KMOD_SHIFT
    from pygame.locals import K_0
    from pygame.locals import K_9
    from pygame.locals import K_BACKQUOTE
    from pygame.locals import K_BACKSPACE
    from pygame.locals import K_COMMA
    from pygame.locals import K_DOWN
    from pygame.locals import K_ESCAPE
    from pygame.locals import K_F1
    from pygame.locals import K_LEFT
    from pygame.locals import K_PERIOD
    from pygame.locals import K_RIGHT
    from pygame.locals import K_SLASH
    from pygame.locals import K_SPACE
    from pygame.locals import K_TAB
    from pygame.locals import K_UP
    from pygame.locals import K_a
    from pygame.locals import K_b
    from pygame.locals import K_c
    from pygame.locals import K_d
    from pygame.locals import K_g
    from pygame.locals import K_h
    from pygame.locals import K_i
    from pygame.locals import K_l
    from pygame.locals import K_m
    from pygame.locals import K_n
    from pygame.locals import K_o
    from pygame.locals import K_p
    from pygame.locals import K_q
    from pygame.locals import K_r
    from pygame.locals import K_s
    from pygame.locals import K_t
    from pygame.locals import K_v
    from pygame.locals import K_w
    from pygame.locals import K_x
    from pygame.locals import K_z
    from pygame.locals import K_MINUS
    from pygame.locals import K_EQUALS
except ImportError:
    raise RuntimeError('cannot import pygame, make sure pygame package is installed')

try:
    import numpy as np
except ImportError:
    raise RuntimeError('cannot import numpy, make sure numpy package is installed')

# NOTE(review): `cv2`, `model`, `resize` and `expand_dims` are referenced by
# CameraManager but not imported here — presumably the cv2/keras/skimage
# imports live elsewhere in the full file; verify.
def find_weather_presets():
    """Return (WeatherParameters, pretty name) tuples for every preset."""
    # Splits CamelCase identifiers into space-separated words.
    camel_split = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')

    def pretty(identifier):
        return ' '.join(m.group(0) for m in camel_split.finditer(identifier))

    preset_names = [x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)]
    return [(getattr(carla.WeatherParameters, x), pretty(x)) for x in preset_names]
def get_actor_display_name(actor, truncate=250):
    """Human-readable name derived from the actor's type_id, truncated
    with an ellipsis when longer than `truncate` characters."""
    name = ' '.join(actor.type_id.replace('_', '.').title().split('.')[1:])
    if len(name) > truncate:
        return name[:truncate - 1] + u'\u2026'
    return name
def get_actor_blueprints(world, filter, generation):
    """Filter the blueprint library by pattern and actor generation.

    `generation` may be "1", "2" or "all" (case-insensitive). Returns an
    empty list (after printing a warning) for any other value.
    Note: `filter` shadows the builtin; kept for interface compatibility.
    """
    bps = world.get_blueprint_library().filter(filter)

    if generation.lower() == "all":
        return bps

    # If the filter already narrowed it down to a single blueprint, assume
    # the caller wants it regardless of generation.
    if len(bps) == 1:
        return bps

    try:
        int_generation = int(generation)
        if int_generation in (1, 2):
            bps = [x for x in bps if int(x.get_attribute('generation')) == int_generation]
            return bps
        print(" Warning! Actor Generation is not valid. No actor will be spawned.")
        return []
    except ValueError:
        # Non-numeric generation string (or non-numeric blueprint attribute).
        # FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt.
        print(" Warning! Actor Generation is not valid. No actor will be spawned.")
        return []
class World(object):
def __init__(self, carla_world, hud, args):
self.world = carla_world
self.sync = args.sync
self.actor_role_name = args.rolename
try:
self.map = self.world.get_map()
except RuntimeError as error:
print('RuntimeError: {}'.format(error))
print(' The server could not send the OpenDRIVE (.xodr) file:')
print(' Make sure it exists, has the same name of your town, and is correct.')
sys.exit(1)
self.hud = hud
self.player = None
self.collision_sensor = None
self.lane_invasion_sensor = None
self.gnss_sensor = None
self.imu_sensor = None
self.radar_sensor = None
self.camera_manager = None
self._weather_presets = find_weather_presets()
self._weather_index = 0
self._actor_filter = args.filter
self._actor_generation = args.generation
self._gamma = args.gamma
self.restart()
self.world.on_tick(hud.on_world_tick)
self.recording_enabled = False
self.recording_start = 0
self.constant_velocity_enabled = False
self.show_vehicle_telemetry = False
self.current_map_layer = 0
self.map_layer_names = [
carla.MapLayer.NONE,
carla.MapLayer.Buildings,
carla.MapLayer.Decals,
carla.MapLayer.Foliage,
carla.MapLayer.Ground,
carla.MapLayer.ParkedVehicles,
carla.MapLayer.Particles,
carla.MapLayer.Props,
carla.MapLayer.StreetLights,
carla.MapLayer.Walls,
carla.MapLayer.All
]
def restart(self):
self.player_max_speed = 1.589
self.player_max_speed_fast = 3.713
cam_index = self.camera_manager.index if self.camera_manager is not None else 0
cam_pos_index = self.camera_manager.transform_index if self.camera_manager is not None else 0
blueprint = random.choice(get_actor_blueprints(self.world, self._actor_filter, self._actor_generation))
blueprint.set_attribute('role_name', self.actor_role_name)
if blueprint.has_attribute('color'):
color = random.choice(blueprint.get_attribute('color').recommended_values)
blueprint.set_attribute('color', color)
if blueprint.has_attribute('driver_id'):
driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
blueprint.set_attribute('driver_id', driver_id)
if blueprint.has_attribute('is_invincible'):
blueprint.set_attribute('is_invincible', 'true')
if blueprint.has_attribute('speed'):
self.player_max_speed = float(blueprint.get_attribute('speed').recommended_values[1])
self.player_max_speed_fast = float(blueprint.get_attribute('speed').recommended_values[2])
if self.player is not None:
spawn_point = self.player.get_transform()
spawn_point.location.z += 2.0
spawn_point.rotation.roll = 0.0
spawn_point.rotation.pitch = 0.0
self.destroy()
self.player = self.world.try_spawn_actor(blueprint, spawn_point)
self.modify_vehicle_physics(self.player)
while self.player is None:
if not self.map.get_spawn_points():
print('There are no spawn points available in your map/town.')
print('Please add some Vehicle Spawn Point to your UE4 scene.')
sys.exit(1)
spawn_points = self.map.get_spawn_points()
spawn_point = random.choice(spawn_points) if spawn_points else carla.Transform()
self.player = self.world.try_spawn_actor(blueprint, spawn_point)
self.modify_vehicle_physics(self.player)
self.collision_sensor = CollisionSensor(self.player, self.hud)
self.lane_invasion_sensor = LaneInvasionSensor(self.player, self.hud)
self.gnss_sensor = GnssSensor(self.player)
self.imu_sensor = IMUSensor(self.player)
self.camera_manager = CameraManager(self.player, self.hud, self._gamma)
self.camera_manager.transform_index = cam_pos_index
self.camera_manager.set_sensor(cam_index, notify=False)
actor_type = get_actor_display_name(self.player)
self.hud.notification(actor_type)
if self.sync:
self.world.tick()
else:
self.world.wait_for_tick()
def next_weather(self, reverse=False):
self._weather_index += -1 if reverse else 1
self._weather_index %= len(self._weather_presets)
preset = self._weather_presets[self._weather_index]
self.hud.notification('Weather: %s' % preset[1])
self.player.get_world().set_weather(preset[0])
def next_map_layer(self, reverse=False):
self.current_map_layer += -1 if reverse else 1
self.current_map_layer %= len(self.map_layer_names)
selected = self.map_layer_names[self.current_map_layer]
self.hud.notification('LayerMap selected: %s' % selected)
def load_map_layer(self, unload=False):
selected = self.map_layer_names[self.current_map_layer]
if unload:
self.hud.notification('Unloading map layer: %s' % selected)
self.world.unload_map_layer(selected)
else:
self.hud.notification('Loading map layer: %s' % selected)
self.world.load_map_layer(selected)
def toggle_radar(self):
if self.radar_sensor is None:
self.radar_sensor = RadarSensor(self.player)
elif self.radar_sensor.sensor is not None:
self.radar_sensor.sensor.destroy()
self.radar_sensor = None
def modify_vehicle_physics(self, actor):
try:
physics_control = actor.get_physics_control()
physics_control.use_sweep_wheel_collision = True
actor.apply_physics_control(physics_control)
except Exception:
pass
    def tick(self, clock):
        """Per-frame world update: forward the tick to the HUD."""
        self.hud.tick(self, clock)
    def render(self, display):
        """Draw the active camera view first, then the HUD overlay on top of it."""
        self.camera_manager.render(display)
        self.hud.render(display)
    def destroy_sensors(self):
        """Destroy only the camera sensor (used before replaying a recording)."""
        self.camera_manager.sensor.destroy()
        self.camera_manager.sensor = None
        self.camera_manager.index = None
def destroy(self):
if self.radar_sensor is not None:
self.toggle_radar()
sensors = [
self.camera_manager.sensor,
self.collision_sensor.sensor,
self.lane_invasion_sensor.sensor,
self.gnss_sensor.sensor,
self.imu_sensor.sensor]
for sensor in sensors:
if sensor is not None:
sensor.stop()
sensor.destroy()
if self.player is not None:
self.player.destroy()
class KeyboardControl(object):
    """Translate pygame keyboard events into CARLA vehicle/walker controls.

    Supports autopilot toggling, recorder control, camera/sensor switching,
    weather and map-layer cycling, and manual driving input.
    """

    def __init__(self, world, start_in_autopilot):
        self._autopilot_enabled = start_in_autopilot
        if isinstance(world.player, carla.Vehicle):
            self._control = carla.VehicleControl()
            self._lights = carla.VehicleLightState.NONE
            world.player.set_autopilot(self._autopilot_enabled)
            world.player.set_light_state(self._lights)
        elif isinstance(world.player, carla.Walker):
            self._control = carla.WalkerControl()
            self._autopilot_enabled = False
            self._rotation = world.player.get_transform().rotation
        else:
            raise NotImplementedError("Actor type not supported")
        self._steer_cache = 0.0
        world.hud.notification("Press 'H' or '?' for help.", seconds=4.0)

    def parse_events(self, client, world, clock, sync_mode):
        """Process one frame of pygame events.

        Returns True when the user requested to quit, otherwise applies the
        resulting control to the player and returns None.
        """
        if isinstance(self._control, carla.VehicleControl):
            current_lights = self._lights
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                return True
            elif event.type == pygame.KEYUP:
                if self._is_quit_shortcut(event.key):
                    return True
                elif event.key == K_BACKSPACE:
                    # Respawn the player, preserving the autopilot state.
                    if self._autopilot_enabled:
                        world.player.set_autopilot(False)
                        world.restart()
                        world.player.set_autopilot(True)
                    else:
                        world.restart()
                elif event.key == K_F1:
                    world.hud.toggle_info()
                elif event.key == K_v and pygame.key.get_mods() & KMOD_SHIFT:
                    world.next_map_layer(reverse=True)
                elif event.key == K_v:
                    world.next_map_layer()
                elif event.key == K_b and pygame.key.get_mods() & KMOD_SHIFT:
                    world.load_map_layer(unload=True)
                elif event.key == K_b:
                    # FIX: plain 'B' must LOAD the selected layer; it previously
                    # called load_map_layer(unload=True), duplicating Shift+B.
                    world.load_map_layer()
                elif event.key == K_h or (event.key == K_SLASH and pygame.key.get_mods() & KMOD_SHIFT):
                    world.hud.help.toggle()
                elif event.key == K_TAB:
                    world.camera_manager.toggle_camera()
                elif event.key == K_c and pygame.key.get_mods() & KMOD_SHIFT:
                    world.next_weather(reverse=True)
                elif event.key == K_c:
                    world.next_weather()
                elif event.key == K_g:
                    world.toggle_radar()
                elif event.key == K_BACKQUOTE:
                    world.camera_manager.next_sensor()
                elif event.key == K_n:
                    world.camera_manager.next_sensor()
                elif event.key == K_w and (pygame.key.get_mods() & KMOD_CTRL):
                    if world.constant_velocity_enabled:
                        world.player.disable_constant_velocity()
                        world.constant_velocity_enabled = False
                        world.hud.notification("Disabled Constant Velocity Mode")
                    else:
                        # 17 m/s forward velocity (~60 km/h).
                        world.player.enable_constant_velocity(carla.Vector3D(17, 0, 0))
                        world.constant_velocity_enabled = True
                        world.hud.notification("Enabled Constant Velocity Mode at 60 km/h")
                elif event.key == K_t:
                    if world.show_vehicle_telemetry:
                        world.player.show_debug_telemetry(False)
                        world.show_vehicle_telemetry = False
                        world.hud.notification("Disabled Vehicle Telemetry")
                    else:
                        try:
                            world.player.show_debug_telemetry(True)
                            world.show_vehicle_telemetry = True
                            world.hud.notification("Enabled Vehicle Telemetry")
                        except Exception:
                            # Older servers may not support telemetry; ignore.
                            pass
                elif event.key > K_0 and event.key <= K_9:
                    # Digits select a sensor; Ctrl+digit selects from the
                    # second bank of sensors.
                    index_ctrl = 0
                    if pygame.key.get_mods() & KMOD_CTRL:
                        index_ctrl = 9
                    world.camera_manager.set_sensor(event.key - 1 - K_0 + index_ctrl)
                elif event.key == K_r and not (pygame.key.get_mods() & KMOD_CTRL):
                    world.camera_manager.toggle_recording()
                elif event.key == K_r and (pygame.key.get_mods() & KMOD_CTRL):
                    if (world.recording_enabled):
                        client.stop_recorder()
                        world.recording_enabled = False
                        world.hud.notification("Recorder is OFF")
                    else:
                        client.start_recorder("manual_recording.rec")
                        world.recording_enabled = True
                        world.hud.notification("Recorder is ON")
                elif event.key == K_p and (pygame.key.get_mods() & KMOD_CTRL):
                    # Replay the last recording: stop recording, drop the
                    # camera (the replay respawns actors), disable autopilot.
                    client.stop_recorder()
                    world.recording_enabled = False
                    current_index = world.camera_manager.index
                    world.destroy_sensors()
                    self._autopilot_enabled = False
                    world.player.set_autopilot(self._autopilot_enabled)
                    world.hud.notification("Replaying file 'manual_recording.rec'")
                    client.replay_file("manual_recording.rec", world.recording_start, 0, 0)
                    world.camera_manager.set_sensor(current_index)
                elif event.key == K_MINUS and (pygame.key.get_mods() & KMOD_CTRL):
                    if pygame.key.get_mods() & KMOD_SHIFT:
                        world.recording_start -= 10
                    else:
                        world.recording_start -= 1
                    world.hud.notification("Recording start time is %d" % (world.recording_start))
                elif event.key == K_EQUALS and (pygame.key.get_mods() & KMOD_CTRL):
                    if pygame.key.get_mods() & KMOD_SHIFT:
                        world.recording_start += 10
                    else:
                        world.recording_start += 1
                    world.hud.notification("Recording start time is %d" % (world.recording_start))
                if isinstance(self._control, carla.VehicleControl):
                    if event.key == K_q:
                        self._control.gear = 1 if self._control.reverse else -1
                    elif event.key == K_m:
                        self._control.manual_gear_shift = not self._control.manual_gear_shift
                        self._control.gear = world.player.get_control().gear
                        world.hud.notification('%s Transmission' %
                                               ('Manual' if self._control.manual_gear_shift else 'Automatic'))
                    elif self._control.manual_gear_shift and event.key == K_COMMA:
                        self._control.gear = max(-1, self._control.gear - 1)
                    elif self._control.manual_gear_shift and event.key == K_PERIOD:
                        self._control.gear = self._control.gear + 1
                    elif event.key == K_p and not pygame.key.get_mods() & KMOD_CTRL:
                        if not self._autopilot_enabled and not sync_mode:
                            print("WARNING: You are currently in asynchronous mode and could "
                                  "experience some issues with the traffic simulation")
                        self._autopilot_enabled = not self._autopilot_enabled
                        world.player.set_autopilot(self._autopilot_enabled)
                        world.hud.notification(
                            'Autopilot %s' % ('On' if self._autopilot_enabled else 'Off'))
                    elif event.key == K_l and pygame.key.get_mods() & KMOD_CTRL:
                        current_lights ^= carla.VehicleLightState.Special1
                    elif event.key == K_l and pygame.key.get_mods() & KMOD_SHIFT:
                        current_lights ^= carla.VehicleLightState.HighBeam
                    elif event.key == K_l:
                        # 'L' cycles: position -> low beam -> fog -> off.
                        if not self._lights & carla.VehicleLightState.Position:
                            world.hud.notification("Position lights")
                            current_lights |= carla.VehicleLightState.Position
                        else:
                            world.hud.notification("Low beam lights")
                            current_lights |= carla.VehicleLightState.LowBeam
                        if self._lights & carla.VehicleLightState.LowBeam:
                            world.hud.notification("Fog lights")
                            current_lights |= carla.VehicleLightState.Fog
                        if self._lights & carla.VehicleLightState.Fog:
                            world.hud.notification("Lights off")
                            current_lights ^= carla.VehicleLightState.Position
                            current_lights ^= carla.VehicleLightState.LowBeam
                            current_lights ^= carla.VehicleLightState.Fog
                    elif event.key == K_i:
                        current_lights ^= carla.VehicleLightState.Interior
                    elif event.key == K_z:
                        current_lights ^= carla.VehicleLightState.LeftBlinker
                    elif event.key == K_x:
                        current_lights ^= carla.VehicleLightState.RightBlinker
        if not self._autopilot_enabled:
            if isinstance(self._control, carla.VehicleControl):
                self._parse_vehicle_keys(pygame.key.get_pressed(), clock.get_time())
                self._control.reverse = self._control.gear < 0
                # Keep brake/reverse lights in sync with the pedal state.
                if self._control.brake:
                    current_lights |= carla.VehicleLightState.Brake
                else:
                    current_lights &= ~carla.VehicleLightState.Brake
                if self._control.reverse:
                    current_lights |= carla.VehicleLightState.Reverse
                else:
                    current_lights &= ~carla.VehicleLightState.Reverse
                if current_lights != self._lights:
                    self._lights = current_lights
                    world.player.set_light_state(carla.VehicleLightState(self._lights))
            elif isinstance(self._control, carla.WalkerControl):
                self._parse_walker_keys(pygame.key.get_pressed(), clock.get_time(), world)
            world.player.apply_control(self._control)

    def _parse_vehicle_keys(self, keys, milliseconds):
        """Update throttle/brake/steer/hand-brake from the pressed-key state."""
        if keys[K_UP] or keys[K_w]:
            self._control.throttle = min(self._control.throttle + 0.01, 1.00)
        else:
            self._control.throttle = 0.0
        if keys[K_DOWN] or keys[K_s]:
            self._control.brake = min(self._control.brake + 0.2, 1)
        else:
            self._control.brake = 0
        # Steering ramps with frame time; reversing direction resets the cache.
        steer_increment = 5e-4 * milliseconds
        if keys[K_LEFT] or keys[K_a]:
            if self._steer_cache > 0:
                self._steer_cache = 0
            else:
                self._steer_cache -= steer_increment
        elif keys[K_RIGHT] or keys[K_d]:
            if self._steer_cache < 0:
                self._steer_cache = 0
            else:
                self._steer_cache += steer_increment
        else:
            self._steer_cache = 0.0
        self._steer_cache = min(0.7, max(-0.7, self._steer_cache))
        self._control.steer = round(self._steer_cache, 1)
        self._control.hand_brake = keys[K_SPACE]

    def _parse_walker_keys(self, keys, milliseconds, world):
        """Update walker speed/heading/jump from the pressed-key state."""
        self._control.speed = 0.0
        if keys[K_DOWN] or keys[K_s]:
            self._control.speed = 0.0
        if keys[K_LEFT] or keys[K_a]:
            self._control.speed = .01
            self._rotation.yaw -= 0.08 * milliseconds
        if keys[K_RIGHT] or keys[K_d]:
            self._control.speed = .01
            self._rotation.yaw += 0.08 * milliseconds
        if keys[K_UP] or keys[K_w]:
            self._control.speed = world.player_max_speed_fast if pygame.key.get_mods() & KMOD_SHIFT else world.player_max_speed
        self._control.jump = keys[K_SPACE]
        self._rotation.yaw = round(self._rotation.yaw, 1)
        self._control.direction = self._rotation.get_forward_vector()

    @staticmethod
    def _is_quit_shortcut(key):
        """Return True for ESC or Ctrl+Q."""
        return (key == K_ESCAPE) or (key == K_q and pygame.key.get_mods() & KMOD_CTRL)
class HUD(object):
    """Heads-up display: telemetry side panel, fading notifications and the
    help overlay, rendered on top of the camera view."""
    def __init__(self, width, height):
        self.dim = (width, height)
        font = pygame.font.Font(pygame.font.get_default_font(), 20)
        # Prefer a monospaced font: 'courier' on Windows, anything 'mono' elsewhere.
        font_name = 'courier' if os.name == 'nt' else 'mono'
        fonts = [x for x in pygame.font.get_fonts() if font_name in x]
        default_font = 'ubuntumono'
        mono = default_font if default_font in fonts else fonts[0]
        mono = pygame.font.match_font(mono)
        self._font_mono = pygame.font.Font(mono, 12 if os.name == 'nt' else 14)
        self._notifications = FadingText(font, (width, 40), (0, height - 40))
        self.help = HelpText(pygame.font.Font(mono, 16), width, height)
        self.server_fps = 0
        self.frame = 0
        self.simulation_time = 0
        self._show_info = True
        self._info_text = []
        self._server_clock = pygame.time.Clock()
    def on_world_tick(self, timestamp):
        """World on-tick callback: track server FPS, frame and sim time."""
        self._server_clock.tick()
        self.server_fps = self._server_clock.get_fps()
        self.frame = timestamp.frame
        self.simulation_time = timestamp.elapsed_seconds
    def tick(self, world, clock):
        """Rebuild the info-panel contents for this frame."""
        self._notifications.tick(world, clock)
        if not self._show_info:
            return
        t = world.player.get_transform()
        v = world.player.get_velocity()
        c = world.player.get_control()
        compass = world.imu_sensor.compass
        # Compass degrees -> cardinal letters; overlapping ranges allow e.g. 'NE'.
        heading = 'N' if compass > 270.5 or compass < 89.5 else ''
        heading += 'S' if 90.5 < compass < 269.5 else ''
        heading += 'E' if 0.5 < compass < 179.5 else ''
        heading += 'W' if 180.5 < compass < 359.5 else ''
        colhist = world.collision_sensor.get_collision_history()
        # Collision intensity for the last 200 frames, normalised to [0, 1].
        collision = [colhist[x + self.frame - 200] for x in range(0, 200)]
        max_col = max(1.0, max(collision))
        collision = [x / max_col for x in collision]
        vehicles = world.world.get_actors().filter('vehicle.*')
        self._info_text = [
            'Server: % 16.0f FPS' % self.server_fps,
            'Client: % 16.0f FPS' % clock.get_fps(),
            '',
            'Vehicle: % 20s' % get_actor_display_name(world.player, truncate=20),
            'Map: % 20s' % world.map.name.split('/')[-1],
            'Simulation time: % 12s' % datetime.timedelta(seconds=int(self.simulation_time)),
            '',
            'Speed: % 15.0f km/h' % (3.6 * math.sqrt(v.x**2 + v.y**2 + v.z**2)),
            u'Compass:% 17.0f\N{DEGREE SIGN} % 2s' % (compass, heading),
            'Accelero: (%5.1f,%5.1f,%5.1f)' % (world.imu_sensor.accelerometer),
            'Gyroscop: (%5.1f,%5.1f,%5.1f)' % (world.imu_sensor.gyroscope),
            'Location:% 20s' % ('(% 5.1f, % 5.1f)' % (t.location.x, t.location.y)),
            'GNSS:% 24s' % ('(% 2.6f, % 3.6f)' % (world.gnss_sensor.lat, world.gnss_sensor.lon)),
            'Height: % 18.0f m' % t.location.z,
            '']
        if isinstance(c, carla.VehicleControl):
            # Tuples of (label, value[, min, max]) render as gauges/checkboxes.
            self._info_text += [
                ('Throttle:', c.throttle, 0.0, 1.0),
                ('Steer:', c.steer, -1.0, 1.0),
                ('Brake:', c.brake, 0.0, 1.0),
                ('Reverse:', c.reverse),
                ('Hand brake:', c.hand_brake),
                ('Manual:', c.manual_gear_shift),
                'Gear: %s' % {-1: 'R', 0: 'N'}.get(c.gear, c.gear)]
        elif isinstance(c, carla.WalkerControl):
            self._info_text += [
                ('Speed:', c.speed, 0.0, 5.556),
                ('Jump:', c.jump)]
        self._info_text += [
            '',
            'Collision:',
            collision,
            '',
            'Number of vehicles: % 8d' % len(vehicles)]
        if len(vehicles) > 1:
            self._info_text += ['Nearby vehicles:']
            distance = lambda l: math.sqrt((l.x - t.location.x)**2 + (l.y - t.location.y)**2 + (l.z - t.location.z)**2)
            vehicles = [(distance(x.get_location()), x) for x in vehicles if x.id != world.player.id]
            for d, vehicle in sorted(vehicles, key=lambda vehicles: vehicles[0]):
                if d > 200.0:
                    break
                vehicle_type = get_actor_display_name(vehicle, truncate=22)
                self._info_text.append('% 4dm %s' % (d, vehicle_type))
    def toggle_info(self):
        """Show/hide the info panel."""
        self._show_info = not self._show_info
    def notification(self, text, seconds=2.0):
        """Display a fading notification for *seconds*."""
        self._notifications.set_text(text, seconds=seconds)
    def error(self, text):
        """Display an error notification in red."""
        self._notifications.set_text('Error: %s' % text, (255, 0, 0))
    def render(self, display):
        """Draw the info panel (strings, gauges, graphs), notifications and help."""
        if self._show_info:
            info_surface = pygame.Surface((220, self.dim[1]))
            info_surface.set_alpha(100)
            display.blit(info_surface, (0, 0))
            v_offset = 4
            bar_h_offset = 100
            bar_width = 106
            for item in self._info_text:
                if v_offset + 18 > self.dim[1]:
                    # No vertical space left for another row.
                    break
                if isinstance(item, list):
                    # List of floats -> line graph (collision history).
                    if len(item) > 1:
                        points = [(x + 8, v_offset + 8 + (1.0 - y) * 30) for x, y in enumerate(item)]
                        pygame.draw.lines(display, (255, 136, 0), False, points, 2)
                    item = None
                    v_offset += 18
                elif isinstance(item, tuple):
                    if isinstance(item[1], bool):
                        # Boolean -> small checkbox (filled when True).
                        rect = pygame.Rect((bar_h_offset, v_offset + 8), (6, 6))
                        pygame.draw.rect(display, (255, 255, 255), rect, 0 if item[1] else 1)
                    else:
                        # Numeric -> horizontal gauge between item[2] and item[3].
                        rect_border = pygame.Rect((bar_h_offset, v_offset + 8), (bar_width, 6))
                        pygame.draw.rect(display, (255, 255, 255), rect_border, 1)
                        f = (item[1] - item[2]) / (item[3] - item[2])
                        if item[2] < 0.0:
                            # Signed range (e.g. steer): draw a moving marker.
                            rect = pygame.Rect((bar_h_offset + f * (bar_width - 6), v_offset + 8), (6, 6))
                        else:
                            rect = pygame.Rect((bar_h_offset, v_offset + 8), (f * bar_width, 6))
                        pygame.draw.rect(display, (255, 255, 255), rect)
                    item = item[0]
                if item:
                    surface = self._font_mono.render(item, True, (255, 255, 255))
                    display.blit(surface, (8, v_offset))
                v_offset += 18
        self._notifications.render(display)
        self.help.render(display)
class FadingText(object):
    """Short-lived notification text that fades out over time."""

    def __init__(self, font, dim, pos):
        self.font = font
        self.dim = dim
        self.pos = pos
        self.seconds_left = 0
        self.surface = pygame.Surface(self.dim)

    def set_text(self, text, color=(255, 255, 255), seconds=2.0):
        """Replace the current message and restart the fade timer."""
        rendered = self.font.render(text, True, color)
        self.seconds_left = seconds
        self.surface = pygame.Surface(self.dim)
        self.surface.fill((0, 0, 0, 0))
        self.surface.blit(rendered, (10, 11))

    def tick(self, _, clock):
        """Age the message; alpha scales with time left (pygame caps it at 255)."""
        elapsed = 1e-3 * clock.get_time()
        self.seconds_left = max(0.0, self.seconds_left - elapsed)
        self.surface.set_alpha(500.0 * self.seconds_left)

    def render(self, display):
        display.blit(self.surface, self.pos)
class HelpText(object):
    """Centered overlay that renders the module docstring as help text."""
    def __init__(self, font, width, height):
        lines = __doc__.split('\n')
        self.font = font
        self.line_space = 18
        self.dim = (780, len(lines) * self.line_space + 12)
        # Center the panel on screen.
        self.pos = (0.5 * width - 0.5 * self.dim[0], 0.5 * height - 0.5 * self.dim[1])
        self.seconds_left = 0
        self.surface = pygame.Surface(self.dim)
        self.surface.fill((0, 0, 0, 0))
        # Pre-render every docstring line onto the panel surface once.
        for n, line in enumerate(lines):
            text_texture = self.font.render(line, True, (255, 255, 255))
            self.surface.blit(text_texture, (22, n * self.line_space))
        self._render = False
        self.surface.set_alpha(220)
    def toggle(self):
        """Show/hide the help overlay."""
        self._render = not self._render
    def render(self, display):
        if self._render:
            display.blit(self.surface, self.pos)
class CollisionSensor(object):
    """Wraps a 'sensor.other.collision' actor and records collision intensities."""
    def __init__(self, parent_actor, hud):
        self.sensor = None
        self.history = []  # (frame, intensity) tuples, capped at 4000 entries
        self._parent = parent_actor
        self.hud = hud
        world = self._parent.get_world()
        bp = world.get_blueprint_library().find('sensor.other.collision')
        self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent)
        # Weak reference avoids a cycle: the sensor callback must not keep
        # this wrapper alive.
        weak_self = weakref.ref(self)
        self.sensor.listen(lambda event: CollisionSensor._on_collision(weak_self, event))
    def get_collision_history(self):
        """Return a mapping of frame -> summed collision intensity."""
        history = collections.defaultdict(int)
        for frame, intensity in self.history:
            history[frame] += intensity
        return history
    @staticmethod
    def _on_collision(weak_self, event):
        """Sensor callback: notify the HUD and store the impulse magnitude."""
        self = weak_self()
        if not self:
            return
        actor_type = get_actor_display_name(event.other_actor)
        self.hud.notification('Collision with %r' % actor_type)
        impulse = event.normal_impulse
        intensity = math.sqrt(impulse.x**2 + impulse.y**2 + impulse.z**2)
        self.history.append((event.frame, intensity))
        if len(self.history) > 4000:
            self.history.pop(0)
class LaneInvasionSensor(object):
    """Wraps a 'sensor.other.lane_invasion' actor; notifies on lane crossings.

    The sensor is only attached to vehicle actors; for other actors
    (e.g. walkers) ``self.sensor`` stays None.
    """
    def __init__(self, parent_actor, hud):
        self.sensor = None
        if parent_actor.type_id.startswith("vehicle."):
            self._parent = parent_actor
            self.hud = hud
            world = self._parent.get_world()
            bp = world.get_blueprint_library().find('sensor.other.lane_invasion')
            self.sensor = world.spawn_actor(bp, carla.Transform(), attach_to=self._parent)
            # Weak reference: the sensor callback must not keep this wrapper alive.
            weak_self = weakref.ref(self)
            self.sensor.listen(lambda event: LaneInvasionSensor._on_invasion(weak_self, event))
    @staticmethod
    def _on_invasion(weak_self, event):
        """Sensor callback: report which lane-marking types were crossed."""
        self = weak_self()
        if not self:
            return
        lane_types = set(x.type for x in event.crossed_lane_markings)
        text = ['%r' % str(x).split()[-1] for x in lane_types]
        self.hud.notification('Crossed line %s' % ' and '.join(text))
class GnssSensor(object):
    """Wraps a 'sensor.other.gnss' actor; exposes the latest lat/lon fix."""
    def __init__(self, parent_actor):
        self.sensor = None
        self._parent = parent_actor
        self.lat = 0.0
        self.lon = 0.0
        world = self._parent.get_world()
        bp = world.get_blueprint_library().find('sensor.other.gnss')
        self.sensor = world.spawn_actor(bp, carla.Transform(carla.Location(x=1.0, z=2.8)), attach_to=self._parent)
        # Weak reference: the sensor callback must not keep this wrapper alive.
        weak_self = weakref.ref(self)
        self.sensor.listen(lambda event: GnssSensor._on_gnss_event(weak_self, event))
    @staticmethod
    def _on_gnss_event(weak_self, event):
        """Sensor callback: cache the most recent latitude/longitude."""
        self = weak_self()
        if not self:
            return
        self.lat = event.latitude
        self.lon = event.longitude
class IMUSensor(object):
    """Wraps a 'sensor.other.imu' actor; caches accelerometer/gyro/compass."""
    def __init__(self, parent_actor):
        self.sensor = None
        self._parent = parent_actor
        self.accelerometer = (0.0, 0.0, 0.0)
        self.gyroscope = (0.0, 0.0, 0.0)
        self.compass = 0.0
        world = self._parent.get_world()
        bp = world.get_blueprint_library().find('sensor.other.imu')
        self.sensor = world.spawn_actor(
            bp, carla.Transform(), attach_to=self._parent)
        # Weak reference: the sensor callback must not keep this wrapper alive.
        weak_self = weakref.ref(self)
        self.sensor.listen(
            lambda sensor_data: IMUSensor._IMU_callback(weak_self, sensor_data))
    @staticmethod
    def _IMU_callback(weak_self, sensor_data):
        """Sensor callback: clamp readings to +/-99.9 so the HUD layout fits."""
        self = weak_self()
        if not self:
            return
        limits = (-99.9, 99.9)
        self.accelerometer = (
            max(limits[0], min(limits[1], sensor_data.accelerometer.x)),
            max(limits[0], min(limits[1], sensor_data.accelerometer.y)),
            max(limits[0], min(limits[1], sensor_data.accelerometer.z)))
        # Gyroscope is reported in rad/s; convert to degrees before clamping.
        self.gyroscope = (
            max(limits[0], min(limits[1], math.degrees(sensor_data.gyroscope.x))),
            max(limits[0], min(limits[1], math.degrees(sensor_data.gyroscope.y))),
            max(limits[0], min(limits[1], math.degrees(sensor_data.gyroscope.z))))
        self.compass = math.degrees(sensor_data.compass)
class RadarSensor(object):
    """Wraps a 'sensor.other.radar' actor and draws its detections as
    velocity-colored debug points in the world."""
    def __init__(self, parent_actor):
        self.sensor = None
        self._parent = parent_actor
        # Bounding-box extents plus a margin, used to place the sensor.
        bound_x = 0.5 + self._parent.bounding_box.extent.x
        bound_y = 0.5 + self._parent.bounding_box.extent.y
        bound_z = 0.5 + self._parent.bounding_box.extent.z
        self.velocity_range = 7.5  # m/s; normalisation range for point colors
        world = self._parent.get_world()
        self.debug = world.debug
        bp = world.get_blueprint_library().find('sensor.other.radar')
        bp.set_attribute('horizontal_fov', str(35))
        bp.set_attribute('vertical_fov', str(20))
        self.sensor = world.spawn_actor(
            bp,
            carla.Transform(
                carla.Location(x=bound_x + 0.05, z=bound_z+0.05),
                carla.Rotation(pitch=5)),
            attach_to=self._parent)
        # Weak reference: the sensor callback must not keep this wrapper alive.
        weak_self = weakref.ref(self)
        self.sensor.listen(
            lambda radar_data: RadarSensor._Radar_callback(weak_self, radar_data))
    @staticmethod
    def _Radar_callback(weak_self, radar_data):
        """Sensor callback: project each detection into world space and draw it."""
        self = weak_self()
        if not self:
            return
        current_rot = radar_data.transform.rotation
        for detect in radar_data:
            azi = math.degrees(detect.azimuth)
            alt = math.degrees(detect.altitude)
            # Rotate a depth vector by the detection's azimuth/altitude to get
            # its offset from the sensor (0.25 m pulled in to avoid z-fighting).
            fw_vec = carla.Vector3D(x=detect.depth - 0.25)
            carla.Transform(
                carla.Location(),
                carla.Rotation(
                    pitch=current_rot.pitch + alt,
                    yaw=current_rot.yaw + azi,
                    roll=current_rot.roll)).transform(fw_vec)
            def clamp(min_v, max_v, value):
                return max(min_v, min(value, max_v))
            # Color encodes relative velocity: red approaching, blue receding.
            norm_velocity = detect.velocity / self.velocity_range
            r = int(clamp(0.0, 1.0, 1.0 - norm_velocity) * 255.0)
            g = int(clamp(0.0, 1.0, 1.0 - abs(norm_velocity)) * 255.0)
            b = int(abs(clamp(- 1.0, 0.0, - 1.0 - norm_velocity)) * 255.0)
            self.debug.draw_point(
                radar_data.transform.location + fw_vec,
                size=0.075,
                life_time=0.06,
                persistent_lines=False,
                color=carla.Color(r, g, b))
class CameraManager(object):
    """Owns the spectator sensors (cameras/LIDAR/DVS), converts their frames
    to a pygame surface, and drives the vehicle from the steering model's
    prediction on each RGB camera frame.
    """

    def __init__(self, parent_actor, hud, gamma_correction):
        self.sensor = None
        self.surface = None
        self._parent = parent_actor
        self.hud = hud
        self.recording = False
        # Bounding-box extents plus a margin, used to place the cameras.
        bound_x = 0.5 + self._parent.bounding_box.extent.x
        bound_y = 0.5 + self._parent.bounding_box.extent.y
        bound_z = 0.5 + self._parent.bounding_box.extent.z
        Attachment = carla.AttachmentType
        if not self._parent.type_id.startswith("walker.pedestrian"):
            self._camera_transforms = [
                (carla.Transform(carla.Location(x=-2.0*bound_x, y=+0.0*bound_y, z=2.0*bound_z), carla.Rotation(pitch=8.0)), Attachment.SpringArm),
                (carla.Transform(carla.Location(x=+0.8*bound_x, y=+0.0*bound_y, z=1.3*bound_z)), Attachment.Rigid),
                (carla.Transform(carla.Location(x=+1.9*bound_x, y=+1.0*bound_y, z=1.2*bound_z)), Attachment.SpringArm),
                (carla.Transform(carla.Location(x=-2.8*bound_x, y=+0.0*bound_y, z=4.6*bound_z), carla.Rotation(pitch=6.0)), Attachment.SpringArm),
                (carla.Transform(carla.Location(x=-1.0, y=-1.0*bound_y, z=0.4*bound_z)), Attachment.Rigid)]
        else:
            self._camera_transforms = [
                (carla.Transform(carla.Location(x=-2.5, z=0.0), carla.Rotation(pitch=-8.0)), Attachment.SpringArm),
                (carla.Transform(carla.Location(x=1.6, z=1.7)), Attachment.Rigid),
                (carla.Transform(carla.Location(x=2.5, y=0.5, z=0.0), carla.Rotation(pitch=-8.0)), Attachment.SpringArm),
                (carla.Transform(carla.Location(x=-4.0, z=2.0), carla.Rotation(pitch=6.0)), Attachment.SpringArm),
                (carla.Transform(carla.Location(x=0, y=-2.5, z=-0.0), carla.Rotation(yaw=90.0)), Attachment.Rigid)]
        self.transform_index = 1
        # [blueprint id, color converter, display name, extra attributes]
        self.sensors = [
            ['sensor.camera.rgb', cc.Raw, 'Camera RGB', {}],
            ['sensor.camera.depth', cc.Raw, 'Camera Depth (Raw)', {}],
            ['sensor.camera.depth', cc.Depth, 'Camera Depth (Gray Scale)', {}],
            ['sensor.camera.depth', cc.LogarithmicDepth, 'Camera Depth (Logarithmic Gray Scale)', {}],
            ['sensor.camera.semantic_segmentation', cc.Raw, 'Camera Semantic Segmentation (Raw)', {}],
            ['sensor.camera.semantic_segmentation', cc.CityScapesPalette,
             'Camera Semantic Segmentation (CityScapes Palette)', {}],
            ['sensor.lidar.ray_cast', None, 'Lidar (Ray-Cast)', {'range': '50'}],
            ['sensor.camera.dvs', cc.Raw, 'Dynamic Vision Sensor', {}],
            ['sensor.camera.rgb', cc.Raw, 'Camera RGB Distorted',
             {'lens_circle_multiplier': '3.0',
              'lens_circle_falloff': '3.0',
              'chromatic_aberration_intensity': '0.5',
              'chromatic_aberration_offset': '0'}],
            ['sensor.camera.optical_flow', cc.Raw, 'Optical Flow', {}],
        ]
        world = self._parent.get_world()
        bp_library = world.get_blueprint_library()
        # Configure each blueprint once and append it to its sensor entry.
        for item in self.sensors:
            bp = bp_library.find(item[0])
            if item[0].startswith('sensor.camera'):
                bp.set_attribute('image_size_x', str(hud.dim[0]))
                bp.set_attribute('image_size_y', str(hud.dim[1]))
                if bp.has_attribute('gamma'):
                    bp.set_attribute('gamma', str(gamma_correction))
                for attr_name, attr_value in item[3].items():
                    bp.set_attribute(attr_name, attr_value)
            elif item[0].startswith('sensor.lidar'):
                self.lidar_range = 50
                for attr_name, attr_value in item[3].items():
                    bp.set_attribute(attr_name, attr_value)
                    if attr_name == 'range':
                        self.lidar_range = float(attr_value)
            item.append(bp)
        self.index = None

    def toggle_camera(self):
        """Cycle through camera mount positions, respawning the sensor."""
        self.transform_index = (self.transform_index + 1) % len(self._camera_transforms)
        self.set_sensor(self.index, notify=False, force_respawn=True)

    def set_sensor(self, index, notify=True, force_respawn=False):
        """Activate sensor *index*, respawning the actor only when needed."""
        index = index % len(self.sensors)
        needs_respawn = True if self.index is None else \
            (force_respawn or (self.sensors[index][2] != self.sensors[self.index][2]))
        if needs_respawn:
            if self.sensor is not None:
                self.sensor.destroy()
                self.surface = None
            self.sensor = self._parent.get_world().spawn_actor(
                self.sensors[index][-1],
                self._camera_transforms[self.transform_index][0],
                attach_to=self._parent,
                attachment_type=self._camera_transforms[self.transform_index][1])
            # Weak reference: the sensor callback must not keep this manager alive.
            weak_self = weakref.ref(self)
            self.sensor.listen(lambda image: CameraManager._parse_image(weak_self, image))
        if notify:
            self.hud.notification(self.sensors[index][2])
        self.index = index

    def next_sensor(self):
        """Activate the next sensor in the list (wraps around)."""
        self.set_sensor(self.index + 1)

    def toggle_recording(self):
        """Toggle saving frames to disk in _parse_image."""
        self.recording = not self.recording
        self.hud.notification('Recording %s' % ('On' if self.recording else 'Off'))

    def render(self, display):
        if self.surface is not None:
            display.blit(self.surface, (0, 0))

    @staticmethod
    def scale_steer(steer):
        """Clamp a predicted steering value into CARLA's valid [-1, 1] range."""
        if steer >= 1:
            return 1
        elif steer <= -1:
            return -1
        else:
            return steer

    @staticmethod
    def predict_steering(img_rgb):
        """Run the (module-level) steering model on a frame; returns raw output."""
        img_size = (66, 200, 3)  # model input: height, width, channels
        input_img = resize(img_rgb, img_size[:2])
        input_img = expand_dims(input_img, 0)  # add batch dimension
        steering_pred = model.predict(input_img)[0][0]
        return steering_pred

    @staticmethod
    def _parse_image(weak_self, image):
        """Sensor callback: convert the frame for display and, for RGB camera
        frames, steer the vehicle from the model's prediction."""
        self = weak_self()
        if not self:
            return
        if self.sensors[self.index][0].startswith('sensor.lidar'):
            # Project the LIDAR point cloud onto a top-down 2D image.
            points = np.frombuffer(image.raw_data, dtype=np.dtype('f4'))
            points = np.reshape(points, (int(points.shape[0] / 4), 4))
            lidar_data = np.array(points[:, :2])
            lidar_data *= min(self.hud.dim) / (2.0 * self.lidar_range)
            lidar_data += (0.5 * self.hud.dim[0], 0.5 * self.hud.dim[1])
            lidar_data = np.fabs(lidar_data)
            lidar_data = lidar_data.astype(np.int32)
            lidar_data = np.reshape(lidar_data, (-1, 2))
            lidar_img_size = (self.hud.dim[0], self.hud.dim[1], 3)
            lidar_img = np.zeros((lidar_img_size), dtype=np.uint8)
            lidar_img[tuple(lidar_data.T)] = (255, 255, 255)
            self.surface = pygame.surfarray.make_surface(lidar_img)
        elif self.sensors[self.index][0].startswith('sensor.camera.dvs'):
            # FIX: np.bool alias was removed in NumPy 1.24; use the builtin bool
            # (NumPy maps it to np.bool_).
            dvs_events = np.frombuffer(image.raw_data, dtype=np.dtype([
                ('x', np.uint16), ('y', np.uint16), ('t', np.int64), ('pol', bool)]))
            dvs_img = np.zeros((image.height, image.width, 3), dtype=np.uint8)
            # Positive events in channel 2 (blue), negative in channel 0 (red).
            dvs_img[dvs_events[:]['y'], dvs_events[:]['x'], dvs_events[:]['pol'] * 2] = 255
            self.surface = pygame.surfarray.make_surface(dvs_img.swapaxes(0, 1))
        elif self.sensors[self.index][0].startswith('sensor.camera.optical_flow'):
            image = image.get_color_coded_flow()
            array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
            array = np.reshape(array, (image.height, image.width, 4))
            array = array[:, :, :3]
            array = array[:, :, ::-1]  # BGRA -> RGB
            self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
        else:
            image.convert(self.sensors[self.index][1])
            array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
            array = np.reshape(array, (image.height, image.width, 4))
            array = array[:, :, :3]
            # NOTE(review): channels are NOT flipped here (stays BGR) before
            # feeding the model -- confirm the model was trained on BGR input.
            img_rgb = cv2.resize(np.float32(array), (320, 180))
            pred_steering = CameraManager.predict_steering(img_rgb)
            print('before scale', pred_steering)
            pred_steering /= 70  # empirical scale factor -- TODO confirm vs training
            print('after scale', pred_steering)
            pred_steering = CameraManager.scale_steer(pred_steering)
            print("Predicted steering: ", pred_steering)
            # FIX: apply the predicted steering; steer was previously hard-coded
            # to 1 (full right), discarding the model output computed above.
            self._parent.apply_control(
                carla.VehicleControl(throttle=0.9, steer=pred_steering))
            self.surface = pygame.surfarray.make_surface(array.swapaxes(0, 1))
        if self.recording:
            image.save_to_disk('_out/%08d' % image.frame)
def game_loop(args):
    """Connect to the simulator, build the world and the pygame window, and
    run the render/input loop until the user quits.

    On exit, restores the original world settings (sync mode), stops any
    active recorder, destroys spawned actors and shuts pygame down.
    """
    pygame.init()
    pygame.font.init()
    world = None
    original_settings = None
    try:
        client = carla.Client(args.host, args.port)
        client.set_timeout(20.0)
        sim_world = client.get_world()
        if args.sync:
            original_settings = sim_world.get_settings()
            settings = sim_world.get_settings()
            if not settings.synchronous_mode:
                settings.synchronous_mode = True
                settings.fixed_delta_seconds = 0.05
            sim_world.apply_settings(settings)
            traffic_manager = client.get_trafficmanager()
            traffic_manager.set_synchronous_mode(True)
        if args.autopilot and not sim_world.get_settings().synchronous_mode:
            print("WARNING: You are currently in asynchronous mode and could "
                  "experience some issues with the traffic simulation")
        sim_world.unload_map_layer(carla.MapLayer.All)
        # FIX: the assignment target was missing ("= pygame.display.set_mode("),
        # which is a syntax error; bind the window surface to `display`.
        display = pygame.display.set_mode(
            (args.width, args.height),
            pygame.HWSURFACE | pygame.DOUBLEBUF)
        display.fill((0, 0, 0))
        pygame.display.flip()
        hud = HUD(args.width, args.height)
        world = World(sim_world, hud, args)
        controller = KeyboardControl(world, args.autopilot)
        if args.sync:
            sim_world.tick()
        else:
            sim_world.wait_for_tick()
        clock = pygame.time.Clock()
        while True:
            if args.sync:
                sim_world.tick()
            clock.tick_busy_loop(60)  # cap the client loop at 60 FPS
            if controller.parse_events(client, world, clock, args.sync):
                return
            world.tick(clock)
            world.render(display)
            pygame.display.flip()
    finally:
        if original_settings:
            sim_world.apply_settings(original_settings)
        if (world and world.recording_enabled):
            client.stop_recorder()
        if world is not None:
            world.destroy()
        pygame.quit()
def main():
    """Parse command-line options and launch the game loop.

    Ctrl+C exits cleanly with a farewell message.
    """
    argparser = argparse.ArgumentParser(
        description='CARLA Manual Control Client')
    argparser.add_argument(
        '-v', '--verbose',
        action='store_true',
        dest='debug',
        help='print debug information')
    argparser.add_argument(
        '--host',
        metavar='H',
        default='127.0.0.1',
        help='IP of the host server (default: 127.0.0.1)')
    argparser.add_argument(
        '-p', '--port',
        metavar='P',
        default=2000,
        type=int,
        help='TCP port to listen to (default: 2000)')
    argparser.add_argument(
        '-a', '--autopilot',
        action='store_true',
        help='enable autopilot')
    argparser.add_argument(
        '--res',
        metavar='WIDTHxHEIGHT',
        default='1280x720',
        help='window resolution (default: 1280x720)')
    argparser.add_argument(
        '--filter',
        metavar='PATTERN',
        default='vehicle.*',
        help='actor filter (default: "vehicle.*")')
    argparser.add_argument(
        '--generation',
        metavar='G',
        default='2',
        help='restrict to certain actor generation (values: "1","2","All" - default: "2")')
    argparser.add_argument(
        '--rolename',
        metavar='NAME',
        default='hero',
        help='actor role name (default: "hero")')
    argparser.add_argument(
        '--gamma',
        default=2.2,
        type=float,
        help='Gamma correction of the camera (default: 2.2)')
    argparser.add_argument(
        '--sync',
        action='store_true',
        help='Activate synchronous mode execution')
    args = argparser.parse_args()
    # Split "WIDTHxHEIGHT" into two integers.
    args.width, args.height = [int(x) for x in args.res.split('x')]
    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
    logging.info('listening to server %s:%s', args.host, args.port)
    print(__doc__)
    try:
        game_loop(args)
    except KeyboardInterrupt:
        print('\nCancelled by user. Bye!')
if __name__ == '__main__':
main()
| true | true |
f724b704c5f9a3e24eab2468a1fc1abdf02661db | 3,636 | py | Python | geist/backends/_x11_common.py | kebarr/Geist | 9bda3bc4e11ef06ebf8cb7c2b0ecd666ef40491a | [
"MIT"
] | 5 | 2015-05-01T15:58:48.000Z | 2017-04-19T03:38:25.000Z | geist/backends/_x11_common.py | tonysimpson/Geist | a1ef16d8b4c3777735008b671a50acfde3ce7bf1 | [
"MIT"
] | 1 | 2016-08-05T17:05:02.000Z | 2016-08-05T17:05:02.000Z | geist/backends/_x11_common.py | tonysimpson/Geist | a1ef16d8b4c3777735008b671a50acfde3ce7bf1 | [
"MIT"
] | 2 | 2016-09-27T13:45:31.000Z | 2017-05-21T14:08:57.000Z | from __future__ import division, absolute_import, print_function
from ooxcb.protocol import (
xtest,
)
from ooxcb.constant import (
ButtonPress,
ButtonRelease,
KeyPress,
KeyRelease,
MotionNotify
)
import ooxcb
from ooxcb.keysymdef import keysyms
import subprocess
import os
from ._common import BackendActionBuilder
xtest.mixin()
class _ActionsTransaction(object):
    """Context manager that batches backend actions and runs them on exit."""

    def __init__(self, backend):
        self._conn = backend._conn
        self._actions_builder = BackendActionBuilder(backend)

    def __enter__(self):
        return self._actions_builder

    def __exit__(self, exc_type, exc_value, traceback):
        # NOTE: was previously wrapped in self._conn.bunch(); kept disabled.
        self._actions_builder.execute()
        # Never suppress exceptions raised inside the with-block.
        return False
class GeistXBase(object):
    """Base X11 backend: synthesizes keyboard/mouse input via the XTEST
    extension on a given display and queries the pointer position."""
    # NOTE(review): dict.iteritems() is Python 2 only (ooxcb targets py2);
    # switch to .items() if this module is ever ported to Python 3.
    KEY_NAME_TO_CODE = keysyms
    KEY_NAME_TO_CODE_IGNORE_CASE = {name.lower(): value
                                    for name, value in keysyms.iteritems()}
    def __init__(self, **kwargs):
        # 'display' keyword selects the X display, defaulting to ':0'.
        display = kwargs.get('display', ':0')
        self._display = display
        self._conn = ooxcb.connect(display)
        self._root = self._conn.setup.roots[self._conn.pref_screen].root
    @property
    def display(self):
        """X display string this backend is connected to (e.g. ':0')."""
        return self._display
    def create_process(self, command, shell=True, stdout=None, stderr=None,
                       env=None):
        """
        Execute a process using subprocess.Popen, setting the backend's DISPLAY
        """
        env = env if env is not None else dict(os.environ)
        env['DISPLAY'] = self.display
        return subprocess.Popen(command, shell=shell,
                                stdout=stdout, stderr=stderr,
                                env=env)
    def actions_transaction(self):
        """Return a context manager that batches actions until exit."""
        return _ActionsTransaction(self)
    def _get_key_code_from_name(self, name):
        """Map a key name to an X keycode; tries exact then lowercase match."""
        if name == 'shift':
            symb = GeistXBase.KEY_NAME_TO_CODE['Shift_L']
        elif name in GeistXBase.KEY_NAME_TO_CODE:
            symb = GeistXBase.KEY_NAME_TO_CODE[name]
        elif name.lower() in GeistXBase.KEY_NAME_TO_CODE_IGNORE_CASE:
            symb = GeistXBase.KEY_NAME_TO_CODE_IGNORE_CASE[name.lower()]
        else:
            raise ValueError('unhandled key %r' % (name,))
        return self._conn.keysyms.get_keycode(symb)
    def key_down(self, name):
        """Send a synthetic key-press for the named key."""
        key_code = self._get_key_code_from_name(name)
        with self._conn.bunch():
            self._conn.xtest.fake_input_checked(
                KeyPress,
                detail=key_code
            )
    def key_up(self, name):
        """Send a synthetic key-release for the named key."""
        key_code = self._get_key_code_from_name(name)
        with self._conn.bunch():
            self._conn.xtest.fake_input_checked(
                KeyRelease,
                detail=key_code
            )
    def button_down(self, button_num):
        """Send a synthetic mouse-button press."""
        with self._conn.bunch():
            self._conn.xtest.fake_input_checked(
                ButtonPress,
                detail=button_num
            )
    def button_up(self, button_num):
        """Send a synthetic mouse-button release."""
        with self._conn.bunch():
            self._conn.xtest.fake_input_checked(
                ButtonRelease,
                detail=button_num
            )
    def move(self, point):
        """Warp the pointer to (x, y) in root-window coordinates."""
        x, y = point
        with self._conn.bunch():
            self._conn.xtest.fake_input_checked(
                MotionNotify,
                rootX=x,
                rootY=y,
            )
    def cursor_position(self):
        """Return the pointer's (x, y) position on the root window."""
        reply = self._root.query_pointer().reply()
        return reply.root_x, reply.root_y
    def close(self):
        """Disconnect from the X server; safe to call more than once."""
        if hasattr(self, '_conn'):
            self._conn.disconnect()
            del self._conn
    def __del__(self):
        self.close()
| 28.186047 | 79 | 0.59791 | from __future__ import division, absolute_import, print_function
from ooxcb.protocol import (
xtest,
)
from ooxcb.constant import (
ButtonPress,
ButtonRelease,
KeyPress,
KeyRelease,
MotionNotify
)
import ooxcb
from ooxcb.keysymdef import keysyms
import subprocess
import os
from ._common import BackendActionBuilder
xtest.mixin()
class _ActionsTransaction(object):
    """Context manager that queues backend actions and runs them on exit."""
    def __init__(self, backend):
        self._conn = backend._conn
        self._actions_builder = BackendActionBuilder(backend)
    def __enter__(self):
        # Hand the builder to the with-body so it can queue actions.
        return self._actions_builder
    def __exit__(self, *args):
        # Run everything that was queued; never suppress exceptions.
        self._actions_builder.execute()
        return False
class GeistXBase(object):
    """Shared X11 backend behaviour: injects fake keyboard/mouse input.

    Events are generated with the XTEST extension
    (``xtest.fake_input_checked``), so they go to whatever window has
    focus on the connected display.
    """
    # X keysym name -> keysym value table, plus a lower-cased copy for
    # case-insensitive lookups in _get_key_code_from_name().
    KEY_NAME_TO_CODE = keysyms
    KEY_NAME_TO_CODE_IGNORE_CASE = {name.lower(): value
                                    for name, value in keysyms.iteritems()}
    def __init__(self, **kwargs):
        # Connect to the given X display (default ':0') and cache the
        # root window of the preferred screen for pointer queries.
        display = kwargs.get('display', ':0')
        self._display = display
        self._conn = ooxcb.connect(display)
        self._root = self._conn.setup.roots[self._conn.pref_screen].root
    @property
    def display(self):
        """The X display string this backend is connected to (e.g. ':0')."""
        return self._display
    def create_process(self, command, shell=True, stdout=None, stderr=None,
                       env=None):
        """Run ``command`` via subprocess.Popen with DISPLAY set to ours."""
        env = env if env is not None else dict(os.environ)
        env['DISPLAY'] = self.display
        return subprocess.Popen(command, shell=shell,
                                stdout=stdout, stderr=stderr,
                                env=env)
    def actions_transaction(self):
        # Batch several key/button/move actions and execute them together.
        return _ActionsTransaction(self)
    def _get_key_code_from_name(self, name):
        # Resolve a human-friendly key name to an X keycode: special-case
        # 'shift', then exact keysym name, then case-insensitive match.
        if name == 'shift':
            symb = GeistXBase.KEY_NAME_TO_CODE['Shift_L']
        elif name in GeistXBase.KEY_NAME_TO_CODE:
            symb = GeistXBase.KEY_NAME_TO_CODE[name]
        elif name.lower() in GeistXBase.KEY_NAME_TO_CODE_IGNORE_CASE:
            symb = GeistXBase.KEY_NAME_TO_CODE_IGNORE_CASE[name.lower()]
        else:
            raise ValueError('unhandled key %r' % (name,))
        return self._conn.keysyms.get_keycode(symb)
    def key_down(self, name):
        """Press (and hold) the key called ``name``."""
        key_code = self._get_key_code_from_name(name)
        with self._conn.bunch():
            self._conn.xtest.fake_input_checked(
                KeyPress,
                detail=key_code
            )
    def key_up(self, name):
        """Release the key called ``name``."""
        key_code = self._get_key_code_from_name(name)
        with self._conn.bunch():
            self._conn.xtest.fake_input_checked(
                KeyRelease,
                detail=key_code
            )
    def button_down(self, button_num):
        """Press the X mouse button numbered ``button_num``."""
        with self._conn.bunch():
            self._conn.xtest.fake_input_checked(
                ButtonPress,
                detail=button_num
            )
    def button_up(self, button_num):
        """Release the X mouse button numbered ``button_num``."""
        with self._conn.bunch():
            self._conn.xtest.fake_input_checked(
                ButtonRelease,
                detail=button_num
            )
    def move(self, point):
        """Warp the pointer to ``point`` (x, y) in root-window coordinates."""
        x, y = point
        with self._conn.bunch():
            self._conn.xtest.fake_input_checked(
                MotionNotify,
                rootX=x,
                rootY=y,
            )
    def cursor_position(self):
        """Return the pointer's current (x, y) on the root window."""
        reply = self._root.query_pointer().reply()
        return reply.root_x, reply.root_y
    def close(self):
        """Disconnect from the X server; safe to call more than once."""
        if hasattr(self, '_conn'):
            self._conn.disconnect()
            del self._conn
    def __del__(self):
        # Best-effort cleanup; close() guards against a partly-built object.
        self.close()
| true | true |
f724b7df3714254f6a67ad6934798425f668dd4c | 565 | bzl | Python | src/test/java/com/google/devtools/build/skydoc/testdata/java_basic_test/input.bzl | ArielleA/bazel | f7be80e47445a1ddf301b1af0dda2f97b5a271ad | [
"Apache-2.0"
] | null | null | null | src/test/java/com/google/devtools/build/skydoc/testdata/java_basic_test/input.bzl | ArielleA/bazel | f7be80e47445a1ddf301b1af0dda2f97b5a271ad | [
"Apache-2.0"
] | null | null | null | src/test/java/com/google/devtools/build/skydoc/testdata/java_basic_test/input.bzl | ArielleA/bazel | f7be80e47445a1ddf301b1af0dda2f97b5a271ad | [
"Apache-2.0"
] | null | null | null | def exercise_the_api():
var1 = java_common.JavaRuntimeInfo
var2 = JavaInfo
var3 = java_proto_common
exercise_the_api()
def my_rule_impl(ctx):
    # No-op implementation: provides nothing. This file is test input for
    # the documentation generator, which only inspects the rule signature.
    return struct()
# Rule whose attributes exercise several attr types (label, string_dict,
# output, bool) so the doc generator's handling of each can be checked.
java_related_rule = rule(
    implementation = my_rule_impl,
    doc = "This rule does java-related things.",
    attrs = {
        "first": attr.label(mandatory = True, allow_files = True, single_file = True),
        "second": attr.string_dict(mandatory = True),
        "third": attr.output(mandatory = True),
        "fourth": attr.bool(default = False, mandatory = False),
    },
)
def exercise_the_api():
    # Reference a few Java-related top-level symbols; values are unused.
    # Presumably this checks the symbols resolve in the doc generator's
    # fake build API — confirm against the skydoc test harness.
    var1 = java_common.JavaRuntimeInfo
    var2 = JavaInfo
    var3 = java_proto_common
exercise_the_api()
def my_rule_impl(ctx):
    # No-op implementation: provides nothing; only the rule signature matters.
    return struct()
# Rule whose attributes exercise several attr types (label, string_dict,
# output, bool) so the doc generator's handling of each can be checked.
java_related_rule = rule(
    implementation = my_rule_impl,
    doc = "This rule does java-related things.",
    attrs = {
        "first": attr.label(mandatory = True, allow_files = True, single_file = True),
        "second": attr.string_dict(mandatory = True),
        "third": attr.output(mandatory = True),
        "fourth": attr.bool(default = False, mandatory = False),
    },
)
| true | true |
f724b8126766f5f31898fdfe933c66db94b09011 | 18,759 | py | Python | rootfs/usr/lib/python3/dist-packages/numpy/polynomial/tests/test_chebyshev.py | kappaIO-Dev/kappaIO-sdk-armhf-crosscompile | 66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2 | [
"MIT"
] | null | null | null | rootfs/usr/lib/python3/dist-packages/numpy/polynomial/tests/test_chebyshev.py | kappaIO-Dev/kappaIO-sdk-armhf-crosscompile | 66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2 | [
"MIT"
] | null | null | null | rootfs/usr/lib/python3/dist-packages/numpy/polynomial/tests/test_chebyshev.py | kappaIO-Dev/kappaIO-sdk-armhf-crosscompile | 66fc5fc21e6235f7a3be72a7ccac68e2224b7fb2 | [
"MIT"
] | null | null | null | """Tests for chebyshev module.
"""
import numpy as np
import numpy.polynomial.chebyshev as ch
from numpy.testing import *
def trim(x):
    """Strip trailing Chebyshev coefficients with magnitude <= 1e-6."""
    tolerance = 1e-6
    return ch.chebtrim(x, tol=tolerance)
# Power-basis (ordinary polynomial) coefficients of the Chebyshev
# polynomials T_0 .. T_9, lowest degree first; used below to cross-check
# cheb2poly / poly2cheb.
T0 = [ 1]
T1 = [ 0, 1]
T2 = [-1, 0, 2]
T3 = [ 0, -3, 0, 4]
T4 = [ 1, 0, -8, 0, 8]
T5 = [ 0, 5, 0, -20, 0, 16]
T6 = [-1, 0, 18, 0, -48, 0, 32]
T7 = [ 0, -7, 0, 56, 0, -112, 0, 64]
T8 = [ 1, 0, -32, 0, 160, 0, -256, 0, 128]
T9 = [ 0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
class TestPrivate(TestCase):
    """Round-trip checks for the private c-series <-> z-series helpers."""
    def test__cseries_to_zseries(self):
        for n in range(5):
            cseries = np.array([2] + [1]*n, np.double)
            expected = np.array([.5]*n + [2] + [.5]*n, np.double)
            assert_equal(ch._cseries_to_zseries(cseries), expected)
    def test__zseries_to_cseries(self):
        for n in range(5):
            zseries = np.array([.5]*n + [2] + [.5]*n, np.double)
            expected = np.array([2] + [1]*n, np.double)
            assert_equal(ch._zseries_to_cseries(zseries), expected)
class TestConstants(TestCase):
    """The module-level Chebyshev constants hold their documented values."""
    def test_chebdomain(self):
        assert_equal(ch.chebdomain, [-1, 1])
    def test_chebzero(self):
        # The zero series is the single coefficient 0.
        assert_equal(ch.chebzero, [0])
    def test_chebone(self):
        # The constant 1 is T_0.
        assert_equal(ch.chebone, [1])
    def test_chebx(self):
        # The identity x is T_1.
        assert_equal(ch.chebx, [0, 1])
class TestArithmetic(TestCase) :
    """Chebyshev-basis add/sub/mulx/mul/div and series evaluation."""
    def test_chebadd(self) :
        for i in range(5) :
            for j in range(5) :
                msg = "At i=%d, j=%d" % (i,j)
                tgt = np.zeros(max(i,j) + 1)
                tgt[i] += 1
                tgt[j] += 1
                res = ch.chebadd([0]*i + [1], [0]*j + [1])
                assert_equal(trim(res), trim(tgt), err_msg=msg)
    def test_chebsub(self) :
        for i in range(5) :
            for j in range(5) :
                msg = "At i=%d, j=%d" % (i,j)
                tgt = np.zeros(max(i,j) + 1)
                tgt[i] += 1
                tgt[j] -= 1
                res = ch.chebsub([0]*i + [1], [0]*j + [1])
                assert_equal(trim(res), trim(tgt), err_msg=msg)
    def test_chebmulx(self):
        assert_equal(ch.chebmulx([0]), [0])
        assert_equal(ch.chebmulx([1]), [0,1])
        # x*T_i = (T_{i-1} + T_{i+1})/2 for i >= 1.
        for i in range(1, 5):
            ser = [0]*i + [1]
            tgt = [0]*(i - 1) + [.5, 0, .5]
            assert_equal(ch.chebmulx(ser), tgt)
    def test_chebmul(self) :
        for i in range(5) :
            for j in range(5) :
                msg = "At i=%d, j=%d" % (i,j)
                # Product identity: T_i*T_j = (T_{i+j} + T_{|i-j|})/2.
                tgt = np.zeros(i + j + 1)
                tgt[i + j] += .5
                tgt[abs(i - j)] += .5
                res = ch.chebmul([0]*i + [1], [0]*j + [1])
                assert_equal(trim(res), trim(tgt), err_msg=msg)
    def test_chebdiv(self) :
        for i in range(5) :
            for j in range(5) :
                msg = "At i=%d, j=%d" % (i,j)
                ci = [0]*i + [1]
                cj = [0]*j + [1]
                # Check quo*ci + rem reproduces the dividend.
                tgt = ch.chebadd(ci, cj)
                quo, rem = ch.chebdiv(tgt, ci)
                res = ch.chebadd(ch.chebmul(quo, ci), rem)
                assert_equal(trim(res), trim(tgt), err_msg=msg)
    def test_chebval(self) :
        def f(x) :
            return x*(x**2 - 1)
        # check empty input yields an empty result
        assert_equal(ch.chebval([], [1]).size, 0)
        # check values at the endpoints and at the zeros of T_i
        for i in range(5) :
            tgt = 1
            res = ch.chebval(1, [0]*i + [1])
            assert_almost_equal(res, tgt)
            tgt = (-1)**i
            res = ch.chebval(-1, [0]*i + [1])
            assert_almost_equal(res, tgt)
            zeros = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
            tgt = 0
            res = ch.chebval(zeros, [0]*i + [1])
            assert_almost_equal(res, tgt)
        x = np.linspace(-1,1)
        tgt = f(x)
        res = ch.chebval(x, [0, -.25, 0, .25])
        assert_almost_equal(res, tgt)
        # check that the input's shape is preserved
        for i in range(3) :
            dims = [2]*i
            x = np.zeros(dims)
            assert_equal(ch.chebval(x, [1]).shape, dims)
            assert_equal(ch.chebval(x, [1,0]).shape, dims)
            assert_equal(ch.chebval(x, [1,0,0]).shape, dims)
class TestCalculus(TestCase) :
    """chebint/chebder: constants of integration, bounds, scaling, inverses."""
    def test_chebint(self) :
        # check exceptions
        assert_raises(ValueError, ch.chebint, [0], .5)
        assert_raises(ValueError, ch.chebint, [0], -1)
        assert_raises(ValueError, ch.chebint, [0], 1, [0,0])
        # test integration of zero polynomial
        for i in range(2, 5):
            k = [0]*(i - 2) + [1]
            res = ch.chebint([0], m=i, k=k)
            assert_almost_equal(res, [0, 1])
        # check single integration with integration constant
        for i in range(5) :
            scl = i + 1
            pol = [0]*i + [1]
            tgt = [i] + [0]*i + [1/scl]
            chebpol = ch.poly2cheb(pol)
            chebint = ch.chebint(chebpol, m=1, k=[i])
            res = ch.cheb2poly(chebint)
            assert_almost_equal(trim(res), trim(tgt))
        # check single integration with integration constant and lbnd
        for i in range(5) :
            scl = i + 1
            pol = [0]*i + [1]
            chebpol = ch.poly2cheb(pol)
            chebint = ch.chebint(chebpol, m=1, k=[i], lbnd=-1)
            assert_almost_equal(ch.chebval(-1, chebint), i)
        # check single integration with integration constant and scaling
        for i in range(5) :
            scl = i + 1
            pol = [0]*i + [1]
            tgt = [i] + [0]*i + [2/scl]
            chebpol = ch.poly2cheb(pol)
            chebint = ch.chebint(chebpol, m=1, k=[i], scl=2)
            res = ch.cheb2poly(chebint)
            assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with default k
        for i in range(5) :
            for j in range(2,5) :
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j) :
                    tgt = ch.chebint(tgt, m=1)
                res = ch.chebint(pol, m=j)
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with defined k
        for i in range(5) :
            for j in range(2,5) :
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j) :
                    tgt = ch.chebint(tgt, m=1, k=[k])
                res = ch.chebint(pol, m=j, k=list(range(j)))
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with lbnd
        for i in range(5) :
            for j in range(2,5) :
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j) :
                    tgt = ch.chebint(tgt, m=1, k=[k], lbnd=-1)
                res = ch.chebint(pol, m=j, k=list(range(j)), lbnd=-1)
                assert_almost_equal(trim(res), trim(tgt))
        # check multiple integrations with scaling
        for i in range(5) :
            for j in range(2,5) :
                pol = [0]*i + [1]
                tgt = pol[:]
                for k in range(j) :
                    tgt = ch.chebint(tgt, m=1, k=[k], scl=2)
                res = ch.chebint(pol, m=j, k=list(range(j)), scl=2)
                assert_almost_equal(trim(res), trim(tgt))
    def test_chebder(self) :
        # check exceptions
        assert_raises(ValueError, ch.chebder, [0], .5)
        assert_raises(ValueError, ch.chebder, [0], -1)
        # check that zeroth derivative does nothing
        for i in range(5) :
            tgt = [1] + [0]*i
            res = ch.chebder(tgt, m=0)
            assert_equal(trim(res), trim(tgt))
        # check that derivation is the inverse of integration
        for i in range(5) :
            for j in range(2,5) :
                tgt = [1] + [0]*i
                res = ch.chebder(ch.chebint(tgt, m=j), m=j)
                assert_almost_equal(trim(res), trim(tgt))
        # check derivation with scaling
        for i in range(5) :
            for j in range(2,5) :
                tgt = [1] + [0]*i
                res = ch.chebder(ch.chebint(tgt, m=j, scl=2), m=j, scl=.5)
                assert_almost_equal(trim(res), trim(tgt))
class TestMisc(TestCase) :
    """Roots, Vandermonde, fitting, trimming and basis-conversion helpers."""
    def test_chebfromroots(self) :
        res = ch.chebfromroots([])
        assert_almost_equal(trim(res), [1])
        for i in range(1,5) :
            # The zeros of T_i; 2**(i-1)*chebfromroots(zeros) == T_i.
            roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
            tgt = [0]*i + [1]
            res = ch.chebfromroots(roots)*2**(i-1)
            assert_almost_equal(trim(res),trim(tgt))
    def test_chebroots(self) :
        assert_almost_equal(ch.chebroots([1]), [])
        assert_almost_equal(ch.chebroots([1, 2]), [-.5])
        for i in range(2,5) :
            tgt = np.linspace(-1, 1, i)
            res = ch.chebroots(ch.chebfromroots(tgt))
            assert_almost_equal(trim(res), trim(tgt))
    def test_chebvander(self) :
        # check for 1d x
        x = np.arange(3)
        v = ch.chebvander(x, 3)
        assert_(v.shape == (3,4))
        for i in range(4) :
            coef = [0]*i + [1]
            assert_almost_equal(v[...,i], ch.chebval(x, coef))
        # check for 2d x
        x = np.array([[1,2],[3,4],[5,6]])
        v = ch.chebvander(x, 3)
        assert_(v.shape == (3,2,4))
        for i in range(4) :
            coef = [0]*i + [1]
            assert_almost_equal(v[...,i], ch.chebval(x, coef))
    def test_chebfit(self) :
        def f(x) :
            return x*(x - 1)*(x - 2)
        # Test exceptions
        assert_raises(ValueError, ch.chebfit, [1], [1], -1)
        assert_raises(TypeError, ch.chebfit, [[1]], [1], 0)
        assert_raises(TypeError, ch.chebfit, [], [1], 0)
        assert_raises(TypeError, ch.chebfit, [1], [[[1]]], 0)
        assert_raises(TypeError, ch.chebfit, [1, 2], [1], 0)
        assert_raises(TypeError, ch.chebfit, [1], [1, 2], 0)
        assert_raises(TypeError, ch.chebfit, [1], [1], 0, w=[[1]])
        assert_raises(TypeError, ch.chebfit, [1], [1], 0, w=[1,1])
        # Test fit
        x = np.linspace(0,2)
        y = f(x)
        #
        coef3 = ch.chebfit(x, y, 3)
        assert_equal(len(coef3), 4)
        assert_almost_equal(ch.chebval(x, coef3), y)
        #
        coef4 = ch.chebfit(x, y, 4)
        assert_equal(len(coef4), 5)
        assert_almost_equal(ch.chebval(x, coef4), y)
        #
        coef2d = ch.chebfit(x, np.array([y,y]).T, 3)
        assert_almost_equal(coef2d, np.array([coef3,coef3]).T)
        # test weighting: zero-weighted points carry corrupted values, so a
        # correct implementation must reproduce the unweighted fit anyway.
        w = np.zeros_like(x)
        yw = y.copy()
        w[1::2] = 1
        # BUGFIX: corrupt the copy, not the reference data. The original
        # wrote `y[0::2] = 0`, which left yw clean and silently weakened
        # the weighted-fit check (compare the correct idiom in
        # TestChebyshevClass.test_fit).
        yw[0::2] = 0
        wcoef3 = ch.chebfit(x, yw, 3, w=w)
        assert_almost_equal(wcoef3, coef3)
        #
        wcoef2d = ch.chebfit(x, np.array([yw,yw]).T, 3, w=w)
        assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)
    def test_chebtrim(self) :
        coef = [2, -1, 1, 0]
        # Test exceptions
        assert_raises(ValueError, ch.chebtrim, coef, -1)
        # Test results
        assert_equal(ch.chebtrim(coef), coef[:-1])
        assert_equal(ch.chebtrim(coef, 1), coef[:-3])
        assert_equal(ch.chebtrim(coef, 2), [0])
    def test_chebline(self) :
        assert_equal(ch.chebline(3,4), [3, 4])
    def test_cheb2poly(self) :
        for i in range(10) :
            assert_almost_equal(ch.cheb2poly([0]*i + [1]), Tlist[i])
    def test_poly2cheb(self) :
        for i in range(10) :
            assert_almost_equal(ch.poly2cheb(Tlist[i]), [0]*i + [1])
    def test_chebpts1(self):
        # test exceptions
        assert_raises(ValueError, ch.chebpts1, 1.5)
        assert_raises(ValueError, ch.chebpts1, 0)
        # test points
        tgt = [0]
        assert_almost_equal(ch.chebpts1(1), tgt)
        tgt = [-0.70710678118654746, 0.70710678118654746]
        assert_almost_equal(ch.chebpts1(2), tgt)
        tgt = [-0.86602540378443871, 0, 0.86602540378443871]
        assert_almost_equal(ch.chebpts1(3), tgt)
        tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325]
        assert_almost_equal(ch.chebpts1(4), tgt)
    def test_chebpts2(self):
        # test exceptions
        assert_raises(ValueError, ch.chebpts2, 1.5)
        assert_raises(ValueError, ch.chebpts2, 1)
        # test points
        tgt = [-1, 1]
        assert_almost_equal(ch.chebpts2(2), tgt)
        tgt = [-1, 0, 1]
        assert_almost_equal(ch.chebpts2(3), tgt)
        tgt = [-1, -0.5, .5, 1]
        assert_almost_equal(ch.chebpts2(4), tgt)
        tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0]
        assert_almost_equal(ch.chebpts2(5), tgt)
class TestChebyshevClass(TestCase) :
    """Behaviour of the Chebyshev convenience class: arithmetic operators,
    domain mapping, calculus methods and the class constructors."""
    # Fixed series used throughout; p2 is p1 mapped onto domain [0, 1].
    p1 = ch.Chebyshev([1,2,3])
    p2 = ch.Chebyshev([1,2,3], [0,1])
    p3 = ch.Chebyshev([1,2])
    p4 = ch.Chebyshev([2,2,3])
    p5 = ch.Chebyshev([3,2,3])
    def test_equal(self) :
        assert_(self.p1 == self.p1)
        assert_(self.p2 == self.p2)
        assert_(not self.p1 == self.p2)
        assert_(not self.p1 == self.p3)
        assert_(not self.p1 == [1,2,3])
    def test_not_equal(self) :
        assert_(not self.p1 != self.p1)
        assert_(not self.p2 != self.p2)
        assert_(self.p1 != self.p2)
        assert_(self.p1 != self.p3)
        assert_(self.p1 != [1,2,3])
    def test_add(self) :
        tgt = ch.Chebyshev([2,4,6])
        assert_(self.p1 + self.p1 == tgt)
        assert_(self.p1 + [1,2,3] == tgt)
        assert_([1,2,3] + self.p1 == tgt)
    def test_sub(self) :
        tgt = ch.Chebyshev([1])
        assert_(self.p4 - self.p1 == tgt)
        assert_(self.p4 - [1,2,3] == tgt)
        assert_([2,2,3] - self.p1 == tgt)
    def test_mul(self) :
        tgt = ch.Chebyshev([7.5, 10., 8., 6., 4.5])
        assert_(self.p1 * self.p1 == tgt)
        assert_(self.p1 * [1,2,3] == tgt)
        assert_([1,2,3] * self.p1 == tgt)
    def test_floordiv(self) :
        tgt = ch.Chebyshev([1])
        assert_(self.p4 // self.p1 == tgt)
        assert_(self.p4 // [1,2,3] == tgt)
        assert_([2,2,3] // self.p1 == tgt)
    def test_mod(self) :
        tgt = ch.Chebyshev([1])
        assert_((self.p4 % self.p1) == tgt)
        assert_((self.p4 % [1,2,3]) == tgt)
        assert_(([2,2,3] % self.p1) == tgt)
    def test_divmod(self) :
        tquo = ch.Chebyshev([1])
        trem = ch.Chebyshev([2])
        quo, rem = divmod(self.p5, self.p1)
        assert_(quo == tquo and rem == trem)
        quo, rem = divmod(self.p5, [1,2,3])
        assert_(quo == tquo and rem == trem)
        quo, rem = divmod([3,2,3], self.p1)
        assert_(quo == tquo and rem == trem)
    def test_pow(self) :
        tgt = ch.Chebyshev([1])
        for i in range(5) :
            res = self.p1**i
            assert_(res == tgt)
            tgt *= self.p1
    def test_call(self) :
        # domain = [-1, 1]
        x = np.linspace(-1, 1)
        tgt = 3*(2*x**2 - 1) + 2*x + 1
        assert_almost_equal(self.p1(x), tgt)
        # domain = [0, 1]: p2 must equal p1 composed with the domain map
        x = np.linspace(0, 1)
        xx = 2*x - 1
        assert_almost_equal(self.p2(x), self.p1(xx))
    def test_degree(self) :
        assert_equal(self.p1.degree(), 2)
    def test_cutdeg(self) :
        assert_raises(ValueError, self.p1.cutdeg, .5)
        assert_raises(ValueError, self.p1.cutdeg, -1)
        assert_equal(len(self.p1.cutdeg(3)), 3)
        assert_equal(len(self.p1.cutdeg(2)), 3)
        assert_equal(len(self.p1.cutdeg(1)), 2)
        assert_equal(len(self.p1.cutdeg(0)), 1)
    def test_convert(self) :
        x = np.linspace(-1,1)
        p = self.p1.convert(domain=[0,1])
        assert_almost_equal(p(x), self.p1(x))
    def test_mapparms(self) :
        parms = self.p2.mapparms()
        assert_almost_equal(parms, [-1, 2])
    def test_trim(self) :
        coef = [1, 1e-6, 1e-12, 0]
        p = ch.Chebyshev(coef)
        assert_equal(p.trim().coef, coef[:3])
        assert_equal(p.trim(1e-10).coef, coef[:2])
        assert_equal(p.trim(1e-5).coef, coef[:1])
    def test_truncate(self) :
        assert_raises(ValueError, self.p1.truncate, .5)
        assert_raises(ValueError, self.p1.truncate, 0)
        assert_equal(len(self.p1.truncate(4)), 3)
        assert_equal(len(self.p1.truncate(3)), 3)
        assert_equal(len(self.p1.truncate(2)), 2)
        assert_equal(len(self.p1.truncate(1)), 1)
    def test_copy(self) :
        p = self.p1.copy()
        assert_(self.p1 == p)
    def test_integ(self) :
        p = self.p2.integ()
        assert_almost_equal(p.coef, ch.chebint([1,2,3], 1, 0, scl=.5))
        p = self.p2.integ(lbnd=0)
        assert_almost_equal(p(0), 0)
        p = self.p2.integ(1, 1)
        assert_almost_equal(p.coef, ch.chebint([1,2,3], 1, 1, scl=.5))
        p = self.p2.integ(2, [1, 2])
        assert_almost_equal(p.coef, ch.chebint([1,2,3], 2, [1,2], scl=.5))
    def test_deriv(self) :
        # deriv must invert integ.
        p = self.p2.integ(2, [1, 2])
        assert_almost_equal(p.deriv(1).coef, self.p2.integ(1, [1]).coef)
        assert_almost_equal(p.deriv(2).coef, self.p2.coef)
    def test_roots(self) :
        p = ch.Chebyshev(ch.poly2cheb([0, -1, 0, 1]), [0, 1])
        res = p.roots()
        tgt = [0, .5, 1]
        assert_almost_equal(res, tgt)
    def test_linspace(self):
        xdes = np.linspace(0, 1, 20)
        ydes = self.p2(xdes)
        xres, yres = self.p2.linspace(20)
        assert_almost_equal(xres, xdes)
        assert_almost_equal(yres, ydes)
    def test_fromroots(self) :
        roots = [0, .5, 1]
        p = ch.Chebyshev.fromroots(roots, domain=[0, 1])
        res = p.coef
        tgt = ch.poly2cheb([0, -1, 0, 1])
        assert_almost_equal(res, tgt)
    def test_fit(self) :
        def f(x) :
            return x*(x - 1)*(x - 2)
        x = np.linspace(0,3)
        y = f(x)
        # test default value of domain
        p = ch.Chebyshev.fit(x, y, 3)
        assert_almost_equal(p.domain, [0,3])
        # test that fit works in given domains
        p = ch.Chebyshev.fit(x, y, 3, None)
        assert_almost_equal(p(x), y)
        assert_almost_equal(p.domain, [0,3])
        p = ch.Chebyshev.fit(x, y, 3, [])
        assert_almost_equal(p(x), y)
        assert_almost_equal(p.domain, [-1, 1])
        # test that fit accepts weights.
        w = np.zeros_like(x)
        yw = y.copy()
        w[1::2] = 1
        yw[0::2] = 0
        p = ch.Chebyshev.fit(x, yw, 3, w=w)
        assert_almost_equal(p(x), y)
    def test_identity(self) :
        x = np.linspace(0,3)
        p = ch.Chebyshev.identity()
        assert_almost_equal(p(x), x)
        p = ch.Chebyshev.identity([1,3])
        assert_almost_equal(p(x), x)
# Run the full test suite when this file is executed directly.
if __name__ == "__main__":
    run_module_suite()
| 32.85289 | 74 | 0.504824 |
import numpy as np
import numpy.polynomial.chebyshev as ch
from numpy.testing import *
def trim(x):
    """Drop trailing Chebyshev coefficients no larger than 1e-6."""
    return ch.chebtrim(x, tol=1.0e-6)
# Power-basis coefficients of the Chebyshev polynomials T_0 .. T_9,
# lowest degree first; used to cross-check cheb2poly / poly2cheb.
T0 = [ 1]
T1 = [ 0, 1]
T2 = [-1, 0, 2]
T3 = [ 0, -3, 0, 4]
T4 = [ 1, 0, -8, 0, 8]
T5 = [ 0, 5, 0, -20, 0, 16]
T6 = [-1, 0, 18, 0, -48, 0, 32]
T7 = [ 0, -7, 0, 56, 0, -112, 0, 64]
T8 = [ 1, 0, -32, 0, 160, 0, -256, 0, 128]
T9 = [ 0, 9, 0, -120, 0, 432, 0, -576, 0, 256]
Tlist = [T0, T1, T2, T3, T4, T5, T6, T7, T8, T9]
class TestPrivate(TestCase) :
    """Round-trip checks for the private c-series <-> z-series helpers."""
    def test__cseries_to_zseries(self) :
        for i in range(5) :
            inp = np.array([2] + [1]*i, np.double)
            tgt = np.array([.5]*i + [2] + [.5]*i, np.double)
            res = ch._cseries_to_zseries(inp)
            assert_equal(res, tgt)
    def test__zseries_to_cseries(self) :
        for i in range(5) :
            inp = np.array([.5]*i + [2] + [.5]*i, np.double)
            tgt = np.array([2] + [1]*i, np.double)
            res = ch._zseries_to_cseries(inp)
            assert_equal(res, tgt)
class TestConstants(TestCase) :
    """The module-level Chebyshev constants hold their documented values."""
    def test_chebdomain(self) :
        assert_equal(ch.chebdomain, [-1, 1])
    def test_chebzero(self) :
        assert_equal(ch.chebzero, [0])
    def test_chebone(self) :
        assert_equal(ch.chebone, [1])
    def test_chebx(self) :
        assert_equal(ch.chebx, [0, 1])
class TestArithmetic(TestCase) :
def test_chebadd(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
tgt = np.zeros(max(i,j) + 1)
tgt[i] += 1
tgt[j] += 1
res = ch.chebadd([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebsub(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
tgt = np.zeros(max(i,j) + 1)
tgt[i] += 1
tgt[j] -= 1
res = ch.chebsub([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebmulx(self):
assert_equal(ch.chebmulx([0]), [0])
assert_equal(ch.chebmulx([1]), [0,1])
for i in range(1, 5):
ser = [0]*i + [1]
tgt = [0]*(i - 1) + [.5, 0, .5]
assert_equal(ch.chebmulx(ser), tgt)
def test_chebmul(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
tgt = np.zeros(i + j + 1)
tgt[i + j] += .5
tgt[abs(i - j)] += .5
res = ch.chebmul([0]*i + [1], [0]*j + [1])
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebdiv(self) :
for i in range(5) :
for j in range(5) :
msg = "At i=%d, j=%d" % (i,j)
ci = [0]*i + [1]
cj = [0]*j + [1]
tgt = ch.chebadd(ci, cj)
quo, rem = ch.chebdiv(tgt, ci)
res = ch.chebadd(ch.chebmul(quo, ci), rem)
assert_equal(trim(res), trim(tgt), err_msg=msg)
def test_chebval(self) :
def f(x) :
return x*(x**2 - 1)
assert_equal(ch.chebval([], [1]).size, 0)
for i in range(5) :
tgt = 1
res = ch.chebval(1, [0]*i + [1])
assert_almost_equal(res, tgt)
tgt = (-1)**i
res = ch.chebval(-1, [0]*i + [1])
assert_almost_equal(res, tgt)
zeros = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
tgt = 0
res = ch.chebval(zeros, [0]*i + [1])
assert_almost_equal(res, tgt)
x = np.linspace(-1,1)
tgt = f(x)
res = ch.chebval(x, [0, -.25, 0, .25])
assert_almost_equal(res, tgt)
for i in range(3) :
dims = [2]*i
x = np.zeros(dims)
assert_equal(ch.chebval(x, [1]).shape, dims)
assert_equal(ch.chebval(x, [1,0]).shape, dims)
assert_equal(ch.chebval(x, [1,0,0]).shape, dims)
class TestCalculus(TestCase) :
def test_chebint(self) :
assert_raises(ValueError, ch.chebint, [0], .5)
assert_raises(ValueError, ch.chebint, [0], -1)
assert_raises(ValueError, ch.chebint, [0], 1, [0,0])
for i in range(2, 5):
k = [0]*(i - 2) + [1]
res = ch.chebint([0], m=i, k=k)
assert_almost_equal(res, [0, 1])
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [1/scl]
chebpol = ch.poly2cheb(pol)
chebint = ch.chebint(chebpol, m=1, k=[i])
res = ch.cheb2poly(chebint)
assert_almost_equal(trim(res), trim(tgt))
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
chebpol = ch.poly2cheb(pol)
chebint = ch.chebint(chebpol, m=1, k=[i], lbnd=-1)
assert_almost_equal(ch.chebval(-1, chebint), i)
for i in range(5) :
scl = i + 1
pol = [0]*i + [1]
tgt = [i] + [0]*i + [2/scl]
chebpol = ch.poly2cheb(pol)
chebint = ch.chebint(chebpol, m=1, k=[i], scl=2)
res = ch.cheb2poly(chebint)
assert_almost_equal(trim(res), trim(tgt))
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = ch.chebint(tgt, m=1)
res = ch.chebint(pol, m=j)
assert_almost_equal(trim(res), trim(tgt))
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = ch.chebint(tgt, m=1, k=[k])
res = ch.chebint(pol, m=j, k=list(range(j)))
assert_almost_equal(trim(res), trim(tgt))
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = ch.chebint(tgt, m=1, k=[k], lbnd=-1)
res = ch.chebint(pol, m=j, k=list(range(j)), lbnd=-1)
assert_almost_equal(trim(res), trim(tgt))
for i in range(5) :
for j in range(2,5) :
pol = [0]*i + [1]
tgt = pol[:]
for k in range(j) :
tgt = ch.chebint(tgt, m=1, k=[k], scl=2)
res = ch.chebint(pol, m=j, k=list(range(j)), scl=2)
assert_almost_equal(trim(res), trim(tgt))
def test_chebder(self) :
assert_raises(ValueError, ch.chebder, [0], .5)
assert_raises(ValueError, ch.chebder, [0], -1)
for i in range(5) :
tgt = [1] + [0]*i
res = ch.chebder(tgt, m=0)
assert_equal(trim(res), trim(tgt))
for i in range(5) :
for j in range(2,5) :
tgt = [1] + [0]*i
res = ch.chebder(ch.chebint(tgt, m=j), m=j)
assert_almost_equal(trim(res), trim(tgt))
for i in range(5) :
for j in range(2,5) :
tgt = [1] + [0]*i
res = ch.chebder(ch.chebint(tgt, m=j, scl=2), m=j, scl=.5)
assert_almost_equal(trim(res), trim(tgt))
class TestMisc(TestCase) :
    """Roots, Vandermonde, fitting, trimming and basis-conversion helpers."""
    def test_chebfromroots(self) :
        res = ch.chebfromroots([])
        assert_almost_equal(trim(res), [1])
        for i in range(1,5) :
            # The zeros of T_i; 2**(i-1)*chebfromroots(zeros) == T_i.
            roots = np.cos(np.linspace(-np.pi, 0, 2*i + 1)[1::2])
            tgt = [0]*i + [1]
            res = ch.chebfromroots(roots)*2**(i-1)
            assert_almost_equal(trim(res),trim(tgt))
    def test_chebroots(self) :
        assert_almost_equal(ch.chebroots([1]), [])
        assert_almost_equal(ch.chebroots([1, 2]), [-.5])
        for i in range(2,5) :
            tgt = np.linspace(-1, 1, i)
            res = ch.chebroots(ch.chebfromroots(tgt))
            assert_almost_equal(trim(res), trim(tgt))
    def test_chebvander(self) :
        # check for 1d x
        x = np.arange(3)
        v = ch.chebvander(x, 3)
        assert_(v.shape == (3,4))
        for i in range(4) :
            coef = [0]*i + [1]
            assert_almost_equal(v[...,i], ch.chebval(x, coef))
        # check for 2d x
        x = np.array([[1,2],[3,4],[5,6]])
        v = ch.chebvander(x, 3)
        assert_(v.shape == (3,2,4))
        for i in range(4) :
            coef = [0]*i + [1]
            assert_almost_equal(v[...,i], ch.chebval(x, coef))
    def test_chebfit(self) :
        def f(x) :
            return x*(x - 1)*(x - 2)
        # exceptions for bad degrees, shapes and weights
        assert_raises(ValueError, ch.chebfit, [1], [1], -1)
        assert_raises(TypeError, ch.chebfit, [[1]], [1], 0)
        assert_raises(TypeError, ch.chebfit, [], [1], 0)
        assert_raises(TypeError, ch.chebfit, [1], [[[1]]], 0)
        assert_raises(TypeError, ch.chebfit, [1, 2], [1], 0)
        assert_raises(TypeError, ch.chebfit, [1], [1, 2], 0)
        assert_raises(TypeError, ch.chebfit, [1], [1], 0, w=[[1]])
        assert_raises(TypeError, ch.chebfit, [1], [1], 0, w=[1,1])
        # exact fit of a cubic at degree 3 and 4, 1d and 2d targets
        x = np.linspace(0,2)
        y = f(x)
        coef3 = ch.chebfit(x, y, 3)
        assert_equal(len(coef3), 4)
        assert_almost_equal(ch.chebval(x, coef3), y)
        coef4 = ch.chebfit(x, y, 4)
        assert_equal(len(coef4), 5)
        assert_almost_equal(ch.chebval(x, coef4), y)
        coef2d = ch.chebfit(x, np.array([y,y]).T, 3)
        assert_almost_equal(coef2d, np.array([coef3,coef3]).T)
        # test weighting: zero-weighted points carry corrupted values, so a
        # correct implementation must reproduce the unweighted fit anyway.
        w = np.zeros_like(x)
        yw = y.copy()
        w[1::2] = 1
        # BUGFIX: corrupt the copy, not the reference data. The original
        # wrote `y[0::2] = 0`, which left yw clean and silently weakened
        # the weighted-fit check.
        yw[0::2] = 0
        wcoef3 = ch.chebfit(x, yw, 3, w=w)
        assert_almost_equal(wcoef3, coef3)
        wcoef2d = ch.chebfit(x, np.array([yw,yw]).T, 3, w=w)
        assert_almost_equal(wcoef2d, np.array([coef3,coef3]).T)
    def test_chebtrim(self) :
        coef = [2, -1, 1, 0]
        assert_raises(ValueError, ch.chebtrim, coef, -1)
        assert_equal(ch.chebtrim(coef), coef[:-1])
        assert_equal(ch.chebtrim(coef, 1), coef[:-3])
        assert_equal(ch.chebtrim(coef, 2), [0])
    def test_chebline(self) :
        assert_equal(ch.chebline(3,4), [3, 4])
    def test_cheb2poly(self) :
        for i in range(10) :
            assert_almost_equal(ch.cheb2poly([0]*i + [1]), Tlist[i])
    def test_poly2cheb(self) :
        for i in range(10) :
            assert_almost_equal(ch.poly2cheb(Tlist[i]), [0]*i + [1])
    def test_chebpts1(self):
        assert_raises(ValueError, ch.chebpts1, 1.5)
        assert_raises(ValueError, ch.chebpts1, 0)
        tgt = [0]
        assert_almost_equal(ch.chebpts1(1), tgt)
        tgt = [-0.70710678118654746, 0.70710678118654746]
        assert_almost_equal(ch.chebpts1(2), tgt)
        tgt = [-0.86602540378443871, 0, 0.86602540378443871]
        assert_almost_equal(ch.chebpts1(3), tgt)
        tgt = [-0.9238795325, -0.3826834323, 0.3826834323, 0.9238795325]
        assert_almost_equal(ch.chebpts1(4), tgt)
    def test_chebpts2(self):
        assert_raises(ValueError, ch.chebpts2, 1.5)
        assert_raises(ValueError, ch.chebpts2, 1)
        tgt = [-1, 1]
        assert_almost_equal(ch.chebpts2(2), tgt)
        tgt = [-1, 0, 1]
        assert_almost_equal(ch.chebpts2(3), tgt)
        tgt = [-1, -0.5, .5, 1]
        assert_almost_equal(ch.chebpts2(4), tgt)
        tgt = [-1.0, -0.707106781187, 0, 0.707106781187, 1.0]
        assert_almost_equal(ch.chebpts2(5), tgt)
class TestChebyshevClass(TestCase) :
p1 = ch.Chebyshev([1,2,3])
p2 = ch.Chebyshev([1,2,3], [0,1])
p3 = ch.Chebyshev([1,2])
p4 = ch.Chebyshev([2,2,3])
p5 = ch.Chebyshev([3,2,3])
def test_equal(self) :
assert_(self.p1 == self.p1)
assert_(self.p2 == self.p2)
assert_(not self.p1 == self.p2)
assert_(not self.p1 == self.p3)
assert_(not self.p1 == [1,2,3])
def test_not_equal(self) :
assert_(not self.p1 != self.p1)
assert_(not self.p2 != self.p2)
assert_(self.p1 != self.p2)
assert_(self.p1 != self.p3)
assert_(self.p1 != [1,2,3])
def test_add(self) :
tgt = ch.Chebyshev([2,4,6])
assert_(self.p1 + self.p1 == tgt)
assert_(self.p1 + [1,2,3] == tgt)
assert_([1,2,3] + self.p1 == tgt)
def test_sub(self) :
tgt = ch.Chebyshev([1])
assert_(self.p4 - self.p1 == tgt)
assert_(self.p4 - [1,2,3] == tgt)
assert_([2,2,3] - self.p1 == tgt)
def test_mul(self) :
tgt = ch.Chebyshev([7.5, 10., 8., 6., 4.5])
assert_(self.p1 * self.p1 == tgt)
assert_(self.p1 * [1,2,3] == tgt)
assert_([1,2,3] * self.p1 == tgt)
def test_floordiv(self) :
tgt = ch.Chebyshev([1])
assert_(self.p4 // self.p1 == tgt)
assert_(self.p4 // [1,2,3] == tgt)
assert_([2,2,3] // self.p1 == tgt)
def test_mod(self) :
tgt = ch.Chebyshev([1])
assert_((self.p4 % self.p1) == tgt)
assert_((self.p4 % [1,2,3]) == tgt)
assert_(([2,2,3] % self.p1) == tgt)
def test_divmod(self) :
tquo = ch.Chebyshev([1])
trem = ch.Chebyshev([2])
quo, rem = divmod(self.p5, self.p1)
assert_(quo == tquo and rem == trem)
quo, rem = divmod(self.p5, [1,2,3])
assert_(quo == tquo and rem == trem)
quo, rem = divmod([3,2,3], self.p1)
assert_(quo == tquo and rem == trem)
def test_pow(self) :
tgt = ch.Chebyshev([1])
for i in range(5) :
res = self.p1**i
assert_(res == tgt)
tgt *= self.p1
def test_call(self) :
x = np.linspace(-1, 1)
tgt = 3*(2*x**2 - 1) + 2*x + 1
assert_almost_equal(self.p1(x), tgt)
x = np.linspace(0, 1)
xx = 2*x - 1
assert_almost_equal(self.p2(x), self.p1(xx))
def test_degree(self) :
assert_equal(self.p1.degree(), 2)
def test_cutdeg(self) :
assert_raises(ValueError, self.p1.cutdeg, .5)
assert_raises(ValueError, self.p1.cutdeg, -1)
assert_equal(len(self.p1.cutdeg(3)), 3)
assert_equal(len(self.p1.cutdeg(2)), 3)
assert_equal(len(self.p1.cutdeg(1)), 2)
assert_equal(len(self.p1.cutdeg(0)), 1)
def test_convert(self) :
x = np.linspace(-1,1)
p = self.p1.convert(domain=[0,1])
assert_almost_equal(p(x), self.p1(x))
def test_mapparms(self) :
parms = self.p2.mapparms()
assert_almost_equal(parms, [-1, 2])
def test_trim(self) :
coef = [1, 1e-6, 1e-12, 0]
p = ch.Chebyshev(coef)
assert_equal(p.trim().coef, coef[:3])
assert_equal(p.trim(1e-10).coef, coef[:2])
assert_equal(p.trim(1e-5).coef, coef[:1])
def test_truncate(self) :
assert_raises(ValueError, self.p1.truncate, .5)
assert_raises(ValueError, self.p1.truncate, 0)
assert_equal(len(self.p1.truncate(4)), 3)
assert_equal(len(self.p1.truncate(3)), 3)
assert_equal(len(self.p1.truncate(2)), 2)
assert_equal(len(self.p1.truncate(1)), 1)
def test_copy(self) :
p = self.p1.copy()
assert_(self.p1 == p)
def test_integ(self) :
p = self.p2.integ()
assert_almost_equal(p.coef, ch.chebint([1,2,3], 1, 0, scl=.5))
p = self.p2.integ(lbnd=0)
assert_almost_equal(p(0), 0)
p = self.p2.integ(1, 1)
assert_almost_equal(p.coef, ch.chebint([1,2,3], 1, 1, scl=.5))
p = self.p2.integ(2, [1, 2])
assert_almost_equal(p.coef, ch.chebint([1,2,3], 2, [1,2], scl=.5))
def test_deriv(self) :
p = self.p2.integ(2, [1, 2])
assert_almost_equal(p.deriv(1).coef, self.p2.integ(1, [1]).coef)
assert_almost_equal(p.deriv(2).coef, self.p2.coef)
def test_roots(self) :
p = ch.Chebyshev(ch.poly2cheb([0, -1, 0, 1]), [0, 1])
res = p.roots()
tgt = [0, .5, 1]
assert_almost_equal(res, tgt)
def test_linspace(self):
xdes = np.linspace(0, 1, 20)
ydes = self.p2(xdes)
xres, yres = self.p2.linspace(20)
assert_almost_equal(xres, xdes)
assert_almost_equal(yres, ydes)
def test_fromroots(self) :
roots = [0, .5, 1]
p = ch.Chebyshev.fromroots(roots, domain=[0, 1])
res = p.coef
tgt = ch.poly2cheb([0, -1, 0, 1])
assert_almost_equal(res, tgt)
def test_fit(self) :
def f(x) :
return x*(x - 1)*(x - 2)
x = np.linspace(0,3)
y = f(x)
p = ch.Chebyshev.fit(x, y, 3)
assert_almost_equal(p.domain, [0,3])
p = ch.Chebyshev.fit(x, y, 3, None)
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, [0,3])
p = ch.Chebyshev.fit(x, y, 3, [])
assert_almost_equal(p(x), y)
assert_almost_equal(p.domain, [-1, 1])
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
yw[0::2] = 0
p = ch.Chebyshev.fit(x, yw, 3, w=w)
assert_almost_equal(p(x), y)
def test_identity(self) :
x = np.linspace(0,3)
p = ch.Chebyshev.identity()
assert_almost_equal(p(x), x)
p = ch.Chebyshev.identity([1,3])
assert_almost_equal(p(x), x)
if __name__ == "__main__":
    # Run this module's test suite when executed as a script.
    run_module_suite()
| true | true |
f724b89350fc66e26a9bf5bea0145e663b49d42f | 379 | py | Python | sandbox/team_members/pudumula/ros/gazebo_ws_1/build/rrbot_gazebo/catkin_generated/pkg.installspace.context.pc.py | Project-Heisenberg/quantum | f3ad8f4693007e45e80a88f928273adcfdc8529d | [
"Apache-2.0"
] | 1 | 2017-04-23T14:23:54.000Z | 2017-04-23T14:23:54.000Z | sandbox/team_members/pudumula/ros/gazebo_ws_1/build/rrbot_gazebo/catkin_generated/pkg.installspace.context.pc.py | Project-Heisenberg/quantum | f3ad8f4693007e45e80a88f928273adcfdc8529d | [
"Apache-2.0"
] | 13 | 2016-03-25T05:15:17.000Z | 2018-05-30T15:53:12.000Z | sandbox/team_members/pudumula/ros/gazebo_ws_1/build/rrbot_gazebo/catkin_generated/pkg.installspace.context.pc.py | Project-Heisenberg/quantum | f3ad8f4693007e45e80a88f928273adcfdc8529d | [
"Apache-2.0"
] | null | null | null | # generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated catkin package context (from pkg.context.pc.in); the empty
# strings below are CMake template substitutions — do not edit by hand.
CATKIN_PACKAGE_PREFIX = ""
# Semicolon-separated include dirs; empty substitution yields [].
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
# Catkin run dependencies, rendered space-separated for pkg-config.
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
# Exported libraries (with -l prefixes); none for this package.
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rrbot_gazebo"
# Install-space root used to resolve the generated pkg-config paths.
PROJECT_SPACE_DIR = "/home/neo/ros/gazebo_ws_1/install"
PROJECT_VERSION = "0.1.0"
| 42.111111 | 68 | 0.707124 |
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "rrbot_gazebo"
PROJECT_SPACE_DIR = "/home/neo/ros/gazebo_ws_1/install"
PROJECT_VERSION = "0.1.0"
| true | true |
f724b89596ee0599327fb7ccded8ba2067f2a7b5 | 597 | py | Python | cashflow/utils.py | wg-git/cashflow-contract | e08b175113e7bdcd0089dd7a0d0432a73a83a724 | [
"MIT"
] | 1 | 2018-04-05T13:14:26.000Z | 2018-04-05T13:14:26.000Z | cashflow/utils.py | wg-git/cashflow-contract | e08b175113e7bdcd0089dd7a0d0432a73a83a724 | [
"MIT"
] | null | null | null | cashflow/utils.py | wg-git/cashflow-contract | e08b175113e7bdcd0089dd7a0d0432a73a83a724 | [
"MIT"
] | 1 | 2018-07-23T15:05:05.000Z | 2018-07-23T15:05:05.000Z | from boa.blockchain.vm.Neo.Blockchain import GetHeight, GetHeader
from boa.blockchain.vm.Neo.Header import GetTimestamp, GetConsensusData
from boa.blockchain.vm.Neo.Runtime import Log
from boa.code.builtins import concat, list, range, take, substr
def blockTimeStamp():
    """Return the timestamp of the chain's current (latest) block."""
    height = GetHeight()
    header = GetHeader(height)
    return GetTimestamp(header)
def isInByteArray(haystack, needle):
    """Return True if needle occurs in haystack, else False.

    boa compiles a restricted Python subset for the NEO VM, so membership
    is tested with an explicit loop instead of the `in` operator.
    """
    # Fast exit for empty input (also keeps boa's codegen simple —
    # the original additionally carried a dead `n = 0` store here).
    if not len(haystack):
        return False
    for item in haystack:
        if item == needle:
            return True
    return False
| 22.111111 | 75 | 0.757119 | from boa.blockchain.vm.Neo.Blockchain import GetHeight, GetHeader
from boa.blockchain.vm.Neo.Header import GetTimestamp, GetConsensusData
from boa.blockchain.vm.Neo.Runtime import Log
from boa.code.builtins import concat, list, range, take, substr
def blockTimeStamp():
current_height = GetHeight()
current_header = GetHeader(current_height)
current_time = GetTimestamp(current_header)
return current_time
def isInByteArray(haystack,needle):
if not len(haystack):
return False
for item in haystack:
if item == needle:
return True
else:
n=0
return False
| true | true |
f724b8e8834118693a545cb670d7b52bbf2cbbeb | 209 | py | Python | PyCharm/3.py | LazarevaDarya/work5 | 77c29c3453e81995f36c176bb74a610313b882e0 | [
"MIT"
] | null | null | null | PyCharm/3.py | LazarevaDarya/work5 | 77c29c3453e81995f36c176bb74a610313b882e0 | [
"MIT"
] | null | null | null | PyCharm/3.py | LazarevaDarya/work5 | 77c29c3453e81995f36c176bb74a610313b882e0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
if __name__ == '__main__':
    # Print every proper divisor of n. (The original also contained a
    # dead `m = int` assignment, removed here.)
    n = int(input("Value of n? "))
    # A proper divisor of n is at most n // 2.
    for i in range(1, n // 2 + 1):
        if n % i == 0:
            print(i)
| 14.928571 | 38 | 0.449761 |
if __name__ == '__main__':
m = int
n = int(input("Value of n? "))
for i in range(1, int(n / 2) + 1):
if n % i == 0:
print(i)
| true | true |
f724b9955f815b8ade788558ec0d789cd6cf0ddc | 15,387 | py | Python | tests/automation_framework/src/utilities/submit_request_utility.py | anjalirx-intel/avalon | 5efd20612948a324b8a393bfe22872aeb8527097 | [
"Apache-2.0"
] | null | null | null | tests/automation_framework/src/utilities/submit_request_utility.py | anjalirx-intel/avalon | 5efd20612948a324b8a393bfe22872aeb8527097 | [
"Apache-2.0"
] | null | null | null | tests/automation_framework/src/utilities/submit_request_utility.py | anjalirx-intel/avalon | 5efd20612948a324b8a393bfe22872aeb8527097 | [
"Apache-2.0"
] | null | null | null | import logging
import json
import time
import os
import config.config as pconfig
import env
from avalon_sdk.connector.direct.jrpc.jrpc_worker_registry import \
JRPCWorkerRegistryImpl
from avalon_sdk.connector.direct.jrpc.jrpc_work_order import \
JRPCWorkOrderImpl
from avalon_sdk.worker.worker_details import \
WorkerType, WorkerStatus
from avalon_sdk.connector.direct.jrpc.jrpc_work_order_receipt \
import JRPCWorkOrderReceiptImpl
from avalon_sdk.connector.blockchains.fabric.fabric_worker_registry \
import FabricWorkerRegistryImpl
from avalon_sdk.connector.blockchains.fabric.fabric_work_order \
import FabricWorkOrderImpl
from avalon_sdk.connector.blockchains.ethereum.ethereum_worker_registry \
import EthereumWorkerRegistryImpl
from avalon_sdk.connector.blockchains.ethereum.ethereum_work_order \
import EthereumWorkOrderProxyImpl
import avalon_sdk.worker.worker_details as worker_details
logger = logging.getLogger(__name__)
TCFHOME = os.environ.get("TCF_HOME", "../../")
def config_file_read():
    """Parse the framework config files and point the JSON-RPC URI at the SDK client."""
    conf = pconfig.parse_configuration_files(env.conffiles, env.confpaths)
    logger.info(" URI client %s \n", conf["tcf"]["json_rpc_uri"])
    # Tests always talk to the SDK-side listener endpoint.
    conf["tcf"]["json_rpc_uri"] = env.uri_client_sdk
    return conf
def _create_worker_registry_instance(blockchain_type, config):
    """Instantiate the worker-registry connector for the active deployment model."""
    if env.proxy_mode:
        if blockchain_type == 'fabric':
            return FabricWorkerRegistryImpl(config)
        if blockchain_type == 'ethereum':
            return EthereumWorkerRegistryImpl(config)
    # Direct model (or an unrecognized proxy blockchain) uses JSON-RPC.
    logger.info("Direct SDK code path\n")
    return JRPCWorkerRegistryImpl(config)
def _create_work_order_instance(blockchain_type, config):
    """Instantiate the work-order connector for the active deployment model."""
    if env.proxy_mode:
        if blockchain_type == 'fabric':
            return FabricWorkOrderImpl(config)
        if blockchain_type == 'ethereum':
            return EthereumWorkOrderProxyImpl(config)
    # Direct model (or an unrecognized proxy blockchain) uses JSON-RPC.
    logger.info("Direct SDK code path\n")
    return JRPCWorkOrderImpl(config)
def _create_work_order_receipt_instance(blockchain_type, config):
    """Instantiate the work-order-receipt connector for the active model.

    Receipts are not implemented for either proxy-mode blockchain, so
    None is returned in those cases.
    """
    if env.proxy_mode:
        if blockchain_type == 'fabric':
            return None
        if blockchain_type == 'ethereum':
            # TODO: receipt support for the ethereum proxy model.
            return None
    logger.info("Direct SDK code path\n")
    return JRPCWorkOrderReceiptImpl(config)
def submit_request_listener(
        uri_client, input_json_str, output_json_file_name):
    """Post a JSON-RPC request to the listener and archive request/response.

    Parameters:
        uri_client: client object exposing ``_postmsg`` for the endpoint.
        input_json_str: dict, the JSON-RPC request (serialized below).
        output_json_file_name: base name for the archive files written
            under ``./results/``.

    Returns the decoded JSON-RPC response. ``WorkOrderGetResult`` requests
    are re-polled until a ``result`` appears, a non-retriable error
    (code != 5) is returned, or roughly 23 seconds elapse.
    """
    logger.info("Listener code path\n")
    req_time = time.strftime("%Y%m%d_%H%M%S")
    request_method = input_json_str["method"]
    input_json_str = json.dumps(input_json_str)

    # Archive the outgoing request verbatim. (The original applied
    # json.dumps twice, storing a JSON-encoded string literal instead of
    # the request document itself.)
    signed_input_file = ('./results/' + output_json_file_name + '_' + req_time
                         + '_request.json')
    with open(signed_input_file, 'w') as req_file:
        req_file.write(input_json_str)
    logger.info("in submit listener %s", input_json_str)

    if request_method == "WorkOrderGetResult":
        logger.info("- Validating WorkOrderGetResult Response-")
        response = {}
        response_timeout_start = time.time()
        # ~23 s overall polling budget.
        response_timeout_multiplier = ((6000 / 3600) + 6) * 3
        while "result" not in response:
            # Error code 5 means "result not ready"; anything else is final.
            # (The original also set an unused err_cd flag here.)
            if "error" in response and response["error"]["code"] != 5:
                logger.info('WorkOrderGetResult - '
                            'Response received with error code. ')
                break
            if ((time.time() - response_timeout_start) >
                    response_timeout_multiplier):
                logger.info('ERROR: WorkOrderGetResult response is not \
received within expected time.')
                break
            # NOTE(review): polls in a tight loop with no delay between
            # retries; consider a short sleep if listener load matters.
            response = uri_client._postmsg(input_json_str)
    else:
        logger.info('**********Received Request*********\n%s\n', input_json_str)
        response = uri_client._postmsg(input_json_str)
    logger.info('**********Received Response*********\n%s\n', response)

    # Archive the decoded response next to the request.
    response_output_file = ('./results/' + output_json_file_name + '_'
                            + req_time + '_response.json')
    with open(response_output_file, 'w') as resp_file:
        resp_file.write(json.dumps(response, ensure_ascii=False))
    return response
def workorder_submit_sdk(wo_params, input_json_obj=None):
    """Submit a work order via the SDK and normalize the proxy response."""
    logger.info("WorkOrderSubmit SDK code path\n")
    req_id = 3 if input_json_obj is None else input_json_obj["id"]
    config = config_file_read()
    work_order = _create_work_order_instance(env.blockchain_type, config)
    logger.info(" work order id %s \n", wo_params.get_work_order_id())
    logger.info(" worker id %s \n", wo_params.get_worker_id())
    logger.info(" Requester ID %s \n", wo_params.get_requester_id())
    logger.info(" To string %s \n", wo_params.to_string())
    logger.info(" worker id %s \n", wo_params.get_worker_id())
    logger.info("Work order submit request : %s, \n \n ",
                wo_params.to_jrpc_string(req_id))
    response = work_order.work_order_submit(
        wo_params.get_work_order_id(),
        wo_params.get_worker_id(),
        wo_params.get_requester_id(),
        wo_params.to_string(),
        id=req_id
    )
    if env.proxy_mode and not isinstance(response, dict):
        # Proxy connectors return an enum-like status; 0 is mapped to the
        # JSON-RPC "pending" code 5, anything else passes through.
        code = 5 if response.value == 0 else response.value
        response = {"error": {"code": code},
                    "workOrderId": wo_params.get_work_order_id()}
    logger.info('**********Received Response*********\n%s\n', response)
    return response
def worker_lookup_sdk(worker_type, input_json=None):
    """Look up workers of the given type on the configured blockchain."""
    logger.info("WorkerLookUp SDK code path\n")
    jrpc_req_id = 3 if input_json is None else input_json["id"]
    config = config_file_read()
    worker_dict = {'SGX': WorkerType.TEE_SGX,
                   'MPC': WorkerType.MPC, 'ZK': WorkerType.ZK}
    worker_registry = _create_worker_registry_instance(env.blockchain_type, config)
    if env.blockchain_type == "ethereum":
        # Ethereum lookups query TEE-SGX for any recognized worker type.
        worker = WorkerType.TEE_SGX if worker_type in worker_dict \
            else worker_type
        worker_lookup_response = worker_registry.worker_lookup(
            worker,
            config["WorkerConfig"]["OrganizationId"],
            config["WorkerConfig"]["ApplicationTypeId"],
            jrpc_req_id
        )
    else:
        worker_lookup_response = worker_registry.worker_lookup(
            worker_type=worker_dict.get(worker_type, worker_type),
            id=jrpc_req_id)
    logger.info("\n Worker lookup response: {}\n".format(
        json.dumps(worker_lookup_response, indent=4)
    ))
    return worker_lookup_response
def worker_register_sdk(register_params, input_json):
    """Register a worker via the registry connector.

    Parameters:
        register_params: dict with worker_id, workerType ('SGX'/'MPC'/'ZK'),
            organization_id, application_type_id and a details dict.
        input_json: JSON-RPC request dict supplying the request id, or None.

    Returns the connector's registration response.
    """
    logger.info("WorkerRegister SDK code path\n")
    # Guard before subscripting: the original read input_json["id"]
    # unconditionally first, raising TypeError whenever input_json was None.
    if input_json is None:
        jrpc_req_id = 3
    else:
        jrpc_req_id = input_json["id"]
    worker_dict = {'SGX': WorkerType.TEE_SGX,
                   'MPC': WorkerType.MPC, 'ZK': WorkerType.ZK}
    config = config_file_read()
    worker_registry = _create_worker_registry_instance(env.blockchain_type, config)
    if env.proxy_mode and (env.blockchain_type == "ethereum"):
        # Ethereum proxy registration takes no JSON-RPC request id.
        worker_register_result = worker_registry.worker_register(
            register_params["worker_id"],
            worker_dict[register_params["workerType"]],
            register_params["organization_id"],
            register_params["application_type_id"],
            json.dumps(register_params["details"]))
    else:
        worker_register_result = worker_registry.worker_register(
            register_params["worker_id"],
            worker_dict[register_params["workerType"]],
            register_params["organization_id"],
            register_params["application_type_id"],
            json.dumps(register_params["details"]), jrpc_req_id)
    logger.info("\n Worker register response: {}\n".format(
        json.dumps(worker_register_result, indent=4)))
    return worker_register_result
def worker_setstatus_sdk(set_status_params, input_json):
    """Set a worker's status (active/off-line/decommissioned/compromised)."""
    logger.info("WorkerSetStatus SDK code path\n")
    logger.info("Worker status params %s \n", set_status_params)
    jrpc_req_id = 3 if input_json is None else input_json["id"]
    status_dict = {1: WorkerStatus.ACTIVE, 2: WorkerStatus.OFF_LINE,
                   3: WorkerStatus.DECOMMISSIONED,
                   4: WorkerStatus.COMPROMISED}
    config = config_file_read()
    worker_registry = _create_worker_registry_instance(env.blockchain_type, config)
    status = status_dict[set_status_params["status"]]
    if env.proxy_mode and (env.blockchain_type == "ethereum"):
        worker_setstatus_result = worker_registry.worker_set_status(
            set_status_params["worker_id"], status)
    else:
        worker_setstatus_result = worker_registry.worker_set_status(
            set_status_params["worker_id"], status, jrpc_req_id)
    if env.proxy_mode:
        # Proxy connectors return an enum; normalize to the JSON-RPC shape.
        worker_setstatus_result = {
            "error": {"code": worker_setstatus_result.value, "message": ""}}
    logger.info("\n Worker setstatus response: {}\n".format(worker_setstatus_result))
    return worker_setstatus_result
def worker_retrieve_sdk(worker_id, input_json=None):
    """Retrieve a worker's registration details by worker id."""
    logger.info("WorkerRetrieve SDK code path\n")
    worker_obj = worker_details.SGXWorkerDetails()
    jrpc_req_id = 11 if input_json is None else input_json["id"]
    config = config_file_read()
    worker_registry = _create_worker_registry_instance(env.blockchain_type, config)
    worker_retrieve_result = worker_registry.worker_retrieve(worker_id, jrpc_req_id)
    if env.proxy_mode:
        if worker_retrieve_result is None:
            worker_retrieve_result = {
                "error": {"code": '', "message": "Worker Id not found"}}
        else:
            # Proxy result is a sequence; fields 1..4 look like worker type,
            # org id, application type id and JSON-encoded details —
            # confirm against the proxy connector's contract.
            response = worker_retrieve_result
            worker_obj.load_worker(json.loads(response[4]))
            worker_retrieve_result = {
                "result": {
                    "workerType": response[1],
                    "organizationId": response[2],
                    "applicationTypeId": response[3],
                    "details": json.loads(response[4]),
                }
            }
    if "error" in worker_retrieve_result:
        logger.error("Unable to retrieve worker details\n")
        return worker_retrieve_result
    logger.info("\n Worker retrieve response: {}\n".format(worker_retrieve_result))
    worker_obj.worker_id = worker_id
    worker_retrieve_result["workerId"] = worker_id
    logger.info("\n Worker ID\n%s\n", worker_id)
    return worker_retrieve_result
def worker_update_sdk(update_params, input_json=None):
    """Update a registered worker's details."""
    logger.info("WorkerUpdate SDK code path\n")
    logger.info("Worker update params %s \n", update_params)
    worker_obj = worker_details.SGXWorkerDetails()
    jrpc_req_id = 11 if input_json is None else input_json["id"]
    config = config_file_read()
    worker_registry = _create_worker_registry_instance(env.blockchain_type, config)
    details_json = json.dumps(update_params["details"])
    if env.proxy_mode and (env.blockchain_type == "ethereum"):
        worker_update_result = worker_registry.worker_update(
            update_params["worker_id"], details_json)
    else:
        worker_update_result = worker_registry.worker_update(
            update_params["worker_id"], details_json, jrpc_req_id)
    if env.proxy_mode and not isinstance(worker_update_result, dict):
        # Proxy connectors return an enum; normalize to the JSON-RPC shape.
        worker_update_result = {
            "error": {"code": worker_update_result.value, "message": ""}}
    logger.info("\n Worker update response: {}\n".format(worker_update_result))
    return worker_update_result
def workorder_receiptcreate_sdk(wo_create_receipt, input_json):
    """Create a work-order receipt from the supplied receipt fields."""
    logger.info("WorkerReceiptCreate SDK code path\n")
    jrpc_req_id = input_json["id"]
    config = config_file_read()
    receipt_impl = _create_work_order_receipt_instance(env.blockchain_type, config)
    # Forward every receipt field positionally to the connector.
    receipt_resp = receipt_impl.work_order_receipt_create(
        wo_create_receipt["workOrderId"],
        wo_create_receipt["workerServiceId"],
        wo_create_receipt["workerId"],
        wo_create_receipt["requesterId"],
        wo_create_receipt["receiptCreateStatus"],
        wo_create_receipt["workOrderRequestHash"],
        wo_create_receipt["requesterGeneratedNonce"],
        wo_create_receipt["requesterSignature"],
        wo_create_receipt["signatureRules"],
        wo_create_receipt["receiptVerificationKey"],
        jrpc_req_id
    )
    logger.info("Work order create receipt response : {} \n \n ".format(
        receipt_resp
    ))
    return receipt_resp
def workorder_receiptretrieve_sdk(workorderId, input_json):
    """Retrieve a work-order receipt, then fetch its most recent update."""
    logger.info("ReceiptRetrieve SDK code path\n")
    jrpc_req_id = input_json["id"]
    config = config_file_read()
    receipt_impl = _create_work_order_receipt_instance(env.blockchain_type, config)
    receipt_resp = receipt_impl.work_order_receipt_retrieve(
        workorderId, jrpc_req_id)
    logger.info("Work order retrieve receipt response : {} \n \n ".format(
        receipt_resp
    ))
    # Request the newest update by passing an out-of-range update index.
    jrpc_req_id += 1
    receipt_update_retrieve = receipt_impl.work_order_receipt_update_retrieve(
        workorderId,
        None,
        1 << 32,
        id=jrpc_req_id)
    logger.info("\n Last update to receipt receipt is:\n {}".format(
        json.dumps(receipt_update_retrieve, indent=4)
    ))
    return receipt_update_retrieve
def workorder_getresult_sdk(workorderId, input_json):
    """Fetch the result of a previously submitted work order."""
    logger.info("WorkOderGetResult SDK code path\n")
    jrpc_req_id = input_json["id"]
    config = config_file_read()
    work_order = _create_work_order_instance(env.blockchain_type, config)
    logger.info("----- Validating WorkOrderGetResult Response ------")
    get_result_res = work_order.work_order_get_result(workorderId, jrpc_req_id)
    logger.info("****** WorkOrderGetResult Received Response*****\n%s\n",
                get_result_res)
    if env.proxy_mode and (get_result_res is None):
        # Proxy connectors signal "no result" with None; map to an error.
        get_result_res = {"error": {"code": -1}}
    return get_result_res
def workorder_receiptlookup_sdk(requesterId, input_json):
    """Look up work-order receipts filed by the given requester id."""
    logger.info("ReceiptRetrieve SDK code path\n")
    jrpc_req_id = input_json["id"]
    config = config_file_read()
    receipt_impl = _create_work_order_receipt_instance(env.blockchain_type, config)
    receipt_resp = receipt_impl.work_order_receipt_lookup(
        requester_id=requesterId, id=jrpc_req_id)
    logger.info("Work order receipt lookup response : {} \n \n ".format(
        receipt_resp
    ))
    return receipt_resp
| 39.966234 | 94 | 0.682394 | import logging
import json
import time
import os
import config.config as pconfig
import env
from avalon_sdk.connector.direct.jrpc.jrpc_worker_registry import \
JRPCWorkerRegistryImpl
from avalon_sdk.connector.direct.jrpc.jrpc_work_order import \
JRPCWorkOrderImpl
from avalon_sdk.worker.worker_details import \
WorkerType, WorkerStatus
from avalon_sdk.connector.direct.jrpc.jrpc_work_order_receipt \
import JRPCWorkOrderReceiptImpl
from avalon_sdk.connector.blockchains.fabric.fabric_worker_registry \
import FabricWorkerRegistryImpl
from avalon_sdk.connector.blockchains.fabric.fabric_work_order \
import FabricWorkOrderImpl
from avalon_sdk.connector.blockchains.ethereum.ethereum_worker_registry \
import EthereumWorkerRegistryImpl
from avalon_sdk.connector.blockchains.ethereum.ethereum_work_order \
import EthereumWorkOrderProxyImpl
import avalon_sdk.worker.worker_details as worker_details
logger = logging.getLogger(__name__)
TCFHOME = os.environ.get("TCF_HOME", "../../")
def config_file_read():
config = pconfig.parse_configuration_files(
env.conffiles, env.confpaths)
logger.info(" URI client %s \n", config["tcf"]["json_rpc_uri"])
config["tcf"]["json_rpc_uri"] = env.uri_client_sdk
return config
def _create_worker_registry_instance(blockchain_type, config):
if env.proxy_mode and blockchain_type == 'fabric':
return FabricWorkerRegistryImpl(config)
elif env.proxy_mode and blockchain_type == 'ethereum':
return EthereumWorkerRegistryImpl(config)
else:
logger.info("Direct SDK code path\n")
return JRPCWorkerRegistryImpl(config)
def _create_work_order_instance(blockchain_type, config):
if env.proxy_mode and blockchain_type == 'fabric':
return FabricWorkOrderImpl(config)
elif env.proxy_mode and blockchain_type == 'ethereum':
return EthereumWorkOrderProxyImpl(config)
else:
logger.info("Direct SDK code path\n")
return JRPCWorkOrderImpl(config)
def _create_work_order_receipt_instance(blockchain_type, config):
if env.proxy_mode and blockchain_type == 'fabric':
return None
elif env.proxy_mode and blockchain_type == 'ethereum':
return None
else:
logger.info("Direct SDK code path\n")
return JRPCWorkOrderReceiptImpl(config)
def submit_request_listener(
uri_client, input_json_str, output_json_file_name):
logger.info("Listener code path\n")
req_time = time.strftime("%Y%m%d_%H%M%S")
request_method = input_json_str["method"]
input_json_str = json.dumps(input_json_str)
signed_input_file = ('./results/' + output_json_file_name + '_' + req_time
+ '_request.json')
with open(signed_input_file, 'w') as req_file:
req_file.write(json.dumps(input_json_str, ensure_ascii=False))
logger.info("in submit listener %s", input_json_str)
if request_method == "WorkOrderGetResult":
logger.info("- Validating WorkOrderGetResult Response-")
response = {}
response_timeout_start = time.time()
response_timeout_multiplier = ((6000 / 3600) + 6) * 3
while "result" not in response:
if "error" in response:
if response["error"]["code"] != 5:
logger.info('WorkOrderGetResult - '
'Response received with error code. ')
err_cd = 1
break
response_timeout_end = time.time()
if ((response_timeout_end - response_timeout_start) >
response_timeout_multiplier):
logger.info('ERROR: WorkOrderGetResult response is not \
received within expected time.')
break
response = uri_client._postmsg(input_json_str)
else:
logger.info('**********Received Request*********\n%s\n', input_json_str)
response = uri_client._postmsg(input_json_str)
logger.info('**********Received Response*********\n%s\n', response)
response_output_file = ('./results/' + output_json_file_name + '_'
+ req_time + '_response.json')
with open(response_output_file, 'w') as resp_file:
resp_file.write(json.dumps(response, ensure_ascii=False))
return response
def workorder_submit_sdk(wo_params, input_json_obj=None):
logger.info("WorkOrderSubmit SDK code path\n")
if input_json_obj is None:
req_id = 3
else:
req_id = input_json_obj["id"]
config = config_file_read()
work_order = _create_work_order_instance(env.blockchain_type, config)
logger.info(" work order id %s \n", wo_params.get_work_order_id())
logger.info(" worker id %s \n", wo_params.get_worker_id())
logger.info(" Requester ID %s \n", wo_params.get_requester_id())
logger.info(" To string %s \n", wo_params.to_string())
logger.info(" worker id %s \n", wo_params.get_worker_id())
logger.info("Work order submit request : %s, \n \n ",
wo_params.to_jrpc_string(req_id))
response = work_order.work_order_submit(
wo_params.get_work_order_id(),
wo_params.get_worker_id(),
wo_params.get_requester_id(),
wo_params.to_string(),
id=req_id
)
if env.proxy_mode and (type(response) != dict):
if response.value == 0:
response = {"error": {"code": 5}}
else:
response = {"error": {"code": response.value}}
response["workOrderId"] = wo_params.get_work_order_id()
logger.info('**********Received Response*********\n%s\n', response)
return response
def worker_lookup_sdk(worker_type, input_json=None):
logger.info("WorkerLookUp SDK code path\n")
if input_json is None:
jrpc_req_id = 3
else:
jrpc_req_id = input_json["id"]
config = config_file_read()
worker_dict = {'SGX': WorkerType.TEE_SGX,
'MPC': WorkerType.MPC, 'ZK': WorkerType.ZK}
worker_registry = _create_worker_registry_instance(env.blockchain_type, config)
if env.blockchain_type == "ethereum":
if worker_type in worker_dict.keys():
worker = WorkerType.TEE_SGX
else:
worker = worker_type
worker_lookup_response = worker_registry.worker_lookup(
worker,
config["WorkerConfig"]["OrganizationId"],
config["WorkerConfig"]["ApplicationTypeId"],
jrpc_req_id
)
else:
worker_lookup_response = worker_registry.worker_lookup(
worker_type=worker_dict.get(worker_type, worker_type), id=jrpc_req_id)
logger.info("\n Worker lookup response: {}\n".format(
json.dumps(worker_lookup_response, indent=4)
))
return worker_lookup_response
def worker_register_sdk(register_params, input_json):
logger.info("WorkerRegister SDK code path\n")
jrpc_req_id = input_json["id"]
if input_json is None:
jrpc_req_id = 3
else:
jrpc_req_id = input_json["id"]
worker_dict = {'SGX': WorkerType.TEE_SGX,
'MPC': WorkerType.MPC, 'ZK': WorkerType.ZK}
config = config_file_read()
worker_registry = _create_worker_registry_instance(env.blockchain_type, config)
if env.proxy_mode and (env.blockchain_type == "ethereum"):
worker_register_result = worker_registry.worker_register(
register_params["worker_id"],
worker_dict[register_params["workerType"]],
register_params["organization_id"],
register_params["application_type_id"],
json.dumps(register_params["details"]))
else:
worker_register_result = worker_registry.worker_register(
register_params["worker_id"],
worker_dict[register_params["workerType"]],
register_params["organization_id"],
register_params["application_type_id"],
json.dumps(register_params["details"]), jrpc_req_id)
logger.info("\n Worker register response: {}\n".format(
json.dumps(worker_register_result, indent=4)))
return worker_register_result
def worker_setstatus_sdk(set_status_params, input_json):
logger.info("WorkerSetStatus SDK code path\n")
logger.info("Worker status params %s \n", set_status_params)
if input_json is None:
jrpc_req_id = 3
else:
jrpc_req_id = input_json["id"]
status_dict = {1: WorkerStatus.ACTIVE, 2: WorkerStatus.OFF_LINE,
3: WorkerStatus.DECOMMISSIONED,
4: WorkerStatus.COMPROMISED}
config = config_file_read()
worker_registry = _create_worker_registry_instance(env.blockchain_type, config)
if env.proxy_mode and (env.blockchain_type == "ethereum"):
worker_setstatus_result = worker_registry.worker_set_status(
set_status_params["worker_id"],
status_dict[set_status_params["status"]])
else:
worker_setstatus_result = worker_registry.worker_set_status(
set_status_params["worker_id"],
status_dict[set_status_params["status"]], jrpc_req_id)
if env.proxy_mode:
result = worker_setstatus_result
worker_setstatus_result = {}
worker_setstatus_result["error"] = {"code" : result.value, "message" : ""}
logger.info("\n Worker setstatus response: {}\n".format(worker_setstatus_result))
return worker_setstatus_result
def worker_retrieve_sdk(worker_id, input_json=None):
logger.info("WorkerRetrieve SDK code path\n")
worker_obj = worker_details.SGXWorkerDetails()
if input_json is None:
jrpc_req_id = 11
else:
jrpc_req_id = input_json["id"]
config = config_file_read()
worker_registry = _create_worker_registry_instance(env.blockchain_type, config)
worker_retrieve_result = worker_registry.worker_retrieve(worker_id, jrpc_req_id)
if env.proxy_mode:
if worker_retrieve_result is None:
worker_retrieve_result = {"error": {"code": '', "message": "Worker Id not found"}}
else:
response = worker_retrieve_result
worker_obj.load_worker(json.loads(response[4]))
worker_retrieve_result = {}
result = {"workerType": response[1],
"organizationId": response[2],
"applicationTypeId": response[3],
"details": json.loads(response[4])}
worker_retrieve_result["result"] = result
if "error" in worker_retrieve_result:
logger.error("Unable to retrieve worker details\n")
return worker_retrieve_result
logger.info("\n Worker retrieve response: {}\n".format(worker_retrieve_result))
worker_obj.worker_id = worker_id
worker_retrieve_result["workerId"] = worker_id
logger.info("\n Worker ID\n%s\n", worker_id)
return worker_retrieve_result
def worker_update_sdk(update_params, input_json=None):
logger.info("WorkerUpdate SDK code path\n")
logger.info("Worker update params %s \n", update_params)
worker_obj = worker_details.SGXWorkerDetails()
if input_json is None:
jrpc_req_id = 11
else:
jrpc_req_id = input_json["id"]
config = config_file_read()
worker_registry = _create_worker_registry_instance(env.blockchain_type, config)
if env.proxy_mode and (env.blockchain_type == "ethereum"):
worker_update_result = worker_registry.worker_update(
update_params["worker_id"],
json.dumps(update_params["details"]))
else:
worker_update_result = worker_registry.worker_update(
update_params["worker_id"],
json.dumps(update_params["details"]), jrpc_req_id)
if env.proxy_mode and (type(worker_update_result) != dict):
response = worker_update_result.value
worker_update_result = {"error": {"code": response, "message" : ""}}
logger.info("\n Worker update response: {}\n".format(worker_update_result))
return worker_update_result
def workorder_receiptcreate_sdk(wo_create_receipt, input_json):
logger.info("WorkerReceiptCreate SDK code path\n")
jrpc_req_id = input_json["id"]
config = config_file_read()
wo_receipt = _create_work_order_receipt_instance(env.blockchain_type, config)
wo_receipt_resp = wo_receipt.work_order_receipt_create(
wo_create_receipt["workOrderId"],
wo_create_receipt["workerServiceId"],
wo_create_receipt["workerId"],
wo_create_receipt["requesterId"],
wo_create_receipt["receiptCreateStatus"],
wo_create_receipt["workOrderRequestHash"],
wo_create_receipt["requesterGeneratedNonce"],
wo_create_receipt["requesterSignature"],
wo_create_receipt["signatureRules"],
wo_create_receipt["receiptVerificationKey"],
jrpc_req_id
)
logger.info("Work order create receipt response : {} \n \n ".format(
wo_receipt_resp
))
return wo_receipt_resp
def workorder_receiptretrieve_sdk(workorderId, input_json):
logger.info("ReceiptRetrieve SDK code path\n")
jrpc_req_id = input_json["id"]
config = config_file_read()
wo_receipt = _create_work_order_receipt_instance(env.blockchain_type, config)
wo_receipt_resp = wo_receipt.work_order_receipt_retrieve(
workorderId, jrpc_req_id)
logger.info("Work order retrieve receipt response : {} \n \n ".format(
wo_receipt_resp
))
jrpc_req_id += 1
receipt_update_retrieve = \
wo_receipt.work_order_receipt_update_retrieve(
workorderId,
None,
1 << 32,
id=jrpc_req_id)
logger.info("\n Last update to receipt receipt is:\n {}".format(
json.dumps(receipt_update_retrieve, indent=4)
))
return receipt_update_retrieve
def workorder_getresult_sdk(workorderId, input_json):
logger.info("WorkOderGetResult SDK code path\n")
jrpc_req_id = input_json["id"]
config = config_file_read()
work_order = _create_work_order_instance(env.blockchain_type, config)
logger.info("----- Validating WorkOrderGetResult Response ------")
get_result_res = work_order.work_order_get_result(
workorderId, jrpc_req_id)
logger.info("****** WorkOrderGetResult Received Response*****\n%s\n", get_result_res)
if env.proxy_mode and (get_result_res is None):
get_result_res = {"error": {"code": -1}}
return get_result_res
def workorder_receiptlookup_sdk(requesterId, input_json):
logger.info("ReceiptRetrieve SDK code path\n")
jrpc_req_id = input_json["id"]
config = config_file_read()
wo_receipt = _create_work_order_receipt_instance(env.blockchain_type, config)
wo_receipt_resp = wo_receipt.work_order_receipt_lookup(
requester_id=requesterId, id=jrpc_req_id)
logger.info("Work order receipt lookup response : {} \n \n ".format(
wo_receipt_resp
))
return wo_receipt_resp
| true | true |
f724badd51da8585ae02ba905411f8a5bc0b42c6 | 7,728 | py | Python | pandas/tests/indexes/timedeltas/test_timedelta.py | hkennyv/pandas | 31875eb3d8a56f359c2f529f86b867572d5dfeb1 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2020-04-26T22:11:21.000Z | 2020-04-26T22:11:21.000Z | pandas/tests/indexes/timedeltas/test_timedelta.py | hkennyv/pandas | 31875eb3d8a56f359c2f529f86b867572d5dfeb1 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null | pandas/tests/indexes/timedeltas/test_timedelta.py | hkennyv/pandas | 31875eb3d8a56f359c2f529f86b867572d5dfeb1 | [
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause"
] | 1 | 2020-06-19T11:52:05.000Z | 2020-06-19T11:52:05.000Z | from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Int64Index,
Series,
Timedelta,
TimedeltaIndex,
array,
date_range,
timedelta_range,
)
import pandas._testing as tm
from ..datetimelike import DatetimeLike
randn = np.random.randn
class TestTimedeltaIndex(DatetimeLike):
_holder = TimedeltaIndex
@pytest.fixture
def indices(self):
return tm.makeTimedeltaIndex(10)
def create_index(self) -> TimedeltaIndex:
index = pd.to_timedelta(range(5), unit="d")._with_freq("infer")
assert index.freq == "D"
return index + pd.offsets.Hour(1)
def test_numeric_compat(self):
# Dummy method to override super's version; this test is now done
# in test_arithmetic.py
pass
def test_shift(self):
pass # this is handled in test_arithmetic.py
def test_pickle_compat_construction(self):
pass
def test_isin(self):
index = tm.makeTimedeltaIndex(4)
result = index.isin(index)
assert result.all()
result = index.isin(list(index))
assert result.all()
tm.assert_almost_equal(
index.isin([index[2], 5]), np.array([False, False, True, False])
)
def test_factorize(self):
idx1 = TimedeltaIndex(["1 day", "1 day", "2 day", "2 day", "3 day", "3 day"])
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = TimedeltaIndex(["1 day", "2 day", "3 day"])
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# freq must be preserved
idx3 = timedelta_range("1 day", periods=4, freq="s")
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
arr, idx = idx3.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
def test_sort_values(self):
idx = TimedeltaIndex(["4d", "1d", "2d"])
ordered = idx.sort_values()
assert ordered.is_monotonic
ordered = idx.sort_values(ascending=False)
assert ordered[::-1].is_monotonic
ordered, dexer = idx.sort_values(return_indexer=True)
assert ordered.is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0]), check_dtype=False)
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
assert ordered[::-1].is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1]), check_dtype=False)
@pytest.mark.parametrize("klass", [list, np.array, array, Series])
def test_searchsorted_different_argument_classes(self, klass):
idx = TimedeltaIndex(["1 day", "2 days", "3 days"])
result = idx.searchsorted(klass(idx))
expected = np.arange(len(idx), dtype=result.dtype)
tm.assert_numpy_array_equal(result, expected)
result = idx._data.searchsorted(klass(idx))
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"arg",
[[1, 2], ["a", "b"], [pd.Timestamp("2020-01-01", tz="Europe/London")] * 2],
)
def test_searchsorted_invalid_argument_dtype(self, arg):
idx = TimedeltaIndex(["1 day", "2 days", "3 days"])
msg = "searchsorted requires compatible dtype"
with pytest.raises(TypeError, match=msg):
idx.searchsorted(arg)
def test_argmin_argmax(self):
idx = TimedeltaIndex(["1 day 00:00:05", "1 day 00:00:01", "1 day 00:00:02"])
assert idx.argmin() == 1
assert idx.argmax() == 0
def test_misc_coverage(self):
rng = timedelta_range("1 day", periods=5)
result = rng.groupby(rng.days)
assert isinstance(list(result.values())[0][0], Timedelta)
idx = TimedeltaIndex(["3d", "1d", "2d"])
assert not idx.equals(list(idx))
non_td = Index(list("abc"))
assert not idx.equals(list(non_td))
def test_map(self):
# test_map_dictlike generally tests
rng = timedelta_range("1 day", periods=10)
f = lambda x: x.days
result = rng.map(f)
exp = Int64Index([f(x) for x in rng])
tm.assert_index_equal(result, exp)
def test_pass_TimedeltaIndex_to_index(self):
rng = timedelta_range("1 days", "10 days")
idx = Index(rng, dtype=object)
expected = Index(rng.to_pytimedelta(), dtype=object)
tm.assert_numpy_array_equal(idx.values, expected.values)
def test_append_numpy_bug_1681(self):
td = timedelta_range("1 days", "10 days", freq="2D")
a = DataFrame()
c = DataFrame({"A": "foo", "B": td}, index=td)
str(c)
result = a.append(c)
assert (result["B"] == td).all()
def test_fields(self):
rng = timedelta_range("1 days, 10:11:12.100123456", periods=2, freq="s")
tm.assert_index_equal(rng.days, Index([1, 1], dtype="int64"))
tm.assert_index_equal(
rng.seconds,
Index([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13], dtype="int64"),
)
tm.assert_index_equal(
rng.microseconds, Index([100 * 1000 + 123, 100 * 1000 + 123], dtype="int64")
)
tm.assert_index_equal(rng.nanoseconds, Index([456, 456], dtype="int64"))
msg = "'TimedeltaIndex' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# with nat
s = Series(rng)
s[1] = np.nan
tm.assert_series_equal(s.dt.days, Series([1, np.nan], index=[0, 1]))
tm.assert_series_equal(
s.dt.seconds, Series([10 * 3600 + 11 * 60 + 12, np.nan], index=[0, 1])
)
# preserve name (GH15589)
rng.name = "name"
assert rng.days.name == "name"
def test_freq_conversion(self):
# doc example
# series
td = Series(date_range("20130101", periods=4)) - Series(
date_range("20121201", periods=4)
)
td[2] += timedelta(minutes=5, seconds=3)
td[3] = np.nan
result = td / np.timedelta64(1, "D")
expected = Series([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan])
tm.assert_series_equal(result, expected)
result = td.astype("timedelta64[D]")
expected = Series([31, 31, 31, np.nan])
tm.assert_series_equal(result, expected)
result = td / np.timedelta64(1, "s")
expected = Series([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3, np.nan])
tm.assert_series_equal(result, expected)
result = td.astype("timedelta64[s]")
tm.assert_series_equal(result, expected)
# tdi
td = TimedeltaIndex(td)
result = td / np.timedelta64(1, "D")
expected = Index([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan])
tm.assert_index_equal(result, expected)
result = td.astype("timedelta64[D]")
expected = Index([31, 31, 31, np.nan])
tm.assert_index_equal(result, expected)
result = td / np.timedelta64(1, "s")
expected = Index([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3, np.nan])
tm.assert_index_equal(result, expected)
result = td.astype("timedelta64[s]")
tm.assert_index_equal(result, expected)
| 31.672131 | 88 | 0.601708 | from datetime import timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
Int64Index,
Series,
Timedelta,
TimedeltaIndex,
array,
date_range,
timedelta_range,
)
import pandas._testing as tm
from ..datetimelike import DatetimeLike
randn = np.random.randn
class TestTimedeltaIndex(DatetimeLike):
_holder = TimedeltaIndex
@pytest.fixture
def indices(self):
return tm.makeTimedeltaIndex(10)
def create_index(self) -> TimedeltaIndex:
index = pd.to_timedelta(range(5), unit="d")._with_freq("infer")
assert index.freq == "D"
return index + pd.offsets.Hour(1)
def test_numeric_compat(self):
# in test_arithmetic.py
pass
def test_shift(self):
pass # this is handled in test_arithmetic.py
def test_pickle_compat_construction(self):
pass
def test_isin(self):
index = tm.makeTimedeltaIndex(4)
result = index.isin(index)
assert result.all()
result = index.isin(list(index))
assert result.all()
tm.assert_almost_equal(
index.isin([index[2], 5]), np.array([False, False, True, False])
)
def test_factorize(self):
idx1 = TimedeltaIndex(["1 day", "1 day", "2 day", "2 day", "3 day", "3 day"])
exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
exp_idx = TimedeltaIndex(["1 day", "2 day", "3 day"])
arr, idx = idx1.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
arr, idx = idx1.factorize(sort=True)
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, exp_idx)
# freq must be preserved
idx3 = timedelta_range("1 day", periods=4, freq="s")
exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)
arr, idx = idx3.factorize()
tm.assert_numpy_array_equal(arr, exp_arr)
tm.assert_index_equal(idx, idx3)
def test_sort_values(self):
idx = TimedeltaIndex(["4d", "1d", "2d"])
ordered = idx.sort_values()
assert ordered.is_monotonic
ordered = idx.sort_values(ascending=False)
assert ordered[::-1].is_monotonic
ordered, dexer = idx.sort_values(return_indexer=True)
assert ordered.is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0]), check_dtype=False)
ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)
assert ordered[::-1].is_monotonic
tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1]), check_dtype=False)
@pytest.mark.parametrize("klass", [list, np.array, array, Series])
def test_searchsorted_different_argument_classes(self, klass):
idx = TimedeltaIndex(["1 day", "2 days", "3 days"])
result = idx.searchsorted(klass(idx))
expected = np.arange(len(idx), dtype=result.dtype)
tm.assert_numpy_array_equal(result, expected)
result = idx._data.searchsorted(klass(idx))
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"arg",
[[1, 2], ["a", "b"], [pd.Timestamp("2020-01-01", tz="Europe/London")] * 2],
)
def test_searchsorted_invalid_argument_dtype(self, arg):
idx = TimedeltaIndex(["1 day", "2 days", "3 days"])
msg = "searchsorted requires compatible dtype"
with pytest.raises(TypeError, match=msg):
idx.searchsorted(arg)
def test_argmin_argmax(self):
idx = TimedeltaIndex(["1 day 00:00:05", "1 day 00:00:01", "1 day 00:00:02"])
assert idx.argmin() == 1
assert idx.argmax() == 0
def test_misc_coverage(self):
rng = timedelta_range("1 day", periods=5)
result = rng.groupby(rng.days)
assert isinstance(list(result.values())[0][0], Timedelta)
idx = TimedeltaIndex(["3d", "1d", "2d"])
assert not idx.equals(list(idx))
non_td = Index(list("abc"))
assert not idx.equals(list(non_td))
def test_map(self):
# test_map_dictlike generally tests
rng = timedelta_range("1 day", periods=10)
f = lambda x: x.days
result = rng.map(f)
exp = Int64Index([f(x) for x in rng])
tm.assert_index_equal(result, exp)
def test_pass_TimedeltaIndex_to_index(self):
rng = timedelta_range("1 days", "10 days")
idx = Index(rng, dtype=object)
expected = Index(rng.to_pytimedelta(), dtype=object)
tm.assert_numpy_array_equal(idx.values, expected.values)
def test_append_numpy_bug_1681(self):
td = timedelta_range("1 days", "10 days", freq="2D")
a = DataFrame()
c = DataFrame({"A": "foo", "B": td}, index=td)
str(c)
result = a.append(c)
assert (result["B"] == td).all()
def test_fields(self):
rng = timedelta_range("1 days, 10:11:12.100123456", periods=2, freq="s")
tm.assert_index_equal(rng.days, Index([1, 1], dtype="int64"))
tm.assert_index_equal(
rng.seconds,
Index([10 * 3600 + 11 * 60 + 12, 10 * 3600 + 11 * 60 + 13], dtype="int64"),
)
tm.assert_index_equal(
rng.microseconds, Index([100 * 1000 + 123, 100 * 1000 + 123], dtype="int64")
)
tm.assert_index_equal(rng.nanoseconds, Index([456, 456], dtype="int64"))
msg = "'TimedeltaIndex' object has no attribute '{}'"
with pytest.raises(AttributeError, match=msg.format("hours")):
rng.hours
with pytest.raises(AttributeError, match=msg.format("minutes")):
rng.minutes
with pytest.raises(AttributeError, match=msg.format("milliseconds")):
rng.milliseconds
# with nat
s = Series(rng)
s[1] = np.nan
tm.assert_series_equal(s.dt.days, Series([1, np.nan], index=[0, 1]))
tm.assert_series_equal(
s.dt.seconds, Series([10 * 3600 + 11 * 60 + 12, np.nan], index=[0, 1])
)
# preserve name (GH15589)
rng.name = "name"
assert rng.days.name == "name"
def test_freq_conversion(self):
# doc example
# series
td = Series(date_range("20130101", periods=4)) - Series(
date_range("20121201", periods=4)
)
td[2] += timedelta(minutes=5, seconds=3)
td[3] = np.nan
result = td / np.timedelta64(1, "D")
expected = Series([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan])
tm.assert_series_equal(result, expected)
result = td.astype("timedelta64[D]")
expected = Series([31, 31, 31, np.nan])
tm.assert_series_equal(result, expected)
result = td / np.timedelta64(1, "s")
expected = Series([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3, np.nan])
tm.assert_series_equal(result, expected)
result = td.astype("timedelta64[s]")
tm.assert_series_equal(result, expected)
# tdi
td = TimedeltaIndex(td)
result = td / np.timedelta64(1, "D")
expected = Index([31, 31, (31 * 86400 + 5 * 60 + 3) / 86400.0, np.nan])
tm.assert_index_equal(result, expected)
result = td.astype("timedelta64[D]")
expected = Index([31, 31, 31, np.nan])
tm.assert_index_equal(result, expected)
result = td / np.timedelta64(1, "s")
expected = Index([31 * 86400, 31 * 86400, 31 * 86400 + 5 * 60 + 3, np.nan])
tm.assert_index_equal(result, expected)
result = td.astype("timedelta64[s]")
tm.assert_index_equal(result, expected)
| true | true |
f724bb27bccc8713b9577f3921f30f709db72e74 | 476 | py | Python | week05/server/week06lab2.py | tullowhurler/datarep | 38b145d9a49c1b29c02597381de93bbac4a7edae | [
"Apache-2.0"
] | null | null | null | week05/server/week06lab2.py | tullowhurler/datarep | 38b145d9a49c1b29c02597381de93bbac4a7edae | [
"Apache-2.0"
] | null | null | null | week05/server/week06lab2.py | tullowhurler/datarep | 38b145d9a49c1b29c02597381de93bbac4a7edae | [
"Apache-2.0"
] | null | null | null | import requests
import json
#html = '<h1>hello world</h1>This is html'
f = open("../../week05/carviewer.html", "r")
html = f.read()
#print (html)
apiKey = '46ceed910c24ff7cce8240e89ec7b71912f6f40f2ec55fd217ce150a d6d4f1c4'
url = 'https://api.html2pdf.app/v1/generate'
data = {'html': html,'apiKey': apiKey}
response = requests.post(url, json=data)
print (response.status_code)
newFile = open("lab06.02.01.htmlaspdf.pdf", "wb")
newFile.write(response.content) | 28 | 77 | 0.707983 | import requests
import json
f = open("../../week05/carviewer.html", "r")
html = f.read()
apiKey = '46ceed910c24ff7cce8240e89ec7b71912f6f40f2ec55fd217ce150a d6d4f1c4'
url = 'https://api.html2pdf.app/v1/generate'
data = {'html': html,'apiKey': apiKey}
response = requests.post(url, json=data)
print (response.status_code)
newFile = open("lab06.02.01.htmlaspdf.pdf", "wb")
newFile.write(response.content) | true | true |
f724bb485ff4b58748536534ebf2f5897e7eed1f | 2,258 | py | Python | azext_iot/__init__.py | YingXue/azure-iot-cli-extension | efe7897b1ae1d2a9953f501abe7654b84d69372d | [
"MIT"
] | 79 | 2017-09-25T19:29:17.000Z | 2022-03-30T20:55:57.000Z | azext_iot/__init__.py | YingXue/azure-iot-cli-extension | efe7897b1ae1d2a9953f501abe7654b84d69372d | [
"MIT"
] | 305 | 2018-01-17T01:12:10.000Z | 2022-03-23T22:38:11.000Z | azext_iot/__init__.py | YingXue/azure-iot-cli-extension | efe7897b1ae1d2a9953f501abe7654b84d69372d | [
"MIT"
] | 69 | 2017-11-14T00:30:46.000Z | 2022-03-01T17:11:45.000Z | # coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core import AzCommandsLoader
from azure.cli.core.commands import CliCommandType
from azext_iot._factory import iot_service_provisioning_factory
from azext_iot.constants import VERSION
import azext_iot._help # noqa: F401
from azext_iot.product.command_map import load_product_commands
iothub_ops = CliCommandType(operations_tmpl="azext_iot.operations.hub#{}")
iotdps_ops = CliCommandType(
operations_tmpl="azext_iot.operations.dps#{}",
client_factory=iot_service_provisioning_factory,
)
class IoTExtCommandsLoader(AzCommandsLoader):
def __init__(self, cli_ctx=None):
super(IoTExtCommandsLoader, self).__init__(cli_ctx=cli_ctx)
def load_command_table(self, args):
from azext_iot.commands import load_command_table
from azext_iot.iothub.command_map import load_iothub_commands
from azext_iot.central.command_map import load_central_commands
from azext_iot.digitaltwins.command_map import load_digitaltwins_commands
load_command_table(self, args)
load_iothub_commands(self, args)
load_central_commands(self, args)
load_digitaltwins_commands(self, args)
load_product_commands(self, args)
return self.command_table
def load_arguments(self, command):
from azext_iot._params import load_arguments
from azext_iot.iothub.params import load_iothub_arguments
from azext_iot.central.params import load_central_arguments
from azext_iot.digitaltwins.params import load_digitaltwins_arguments
from azext_iot.product.params import load_product_params
load_arguments(self, command)
load_iothub_arguments(self, command)
load_central_arguments(self, command)
load_digitaltwins_arguments(self, command)
load_product_params(self, command)
COMMAND_LOADER_CLS = IoTExtCommandsLoader
__version__ = VERSION
| 39.614035 | 94 | 0.712578 |
from azure.cli.core import AzCommandsLoader
from azure.cli.core.commands import CliCommandType
from azext_iot._factory import iot_service_provisioning_factory
from azext_iot.constants import VERSION
import azext_iot._help
from azext_iot.product.command_map import load_product_commands
iothub_ops = CliCommandType(operations_tmpl="azext_iot.operations.hub#{}")
iotdps_ops = CliCommandType(
operations_tmpl="azext_iot.operations.dps#{}",
client_factory=iot_service_provisioning_factory,
)
class IoTExtCommandsLoader(AzCommandsLoader):
def __init__(self, cli_ctx=None):
super(IoTExtCommandsLoader, self).__init__(cli_ctx=cli_ctx)
def load_command_table(self, args):
from azext_iot.commands import load_command_table
from azext_iot.iothub.command_map import load_iothub_commands
from azext_iot.central.command_map import load_central_commands
from azext_iot.digitaltwins.command_map import load_digitaltwins_commands
load_command_table(self, args)
load_iothub_commands(self, args)
load_central_commands(self, args)
load_digitaltwins_commands(self, args)
load_product_commands(self, args)
return self.command_table
def load_arguments(self, command):
from azext_iot._params import load_arguments
from azext_iot.iothub.params import load_iothub_arguments
from azext_iot.central.params import load_central_arguments
from azext_iot.digitaltwins.params import load_digitaltwins_arguments
from azext_iot.product.params import load_product_params
load_arguments(self, command)
load_iothub_arguments(self, command)
load_central_arguments(self, command)
load_digitaltwins_arguments(self, command)
load_product_params(self, command)
COMMAND_LOADER_CLS = IoTExtCommandsLoader
__version__ = VERSION
| true | true |
f724bc1d7c934473af2dfd10b1805761e9c49da5 | 97 | py | Python | Coursera_HSE_Py_Ass-5.4-Stairs.py | YuriRevin/Coursera_HSE_Py_Ass-5.4-Stairs | a6327a94c4b65fc75e3d99475e27464c85df0185 | [
"MIT"
] | null | null | null | Coursera_HSE_Py_Ass-5.4-Stairs.py | YuriRevin/Coursera_HSE_Py_Ass-5.4-Stairs | a6327a94c4b65fc75e3d99475e27464c85df0185 | [
"MIT"
] | null | null | null | Coursera_HSE_Py_Ass-5.4-Stairs.py | YuriRevin/Coursera_HSE_Py_Ass-5.4-Stairs | a6327a94c4b65fc75e3d99475e27464c85df0185 | [
"MIT"
] | null | null | null | for i in range(int(input())):
for a in range(1, i + 2):
print(a, end='')
print()
| 19.4 | 29 | 0.484536 | for i in range(int(input())):
for a in range(1, i + 2):
print(a, end='')
print()
| true | true |
f724bcbe159f2f3f6bd41ab81d7cfc844b8fbfab | 10,961 | bzl | Python | internal/pkg_npm/pkg_npm.bzl | mr-tim/rules_nodejs | 7648412d96828875343b0d9c74ddf4d7531eed72 | [
"Apache-2.0"
] | 1 | 2020-10-25T10:29:06.000Z | 2020-10-25T10:29:06.000Z | internal/pkg_npm/pkg_npm.bzl | samschlegel/rules_nodejs | 21836475c2294476a5a792d5fb0ee3e713f7c6fe | [
"Apache-2.0"
] | 8 | 2021-03-11T00:12:31.000Z | 2022-02-27T07:35:43.000Z | internal/pkg_npm/pkg_npm.bzl | samschlegel/rules_nodejs | 21836475c2294476a5a792d5fb0ee3e713f7c6fe | [
"Apache-2.0"
] | null | null | null | """npm packaging
Note, this is intended for sharing library code with non-Bazel consumers.
If all users of your library code use Bazel, they should just add your library
to the `deps` of one of their targets.
"""
load("//:providers.bzl", "DeclarationInfo", "JSNamedModuleInfo", "LinkablePackageInfo", "NodeContextInfo")
_DOC = """The pkg_npm rule creates a directory containing a publishable npm artifact.
Example:
```python
load("@build_bazel_rules_nodejs//:index.bzl", "pkg_npm")
pkg_npm(
name = "my_package",
srcs = ["package.json"],
deps = [":my_typescript_lib"],
substitutions = {"//internal/": "//"},
)
```
You can use a pair of `// BEGIN-INTERNAL ... // END-INTERNAL` comments to mark regions of files that should be elided during publishing.
For example:
```javascript
function doThing() {
// BEGIN-INTERNAL
// This is a secret internal-only comment
doInternalOnlyThing();
// END-INTERNAL
}
```
With the Bazel stamping feature, pkg_npm will replace any placeholder version in your package with the actual version control tag.
See the [stamping documentation](https://github.com/bazelbuild/rules_nodejs/blob/master/docs/index.md#stamping)
Usage:
`pkg_npm` yields three labels. Build the package directory using the default label:
```sh
$ bazel build :my_package
Target //:my_package up-to-date:
bazel-out/fastbuild/bin/my_package
$ ls -R bazel-out/fastbuild/bin/my_package
```
Dry-run of publishing to npm, calling `npm pack` (it builds the package first if needed):
```sh
$ bazel run :my_package.pack
INFO: Running command line: bazel-out/fastbuild/bin/my_package.pack
my-package-name-1.2.3.tgz
$ tar -tzf my-package-name-1.2.3.tgz
```
Actually publish the package with `npm publish` (also builds first):
```sh
# Check login credentials
$ bazel run @nodejs//:npm_node_repositories who
# Publishes the package
$ bazel run :my_package.publish
```
You can pass arguments to npm by escaping them from Bazel using a double-hyphen, for example:
`bazel run my_package.publish -- --tag=next`
"""
# Used in angular/angular /packages/bazel/src/ng_package/ng_package.bzl
PKG_NPM_ATTRS = {
"package_name": attr.string(
doc = """Optional package_name that this npm package may be imported as.""",
),
"srcs": attr.label_list(
doc = """Files inside this directory which are simply copied into the package.""",
allow_files = True,
),
"hide_build_files": attr.bool(
doc = """If set BUILD and BUILD.bazel files are prefixed with `_` in the npm package.
The default is True since npm packages that contain BUILD files don't work with
`yarn_install` and `npm_install` without a post-install step that deletes or renames them.
NB: Bazel has a change in https://github.com/bazelbuild/bazel/pull/10261
(expected in version 2.1) that adds .bazelignore
support for external repositories, which will make this attribute obsolete.""",
default = True,
),
"nested_packages": attr.label_list(
doc = """Other pkg_npm rules whose content is copied into this package.""",
allow_files = True,
),
"node_context_data": attr.label(
default = "@build_bazel_rules_nodejs//internal:node_context_data",
providers = [NodeContextInfo],
doc = "Internal use only",
),
"replace_with_version": attr.string(
doc = """If set this value is replaced with the version stamp data.
See the section on stamping in the README.""",
default = "0.0.0-PLACEHOLDER",
),
"substitutions": attr.string_dict(
doc = """Key-value pairs which are replaced in all the files while building the package.""",
),
"vendor_external": attr.string_list(
doc = """External workspaces whose contents should be vendored into this workspace.
Avoids 'external/foo' path segments in the resulting package.""",
),
"deps": attr.label_list(
doc = """Other targets which produce files that should be included in the package, such as `rollup_bundle`""",
allow_files = True,
),
"_npm_script_generator": attr.label(
default = Label("//internal/pkg_npm:npm_script_generator"),
cfg = "host",
executable = True,
),
"_packager": attr.label(
default = Label("//internal/pkg_npm:packager"),
cfg = "host",
executable = True,
),
"_run_npm_template": attr.label(
default = Label("@nodejs//:run_npm.sh.template"),
allow_single_file = True,
),
}
# Used in angular/angular /packages/bazel/src/ng_package/ng_package.bzl
PKG_NPM_OUTPUTS = {
"pack": "%{name}.pack",
"publish": "%{name}.publish",
}
# Takes a depset of files and returns a corresponding list of file paths without any files
# that aren't part of the specified package path. Also include files from external repositories
# that explicitly specified in the vendor_external list.
def _filter_out_external_files(ctx, files, package_path):
result = []
for file in files:
# NB: package_path may be an empty string
if file.short_path.startswith(package_path) and not file.short_path.startswith("../"):
result.append(file.path)
else:
for v in ctx.attr.vendor_external:
if file.short_path.startswith("../%s/" % v):
result.append(file.path)
return result
# Used in angular/angular /packages/bazel/src/ng_package/ng_package.bzl
def create_package(ctx, deps_files, nested_packages):
"""Creates an action that produces the npm package.
It copies srcs and deps into the artifact and produces the .pack and .publish
scripts.
Args:
ctx: the skylark rule context
deps_files: list of files to include in the package which have been
specified as dependencies
nested_packages: list of TreeArtifact outputs from other actions which are
to be nested inside this package
Returns:
The tree artifact which is the publishable directory.
"""
stamp = ctx.attr.node_context_data[NodeContextInfo].stamp
all_files = deps_files + ctx.files.srcs
if not stamp and len(all_files) == 1 and all_files[0].is_directory and len(ctx.files.nested_packages) == 0:
# Special case where these is a single dep that is a directory artifact and there are no
# source files or nested_packages; in that case we assume the package is contained within
# that single directory and there is no work to do
package_dir = all_files[0]
_create_npm_scripts(ctx, package_dir)
return package_dir
package_dir = ctx.actions.declare_directory(ctx.label.name)
package_path = ctx.label.package
# List of dependency sources which are local to the package that defines the current
# target. Also include files from external repositories that explicitly specified in
# the vendor_external list. We only want to package deps files which are inside of the
# current package unless explicitely specified.
filtered_deps_sources = _filter_out_external_files(ctx, deps_files, package_path)
args = ctx.actions.args()
args.use_param_file("%s", use_always = True)
args.add(package_dir.path)
args.add(package_path)
args.add_joined([s.path for s in ctx.files.srcs], join_with = ",", omit_if_empty = False)
args.add(ctx.bin_dir.path)
args.add(ctx.genfiles_dir.path)
args.add_joined(filtered_deps_sources, join_with = ",", omit_if_empty = False)
args.add_joined([p.path for p in nested_packages], join_with = ",", omit_if_empty = False)
args.add(ctx.attr.substitutions)
args.add(ctx.attr.replace_with_version)
args.add(ctx.version_file.path if stamp else "")
args.add_joined(ctx.attr.vendor_external, join_with = ",", omit_if_empty = False)
args.add("1" if ctx.attr.hide_build_files else "0")
inputs = ctx.files.srcs + deps_files + nested_packages
# The version_file is an undocumented attribute of the ctx that lets us read the volatile-status.txt file
# produced by the --workspace_status_command. That command will be executed whenever
# this action runs, so we get the latest version info on each execution.
# See https://github.com/bazelbuild/bazel/issues/1054
if stamp:
inputs.append(ctx.version_file)
ctx.actions.run(
progress_message = "Assembling npm package %s" % package_dir.short_path,
mnemonic = "AssembleNpmPackage",
executable = ctx.executable._packager,
inputs = inputs,
outputs = [package_dir],
arguments = [args],
)
_create_npm_scripts(ctx, package_dir)
return package_dir
def _create_npm_scripts(ctx, package_dir):
args = ctx.actions.args()
args.add_all([
package_dir.path,
ctx.outputs.pack.path,
ctx.outputs.publish.path,
ctx.file._run_npm_template.path,
])
ctx.actions.run(
progress_message = "Generating npm pack & publish scripts",
mnemonic = "GenerateNpmScripts",
executable = ctx.executable._npm_script_generator,
inputs = [ctx.file._run_npm_template, package_dir],
outputs = [ctx.outputs.pack, ctx.outputs.publish],
arguments = [args],
# Must be run local (no sandbox) so that the pwd is the actual execroot
# in the script which is used to generate the path in the pack & publish
# scripts.
execution_requirements = {"local": "1"},
)
def _pkg_npm(ctx):
deps_files_depsets = []
for dep in ctx.attr.deps:
# Collect whatever is in the "data"
deps_files_depsets.append(dep.data_runfiles.files)
# Only collect DefaultInfo files (not transitive)
deps_files_depsets.append(dep.files)
# All direct & transitive JavaScript-producing deps
# TODO: switch to JSModuleInfo when it is available
if JSNamedModuleInfo in dep:
deps_files_depsets.append(dep[JSNamedModuleInfo].sources)
# Include all transitive declerations
if DeclarationInfo in dep:
deps_files_depsets.append(dep[DeclarationInfo].transitive_declarations)
# Note: to_list() should be called once per rule!
deps_files = depset(transitive = deps_files_depsets).to_list()
package_dir = create_package(ctx, deps_files, ctx.files.nested_packages)
package_dir_depset = depset([package_dir])
result = [
DefaultInfo(
files = package_dir_depset,
runfiles = ctx.runfiles([package_dir]),
),
]
if ctx.attr.package_name:
result.append(LinkablePackageInfo(
package_name = ctx.attr.package_name,
path = package_dir.path,
files = package_dir_depset,
))
return result
pkg_npm = rule(
implementation = _pkg_npm,
attrs = PKG_NPM_ATTRS,
doc = _DOC,
outputs = PKG_NPM_OUTPUTS,
)
| 36.055921 | 136 | 0.684609 |
load("//:providers.bzl", "DeclarationInfo", "JSNamedModuleInfo", "LinkablePackageInfo", "NodeContextInfo")
_DOC = """The pkg_npm rule creates a directory containing a publishable npm artifact.
Example:
```python
load("@build_bazel_rules_nodejs//:index.bzl", "pkg_npm")
pkg_npm(
name = "my_package",
srcs = ["package.json"],
deps = [":my_typescript_lib"],
substitutions = {"//internal/": "//"},
)
```
You can use a pair of `// BEGIN-INTERNAL ... // END-INTERNAL` comments to mark regions of files that should be elided during publishing.
For example:
```javascript
function doThing() {
// BEGIN-INTERNAL
// This is a secret internal-only comment
doInternalOnlyThing();
// END-INTERNAL
}
```
With the Bazel stamping feature, pkg_npm will replace any placeholder version in your package with the actual version control tag.
See the [stamping documentation](https://github.com/bazelbuild/rules_nodejs/blob/master/docs/index.md#stamping)
Usage:
`pkg_npm` yields three labels. Build the package directory using the default label:
```sh
$ bazel build :my_package
Target //:my_package up-to-date:
bazel-out/fastbuild/bin/my_package
$ ls -R bazel-out/fastbuild/bin/my_package
```
Dry-run of publishing to npm, calling `npm pack` (it builds the package first if needed):
```sh
$ bazel run :my_package.pack
INFO: Running command line: bazel-out/fastbuild/bin/my_package.pack
my-package-name-1.2.3.tgz
$ tar -tzf my-package-name-1.2.3.tgz
```
Actually publish the package with `npm publish` (also builds first):
```sh
# Check login credentials
$ bazel run @nodejs//:npm_node_repositories who
# Publishes the package
$ bazel run :my_package.publish
```
You can pass arguments to npm by escaping them from Bazel using a double-hyphen, for example:
`bazel run my_package.publish -- --tag=next`
"""
PKG_NPM_ATTRS = {
"package_name": attr.string(
doc = """Optional package_name that this npm package may be imported as.""",
),
"srcs": attr.label_list(
doc = """Files inside this directory which are simply copied into the package.""",
allow_files = True,
),
"hide_build_files": attr.bool(
doc = """If set BUILD and BUILD.bazel files are prefixed with `_` in the npm package.
The default is True since npm packages that contain BUILD files don't work with
`yarn_install` and `npm_install` without a post-install step that deletes or renames them.
NB: Bazel has a change in https://github.com/bazelbuild/bazel/pull/10261
(expected in version 2.1) that adds .bazelignore
support for external repositories, which will make this attribute obsolete.""",
default = True,
),
"nested_packages": attr.label_list(
doc = """Other pkg_npm rules whose content is copied into this package.""",
allow_files = True,
),
"node_context_data": attr.label(
default = "@build_bazel_rules_nodejs//internal:node_context_data",
providers = [NodeContextInfo],
doc = "Internal use only",
),
"replace_with_version": attr.string(
doc = """If set this value is replaced with the version stamp data.
See the section on stamping in the README.""",
default = "0.0.0-PLACEHOLDER",
),
"substitutions": attr.string_dict(
doc = """Key-value pairs which are replaced in all the files while building the package.""",
),
"vendor_external": attr.string_list(
doc = """External workspaces whose contents should be vendored into this workspace.
Avoids 'external/foo' path segments in the resulting package.""",
),
"deps": attr.label_list(
doc = """Other targets which produce files that should be included in the package, such as `rollup_bundle`""",
allow_files = True,
),
"_npm_script_generator": attr.label(
default = Label("//internal/pkg_npm:npm_script_generator"),
cfg = "host",
executable = True,
),
"_packager": attr.label(
default = Label("//internal/pkg_npm:packager"),
cfg = "host",
executable = True,
),
"_run_npm_template": attr.label(
default = Label("@nodejs//:run_npm.sh.template"),
allow_single_file = True,
),
}
# Used in angular/angular /packages/bazel/src/ng_package/ng_package.bzl
PKG_NPM_OUTPUTS = {
"pack": "%{name}.pack",
"publish": "%{name}.publish",
}
# Takes a depset of files and returns a corresponding list of file paths without any files
# that aren't part of the specified package path. Also include files from external repositories
def _filter_out_external_files(ctx, files, package_path):
result = []
for file in files:
if file.short_path.startswith(package_path) and not file.short_path.startswith("../"):
result.append(file.path)
else:
for v in ctx.attr.vendor_external:
if file.short_path.startswith("../%s/" % v):
result.append(file.path)
return result
def create_package(ctx, deps_files, nested_packages):
stamp = ctx.attr.node_context_data[NodeContextInfo].stamp
all_files = deps_files + ctx.files.srcs
if not stamp and len(all_files) == 1 and all_files[0].is_directory and len(ctx.files.nested_packages) == 0:
package_dir = all_files[0]
_create_npm_scripts(ctx, package_dir)
return package_dir
package_dir = ctx.actions.declare_directory(ctx.label.name)
package_path = ctx.label.package
filtered_deps_sources = _filter_out_external_files(ctx, deps_files, package_path)
args = ctx.actions.args()
args.use_param_file("%s", use_always = True)
args.add(package_dir.path)
args.add(package_path)
args.add_joined([s.path for s in ctx.files.srcs], join_with = ",", omit_if_empty = False)
args.add(ctx.bin_dir.path)
args.add(ctx.genfiles_dir.path)
args.add_joined(filtered_deps_sources, join_with = ",", omit_if_empty = False)
args.add_joined([p.path for p in nested_packages], join_with = ",", omit_if_empty = False)
args.add(ctx.attr.substitutions)
args.add(ctx.attr.replace_with_version)
args.add(ctx.version_file.path if stamp else "")
args.add_joined(ctx.attr.vendor_external, join_with = ",", omit_if_empty = False)
args.add("1" if ctx.attr.hide_build_files else "0")
inputs = ctx.files.srcs + deps_files + nested_packages
if stamp:
inputs.append(ctx.version_file)
ctx.actions.run(
progress_message = "Assembling npm package %s" % package_dir.short_path,
mnemonic = "AssembleNpmPackage",
executable = ctx.executable._packager,
inputs = inputs,
outputs = [package_dir],
arguments = [args],
)
_create_npm_scripts(ctx, package_dir)
return package_dir
def _create_npm_scripts(ctx, package_dir):
args = ctx.actions.args()
args.add_all([
package_dir.path,
ctx.outputs.pack.path,
ctx.outputs.publish.path,
ctx.file._run_npm_template.path,
])
ctx.actions.run(
progress_message = "Generating npm pack & publish scripts",
mnemonic = "GenerateNpmScripts",
executable = ctx.executable._npm_script_generator,
inputs = [ctx.file._run_npm_template, package_dir],
outputs = [ctx.outputs.pack, ctx.outputs.publish],
arguments = [args],
execution_requirements = {"local": "1"},
)
def _pkg_npm(ctx):
deps_files_depsets = []
for dep in ctx.attr.deps:
deps_files_depsets.append(dep.data_runfiles.files)
deps_files_depsets.append(dep.files)
if JSNamedModuleInfo in dep:
deps_files_depsets.append(dep[JSNamedModuleInfo].sources)
if DeclarationInfo in dep:
deps_files_depsets.append(dep[DeclarationInfo].transitive_declarations)
deps_files = depset(transitive = deps_files_depsets).to_list()
package_dir = create_package(ctx, deps_files, ctx.files.nested_packages)
package_dir_depset = depset([package_dir])
result = [
DefaultInfo(
files = package_dir_depset,
runfiles = ctx.runfiles([package_dir]),
),
]
if ctx.attr.package_name:
result.append(LinkablePackageInfo(
package_name = ctx.attr.package_name,
path = package_dir.path,
files = package_dir_depset,
))
return result
pkg_npm = rule(
implementation = _pkg_npm,
attrs = PKG_NPM_ATTRS,
doc = _DOC,
outputs = PKG_NPM_OUTPUTS,
)
| true | true |
f724bd849c4133d14569f85a315c436a9c3794b3 | 3,021 | py | Python | cosmo_tester/test_suites/cluster/cluster_to_aio_agents_migration_test.py | Ilanad/cloudify-system-tests | acb31f28fade27f118c6a6c528a080376f24ca46 | [
"Apache-2.0"
] | 10 | 2016-06-26T11:05:57.000Z | 2021-11-04T11:51:50.000Z | cosmo_tester/test_suites/cluster/cluster_to_aio_agents_migration_test.py | Ilanad/cloudify-system-tests | acb31f28fade27f118c6a6c528a080376f24ca46 | [
"Apache-2.0"
] | 89 | 2015-03-19T06:20:26.000Z | 2022-01-31T09:23:35.000Z | cosmo_tester/test_suites/cluster/cluster_to_aio_agents_migration_test.py | Ilanad/cloudify-system-tests | acb31f28fade27f118c6a6c528a080376f24ca46 | [
"Apache-2.0"
] | 19 | 2015-01-21T17:13:07.000Z | 2021-06-07T08:09:51.000Z | from os.path import join
import pytest
from cosmo_tester.test_suites.agent import validate_agent
from cosmo_tester.framework.examples import get_example_deployment
from cosmo_tester.test_suites.snapshots import (
create_copy_and_restore_snapshot,
)
@pytest.mark.four_vms
def test_migrate_agents_cluster_to_aio(
        three_node_cluster_with_extra_manager, module_tmpdir,
        ssh_key, logger, test_config):
    """Migrate agents from a three-node cluster to an all-in-one manager.

    Flow: install an example deployment on the cluster, snapshot it, restore
    the snapshot on a freshly bootstrapped AIO manager, run
    ``cfy agents install --stop-old-agent`` there, and verify the agents now
    report to the AIO manager.
    """
    node1, node2, node3, aio_mgr = three_node_cluster_with_extra_manager
    # Bring up the fourth VM as the AIO manager that will receive the snapshot.
    aio_mgr.bootstrap()
    logger.info('Installing example deployment on cluster')
    example = get_example_deployment(node1, ssh_key, logger,
                                     'cluster_to_aio_agents', test_config)
    example.inputs['server_ip'] = node1.ip_address
    example.upload_and_verify_install()
    # Check agent connectivity via a cluster node other than the one used
    # for the install.
    validate_agent(node2, example, test_config)
    logger.info('Creating snapshot on cluster')
    snapshot_id = 'cluster_to_aio_agents'
    snapshot_path = join(str(module_tmpdir), snapshot_id) + '.zip'
    create_copy_and_restore_snapshot(
        node1, aio_mgr, snapshot_id, snapshot_path, logger,
        cert_path=aio_mgr.api_ca_path)
    logger.info('Migrating to new agents, stopping old agents')
    aio_mgr.run_command(
        'cfy agents install --stop-old-agent --tenant-name {}'.format(
            example.tenant,
        )
    )
    logger.info('Verifying agent connectivity on AIO manager')
    # Point the example at the AIO manager so validation/uninstall go there.
    example.manager = aio_mgr
    validate_agent(aio_mgr, example, test_config, upgrade=True)
    example.uninstall()
@pytest.mark.four_vms
def test_migrate_agents_aio_to_cluster(
        three_node_cluster_with_extra_manager, module_tmpdir,
        ssh_key, logger, test_config):
    """Migrate agents from an all-in-one manager to a three-node cluster.

    Flow: install an example deployment on the AIO manager, snapshot it,
    restore the snapshot onto the cluster, run
    ``cfy agents install --stop-old-agent`` on a cluster node, and verify the
    agents now report to the cluster.
    """
    node1, node2, node3, aio_mgr = three_node_cluster_with_extra_manager
    # Bring up the fourth VM as the AIO manager that will be migrated away from.
    aio_mgr.bootstrap()
    logger.info('Installing example deployment on AIO manager')
    example = get_example_deployment(aio_mgr, ssh_key, logger,
                                     'aio_to_cluster_agents', test_config)
    example.inputs['server_ip'] = aio_mgr.ip_address
    example.upload_and_verify_install()
    validate_agent(aio_mgr, example, test_config)
    logger.info('Creating snapshot on AIO manager')
    snapshot_id = 'aio_to_cluster_agents'
    snapshot_path = join(str(module_tmpdir), snapshot_id) + '.zip'
    create_copy_and_restore_snapshot(
        aio_mgr, node1, snapshot_id, snapshot_path, logger,
        cert_path=aio_mgr.api_ca_path)
    for mgr in node1, node2, node3:
        # Restart restservice to use correct rest secret
        mgr.run_command('sudo supervisorctl restart cloudify-restservice')
        mgr.wait_for_manager()
    logger.info('Migrating to new agents, stopping old agents')
    node1.run_command(
        'cfy agents install --stop-old-agent --tenant-name {}'.format(
            example.tenant,
        )
    )
    logger.info('Verifying agent connectivity on cluster')
    # Point the example at the cluster so validation/uninstall go there.
    example.manager = node1
    # Validate via a node other than the one the migration command ran on.
    validate_agent(node3, example, test_config, upgrade=True)
    example.uninstall()
| 35.541176 | 74 | 0.71996 | from os.path import join
import pytest
from cosmo_tester.test_suites.agent import validate_agent
from cosmo_tester.framework.examples import get_example_deployment
from cosmo_tester.test_suites.snapshots import (
create_copy_and_restore_snapshot,
)
@pytest.mark.four_vms
def test_migrate_agents_cluster_to_aio(
three_node_cluster_with_extra_manager, module_tmpdir,
ssh_key, logger, test_config):
node1, node2, node3, aio_mgr = three_node_cluster_with_extra_manager
aio_mgr.bootstrap()
logger.info('Installing example deployment on cluster')
example = get_example_deployment(node1, ssh_key, logger,
'cluster_to_aio_agents', test_config)
example.inputs['server_ip'] = node1.ip_address
example.upload_and_verify_install()
validate_agent(node2, example, test_config)
logger.info('Creating snapshot on cluster')
snapshot_id = 'cluster_to_aio_agents'
snapshot_path = join(str(module_tmpdir), snapshot_id) + '.zip'
create_copy_and_restore_snapshot(
node1, aio_mgr, snapshot_id, snapshot_path, logger,
cert_path=aio_mgr.api_ca_path)
logger.info('Migrating to new agents, stopping old agents')
aio_mgr.run_command(
'cfy agents install --stop-old-agent --tenant-name {}'.format(
example.tenant,
)
)
logger.info('Verifying agent connectivity on AIO manager')
example.manager = aio_mgr
validate_agent(aio_mgr, example, test_config, upgrade=True)
example.uninstall()
@pytest.mark.four_vms
def test_migrate_agents_aio_to_cluster(
three_node_cluster_with_extra_manager, module_tmpdir,
ssh_key, logger, test_config):
node1, node2, node3, aio_mgr = three_node_cluster_with_extra_manager
aio_mgr.bootstrap()
logger.info('Installing example deployment on AIO manager')
example = get_example_deployment(aio_mgr, ssh_key, logger,
'aio_to_cluster_agents', test_config)
example.inputs['server_ip'] = aio_mgr.ip_address
example.upload_and_verify_install()
validate_agent(aio_mgr, example, test_config)
logger.info('Creating snapshot on AIO manager')
snapshot_id = 'aio_to_cluster_agents'
snapshot_path = join(str(module_tmpdir), snapshot_id) + '.zip'
create_copy_and_restore_snapshot(
aio_mgr, node1, snapshot_id, snapshot_path, logger,
cert_path=aio_mgr.api_ca_path)
for mgr in node1, node2, node3:
mgr.run_command('sudo supervisorctl restart cloudify-restservice')
mgr.wait_for_manager()
logger.info('Migrating to new agents, stopping old agents')
node1.run_command(
'cfy agents install --stop-old-agent --tenant-name {}'.format(
example.tenant,
)
)
logger.info('Verifying agent connectivity on cluster')
example.manager = node1
validate_agent(node3, example, test_config, upgrade=True)
example.uninstall()
| true | true |
f724be57c123ae69e82f64587350fa4ee8ad8e02 | 44,331 | py | Python | python/test/feature_extractor_test.py | da2x/vmaf | 8ba4a4b84beb40f97fa01d902e45dde69b18b517 | [
"BSD-2-Clause-Patent"
] | null | null | null | python/test/feature_extractor_test.py | da2x/vmaf | 8ba4a4b84beb40f97fa01d902e45dde69b18b517 | [
"BSD-2-Clause-Patent"
] | null | null | null | python/test/feature_extractor_test.py | da2x/vmaf | 8ba4a4b84beb40f97fa01d902e45dde69b18b517 | [
"BSD-2-Clause-Patent"
] | null | null | null | from __future__ import absolute_import
import os
import unittest
import re
from vmaf.config import VmafConfig
from vmaf.core.feature_extractor import VmafFeatureExtractor, \
MomentFeatureExtractor, \
PsnrFeatureExtractor, SsimFeatureExtractor, MsSsimFeatureExtractor, \
VifFrameDifferenceFeatureExtractor, \
AnsnrFeatureExtractor, PypsnrFeatureExtractor, VmafIntegerFeatureExtractor
from vmaf.core.asset import Asset
from vmaf.core.result_store import FileSystemResultStore
from test.testutil import set_default_576_324_videos_for_testing, set_default_flat_1920_1080_videos_for_testing, \
set_default_576_324_10bit_videos_for_testing, set_default_576_324_12bit_videos_for_testing, \
set_default_576_324_16bit_videos_for_testing, set_default_576_324_10bit_videos_for_testing_b
__copyright__ = "Copyright 2016-2020, Netflix, Inc."
__license__ = "BSD+Patent"
class FeatureExtractorTest(unittest.TestCase):
def setUp(self) -> None:
self.verificationErrors = []
self.maxDiff = None
def tearDown(self):
if hasattr(self, 'fextractor'):
self.fextractor.remove_results()
pass
self.assertEqual([], self.verificationErrors)
def test_executor_id(self):
asset = Asset(dataset="test", content_id=0, asset_id=1,
ref_path="dir/refvideo.yuv", dis_path="dir/disvideo.yuv",
asset_dict={'width': 720, 'height': 480})
fextractor = VmafFeatureExtractor([asset], None)
self.assertEqual(fextractor.executor_id, "VMAF_feature_V0.2.7")
def test_get_log_file_path(self):
import hashlib
asset = Asset(dataset="test", content_id=0, asset_id=1,
ref_path="dir/refvideo.yuv", dis_path="dir/disvideo.yuv",
asset_dict={'width':720, 'height':480,},
workdir_root="my_workdir_root")
fextractor = VmafFeatureExtractor([asset], None)
log_file_path = fextractor._get_log_file_path(asset)
h = hashlib.sha1("test_0_1_refvideo_720x480_vs_disvideo_720x480_q_720x480".encode("utf-8")).hexdigest()
self.assertTrue(re.match(r"^my_workdir_root/[a-zA-Z0-9-]+/VMAF_feature_V0.2.7_{}$".format(h), log_file_path))
    def test_run_vmaf_fextractor(self):
        """Run the float VMAF feature extractor on the default 576x324 pair
        and pin every atom feature score.

        results[0] is the distorted-vs-reference asset; results[1] compares
        the reference against itself, so lossless scores (e.g. vif == adm ==
        1.0) are expected there.
        """
        ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
        self.fextractor = VmafFeatureExtractor(
            [asset, asset_original],
            None, fifo_mode=True,
            result_store=None
        )
        self.fextractor.run(parallelize=True)
        results = self.fextractor.results
        # Distorted vs. reference.
        self.assertAlmostEqual(results[0]['VMAF_feature_vif_score'], 0.4460930625, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'], 4.04982535417, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_motion2_score'], 3.8953518541666665, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_motion0_score'], 0.0, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_adm_score'], 0.9345148541666667, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_adm2_score'], 0.9345148541666667, places=4) # at version 0.2.4b (ioannis adm fix), adm and adm2 are now identical
        self.assertAlmostEqual(results[0]['VMAF_feature_ansnr_score'], 23.5095715208, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_vif_num_score'], 712650.023478, places=0)
        self.assertAlmostEqual(results[0]['VMAF_feature_vif_den_score'], 1597314.95249, places=0)
        self.assertAlmostEqual(results[0]['VMAF_feature_adm_num_score'], 371.80645372916666, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_adm_den_score'], 397.83378972916671, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_anpsnr_score'], 34.164776875, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale0_score'], 0.363420489439, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale1_score'], 0.766647542135, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale2_score'], 0.862854666902, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale3_score'], 0.915971778036, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale0_score'], 0.90791933424090698, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale1_score'], 0.8938705209242691, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale2_score'], 0.9300123587874962, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale3_score'], 0.9649663148179196, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_vif2_score'], 0.72722361912801026, places=4)
        self.assertAlmostEqual(results[0]['VMAF_feature_adm3_score'], 0.9241841443734412, places=4)
        # Reference vs. itself: quality metrics at their lossless values;
        # motion scores match results[0] since they depend on the reference only.
        self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'], 1.0, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'], 4.04982535417, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_motion2_score'], 3.8953518541666665, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_adm_score'], 1.0, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_adm2_score'], 1.0, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'], 31.2714392708, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_vif_num_score'], 1597314.86733, places=0)
        self.assertAlmostEqual(results[1]['VMAF_feature_vif_den_score'], 1597314.95249, places=0)
        self.assertAlmostEqual(results[1]['VMAF_feature_adm_num_score'], 397.83378972916671, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_adm_den_score'], 397.83378972916671, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_anpsnr_score'], 41.9266444375, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale0_score'], 1.0, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale1_score'], 1.0, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale2_score'], 1.0, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale3_score'], 1.0, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_adm_scale0_score'], 1.0, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_adm_scale1_score'], 1.0, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_adm_scale2_score'], 1.0, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_adm_scale3_score'], 1.0, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_vif2_score'], 1.0, places=4)
        self.assertAlmostEqual(results[1]['VMAF_feature_adm3_score'], 1.0, places=4)
    def test_run_vmaf_integer_fextractor(self):
        """Run the integer (fixed-point) VMAF feature extractor on the default
        576x324 pair and pin every atom feature score.

        Each assertion is wrapped in try/except as a soft assertion: failures
        accumulate in self.verificationErrors and are all reported together by
        tearDown, instead of stopping at the first mismatch.
        """
        ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
        self.fextractor = VmafIntegerFeatureExtractor(
            [asset, asset_original],
            None, fifo_mode=False,
            result_store=None
        )
        self.fextractor.run(parallelize=True)
        results = self.fextractor.results
        # Distorted vs. reference.
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_score'], 0.44642331250000006, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_motion_score'], 4.04982535417, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_motion2_score'], 3.8953518541666665, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_score'], 0.9345148541666667, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm2_score'], 0.9345148541666667, places=4) # at version 0.2.4b (ioannis adm fix), adm and adm2 are now identical
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_ansnr_score'], 23.5095715208, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_num_score'], 713111.410502125, places=0)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_den_score'], 1597165.5464884583, places=0)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_num_score'], 371.8243668541666, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_den_score'], 397.8567857291667, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_anpsnr_score'], 34.164776875, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_scale0_score'], 0.3636620710647402, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_scale1_score'], 0.7674952820232231, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_scale2_score'], 0.8631077727416296, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_scale3_score'], 0.9157200890843669, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale0_score'], 0.90791933424090698, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale1_score'], 0.8938705209242691, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale2_score'], 0.9300123587874962, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale3_score'], 0.9649663148179196, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif2_score'], 0.72749630372849, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm3_score'], 0.9241841443734412, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        # Reference vs. itself: quality metrics at their lossless values;
        # motion scores match results[0] since they depend on the reference only.
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_score'], 1.0, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_motion_score'], 4.04982535417, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_motion2_score'], 3.8953518541666665, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_score'], 1.0, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm2_score'], 1.0, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_ansnr_score'], 31.2714392708, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_num_score'], 1597165.34910075, places=0)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_den_score'], 1597165.5464884583, places=0)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_num_score'], 397.8576817708333, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_den_score'], 397.8567857291667, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_anpsnr_score'], 41.9266444375, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_scale0_score'], 1.0, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_scale1_score'], 1.0, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_scale2_score'], 1.0, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_scale3_score'], 1.0, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale0_score'], 1.0, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale1_score'], 1.0, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale2_score'], 1.0, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale3_score'], 1.0, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif2_score'], 1.0, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
        try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm3_score'], 1.0, places=4)
        except AssertionError as e: self.verificationErrors.append(str(e))
    @unittest.skip("vifdiff alternative needed, vmaf_feature executable deprecated")
    def test_run_vif_frame_difference_fextractor(self):
        """Pin the vifdiff (VIF frame-difference) feature scores on the
        default 576x324 pair. Skipped until a replacement for the deprecated
        vmaf_feature executable exists.
        """
        ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
        self.fextractor = VifFrameDifferenceFeatureExtractor(
            [asset, asset_original],
            None, fifo_mode=True,
            result_store=None
        )
        self.fextractor.run(parallelize=True)
        results = self.fextractor.results
        self.assertAlmostEqual(results[0]['VifDiff_feature_vifdiff_score'], 0.26745858333333333, places=4)
        self.assertAlmostEqual(results[0]['VifDiff_feature_vifdiff_num_score'], 305412.7661844375, places=0)
        self.assertAlmostEqual(results[0]['VifDiff_feature_vifdiff_den_score'], 1113927.6002349583, places=0)
        self.assertAlmostEqual(results[1]['VifDiff_feature_vifdiff_score'], 0.9791655833333334, places=4)
        self.assertAlmostEqual(results[1]['VifDiff_feature_vifdiff_num_score'], 1113926.2941030415, places=0)
        self.assertAlmostEqual(results[1]['VifDiff_feature_vifdiff_den_score'], 1113927.6002349583, places=0)
def test_run_moment_fextractor(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = MomentFeatureExtractor(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
self.assertAlmostEqual(results[0]['Moment_feature_ref1st_score'], 59.788567297525134, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_ref2nd_score'], 4696.668388042269, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_refvar_score'], 1121.519917231203, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_dis1st_score'], 61.332006624999984, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_dis2nd_score'], 4798.659574041666, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_disvar_score'], 1036.837184348847, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_ref1st_score'], 59.788567297525134, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_ref2nd_score'], 4696.668388042269, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_refvar_score'], 1121.519917231203, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_dis1st_score'], 59.788567297525134, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_dis2nd_score'], 4696.668388042269, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_disvar_score'], 1121.519917231203, places=4)
    def test_run_moment_fextractor_10bit(self):
        """Moment features on the 10-bit pair: expectations are the 8-bit
        values scaled by the bit-depth factor (1st moment x4, 2nd moment and
        variance x16, i.e. the factor squared)."""
        ref_path, dis_path, asset, asset_original = set_default_576_324_10bit_videos_for_testing()
        self.fextractor = MomentFeatureExtractor(
            [asset, asset_original],
            None, fifo_mode=True,
            result_store=None
        )
        self.fextractor.run(parallelize=True)
        results = self.fextractor.results
        self.assertAlmostEqual(results[0]['Moment_feature_ref1st_score'], 59.788567297525134 * 4, places=4)
        self.assertAlmostEqual(results[0]['Moment_feature_ref2nd_score'], 4696.668388042269 * 16, places=4)
        self.assertAlmostEqual(results[0]['Moment_feature_refvar_score'], 1121.519917231203 * 16, places=4)
        self.assertAlmostEqual(results[0]['Moment_feature_dis1st_score'], 61.332006624999984 * 4, places=4)
        self.assertAlmostEqual(results[0]['Moment_feature_dis2nd_score'], 4798.659574041666 * 16, places=4)
        self.assertAlmostEqual(results[0]['Moment_feature_disvar_score'], 1036.837184348847 * 16, places=4)
        self.assertAlmostEqual(results[1]['Moment_feature_ref1st_score'], 59.788567297525134 * 4, places=4)
        self.assertAlmostEqual(results[1]['Moment_feature_ref2nd_score'], 4696.668388042269 * 16, places=4)
        self.assertAlmostEqual(results[1]['Moment_feature_refvar_score'], 1121.519917231203 * 16, places=4)
        self.assertAlmostEqual(results[1]['Moment_feature_dis1st_score'], 59.788567297525134 * 4, places=4)
        self.assertAlmostEqual(results[1]['Moment_feature_dis2nd_score'], 4696.668388042269 * 16, places=4)
        self.assertAlmostEqual(results[1]['Moment_feature_disvar_score'], 1121.519917231203 * 16, places=4)
    def test_run_moment_fextractor_12bit(self):
        """Moment features on the 12-bit pair, pinned to absolute values
        (this pair uses different content than the 8/10-bit tests)."""
        ref_path, dis_path, asset, asset_original = set_default_576_324_12bit_videos_for_testing()
        self.fextractor = MomentFeatureExtractor(
            [asset, asset_original],
            None, fifo_mode=True,
            result_store=None
        )
        self.fextractor.run(parallelize=True)
        results = self.fextractor.results
        self.assertAlmostEqual(results[0]['Moment_feature_ref1st_score'], 979.6711819844536, places=4)
        self.assertAlmostEqual(results[0]['Moment_feature_ref2nd_score'], 1238135.8363054413, places=4)
        self.assertAlmostEqual(results[0]['Moment_feature_refvar_score'], 278292.25886465114, places=4)
        self.assertAlmostEqual(results[0]['Moment_feature_dis1st_score'], 996.2818072702333, places=4)
        self.assertAlmostEqual(results[0]['Moment_feature_dis2nd_score'], 1255533.4389574758, places=4)
        self.assertAlmostEqual(results[0]['Moment_feature_disvar_score'], 262952.8893540034, places=4)
        self.assertAlmostEqual(results[1]['Moment_feature_ref1st_score'], 979.6711819844536, places=4)
        self.assertAlmostEqual(results[1]['Moment_feature_ref2nd_score'], 1238135.8363054413, places=4)
        self.assertAlmostEqual(results[1]['Moment_feature_refvar_score'], 278292.25886465114, places=4)
        self.assertAlmostEqual(results[1]['Moment_feature_dis1st_score'], 979.6711819844536, places=4)
        self.assertAlmostEqual(results[1]['Moment_feature_dis2nd_score'], 1238135.8363054413, places=4)
        self.assertAlmostEqual(results[1]['Moment_feature_disvar_score'], 278292.25886465114, places=4)
def test_run_moment_fextractor_16bit(self):
    """Moment features on 16-bit test videos.

    Expectations are the 8-bit reference values scaled by 16 (1st moment)
    and 256 (2nd moment / variance), matching the wider sample range.
    """
    ref_path, dis_path, asset, asset_original = set_default_576_324_16bit_videos_for_testing()
    self.fextractor = MomentFeatureExtractor(
        [asset, asset_original],
        None, fifo_mode=True,
        result_store=None
    )
    self.fextractor.run(parallelize=True)
    results = self.fextractor.results
    expectations = [
        {
            'Moment_feature_ref1st_score': 979.6711819844536 * 16.0,
            'Moment_feature_ref2nd_score': 1238135.8363054413 * 256.0,
            'Moment_feature_refvar_score': 278292.25886465114 * 256.0,
            'Moment_feature_dis1st_score': 996.2818072702333 * 16.0,
            'Moment_feature_dis2nd_score': 1255533.4389574758 * 256.0,
            'Moment_feature_disvar_score': 262952.8893540034 * 256.0,
        },
        {
            'Moment_feature_ref1st_score': 979.6711819844536 * 16.0,
            'Moment_feature_ref2nd_score': 1238135.8363054413 * 256.0,
            'Moment_feature_refvar_score': 278292.25886465114 * 256.0,
            'Moment_feature_dis1st_score': 979.6711819844536 * 16.0,
            'Moment_feature_dis2nd_score': 1238135.8363054413 * 256.0,
            'Moment_feature_disvar_score': 278292.25886465114 * 256.0,
        },
    ]
    for result, expected in zip(results, expectations):
        for key, value in expected.items():
            self.assertAlmostEqual(result[key], value, places=4)
def test_run_psnr_fextractor(self):
    """PSNR feature on the default 576x324 pair; second asset scores 60 dB."""
    ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
    self.fextractor = PsnrFeatureExtractor(
        [asset, asset_original],
        None, fifo_mode=True,
        result_store=None
    )
    self.fextractor.run(parallelize=True)
    result_first, result_second = self.fextractor.results
    self.assertAlmostEqual(result_first['PSNR_feature_psnr_score'], 30.755063979166664, places=4)
    self.assertAlmostEqual(result_second['PSNR_feature_psnr_score'], 60.0, places=4)
def test_run_ansnr_fextractor(self):
    """ANSNR feature extractor yields both ansnr and anpsnr scores per asset."""
    ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
    self.fextractor = AnsnrFeatureExtractor(
        [asset, asset_original],
        None, fifo_mode=True,
        result_store=None
    )
    self.fextractor.run(parallelize=True)
    results = self.fextractor.results
    # (key, expected for results[0], expected for results[1])
    expectations = [
        ('ANSNR_feature_ansnr_score', 23.509571520833333, 31.271439270833337),
        ('ANSNR_feature_anpsnr_score', 34.16477641666666, 41.926644187499996),
    ]
    for key, first, second in expectations:
        self.assertAlmostEqual(results[0][key], first, places=4)
        self.assertAlmostEqual(results[1][key], second, places=4)
def test_run_ssim_fextractor(self):
    """SSIM and its luminance/contrast/structure components; second asset is perfect (1.0)."""
    ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
    self.fextractor = SsimFeatureExtractor(
        [asset, asset_original],
        None, fifo_mode=False,
        result_store=None
    )
    self.fextractor.run(parallelize=True)
    results = self.fextractor.results
    expected_first = {
        'SSIM_feature_ssim_score': 0.86322654166666657,
        'SSIM_feature_ssim_l_score': 0.9981474583333334,
        'SSIM_feature_ssim_c_score': 0.96126793750000006,
        'SSIM_feature_ssim_s_score': 0.89773633333333336,
    }
    for key, value in expected_first.items():
        self.assertAlmostEqual(results[0][key], value, places=4)
    # The second asset scores a perfect 1.0 on every SSIM component.
    for key in expected_first:
        self.assertAlmostEqual(results[1][key], 1.0, places=4)
def test_run_ssim_fextractor_flat(self):
    """SSIM on flat 1920x1080 content.

    Mismatches are collected into self.verificationErrors (soft assertions)
    rather than aborting, so tearDown can report all failures at once.
    """
    ref_path, dis_path, asset, asset_original = set_default_flat_1920_1080_videos_for_testing()
    self.fextractor = SsimFeatureExtractor(
        [asset, asset_original],
        None, fifo_mode=False,
        result_store=None
    )
    self.fextractor.run(parallelize=True)
    results = self.fextractor.results
    expectations = [
        {
            'SSIM_feature_ssim_score': 0.9087330000000001,
            'SSIM_feature_ssim_l_score': 0.9087330000000001,
            'SSIM_feature_ssim_c_score': 1.0,
            'SSIM_feature_ssim_s_score': 1.0,
        },
        {
            'SSIM_feature_ssim_score': 1.0,
            'SSIM_feature_ssim_l_score': 1.0,
            'SSIM_feature_ssim_c_score': 1.0,
            'SSIM_feature_ssim_s_score': 1.0,
        },
    ]
    for result, expected in zip(results, expectations):
        for key, value in expected.items():
            try:
                self.assertAlmostEqual(result[key], value, places=8)
            except AssertionError as e:
                self.verificationErrors.append(str(e))
def test_run_ms_ssim_fextractor(self):
"""MS-SSIM feature extractor: overall score plus per-scale (0-4)
luminance (l), contrast (c) and structure (s) components for each asset."""
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = MsSsimFeatureExtractor(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# First asset: degraded pair -- components approach 1.0 at coarser scales.
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_score'], 0.9632498125, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale0_score'], 0.9981474583333334, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale0_score'], 0.96126793750000006, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale0_score'], 0.89773633333333336, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale1_score'], 0.99899612500000001, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale1_score'], 0.9857694375, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale1_score'], 0.941185875, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale2_score'], 0.99923564583333324, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale2_score'], 0.997034020833, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale2_score'], 0.977992145833, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale3_score'], 0.99929210416666658, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale3_score'], 0.999588104167, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale3_score'], 0.99387125, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale4_score'], 0.99940356249999995, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale4_score'], 0.999907625, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale4_score'], 0.998222583333, places=4)
# Second asset: every score and per-scale component is a perfect 1.0.
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale0_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale0_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale0_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale1_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale1_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale1_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale2_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale2_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale2_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale3_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale3_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale3_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale4_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale4_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale4_score'], 1., places=4)
def test_run_vmaf_integer_fextractor_checkerboard(self):
"""Integer VMAF features on checkerboard content with three pairings:
shifted-by-10 (asset), ref-vs-ref (asset_original, dis_path=ref_path)
and shifted-by-1 (asset2). Uses soft assertions collected in
self.verificationErrors and checked in tearDown."""
ref_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_0_0.yuv")
dis_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_10_0.yuv")
dis_path2 = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_1_0.yuv")
asset = Asset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path,
asset_dict={'width': 1920, 'height': 1080})
# Reference compared against itself: adm scores should be exactly 1.0.
asset_original = Asset(dataset="test", content_id=0, asset_id=1,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=ref_path,
asset_dict={'width': 1920, 'height': 1080})
asset2 = Asset(dataset="test", content_id=0, asset_id=2,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path2,
asset_dict={'width': 1920, 'height': 1080})
self.fextractor = VmafIntegerFeatureExtractor(
[asset, asset_original, asset2],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# Asset 0: heavily shifted checkerboard -- very low adm scores.
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_score'], 0.053996333333333334, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm2_score'], 0.053996333333333334, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale0_score'], 0.23738393128710478, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale1_score'], 0.08524788663335138, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale2_score'], 0.024058909404945077, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale3_score'], 0.018034879735107798, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_motion_score'], 12.554711666666668, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_motion2_score'], 12.554711666666668, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
# Asset 1: ref-vs-ref -- adm scores are 1.0; motion depends on ref only.
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm2_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale0_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale1_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale2_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale3_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_motion_score'], 12.554711666666668, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_motion2_score'], 12.554711666666668, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
# Asset 2: slightly shifted checkerboard -- intermediate adm scores.
try: self.assertAlmostEqual(results[2]['VMAF_integer_feature_adm_score'], 0.78533833333333336, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[2]['VMAF_integer_feature_adm2_score'], 0.7853384465157921, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[2]['VMAF_integer_feature_adm_scale0_score'], 0.72132189911792899, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[2]['VMAF_integer_feature_adm_scale1_score'], 0.69259738857522501, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[2]['VMAF_integer_feature_adm_scale2_score'], 0.80415911639244586, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[2]['VMAF_integer_feature_adm_scale3_score'], 0.82791889676239039, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[2]['VMAF_integer_feature_motion_score'], 12.554711666666668, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[2]['VMAF_integer_feature_motion2_score'], 12.554711666666668, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
def test_run_vmaf_integer_fextractor_flat(self):
    """On flat content every ADM feature comes out exactly 1.0 for both assets."""
    ref_path, dis_path, asset, asset_original = set_default_flat_1920_1080_videos_for_testing()
    self.fextractor = VmafIntegerFeatureExtractor(
        [asset, asset_original],
        None, fifo_mode=True,
        result_store=None
    )
    self.fextractor.run(parallelize=True)
    results = self.fextractor.results
    adm_keys = [
        'VMAF_integer_feature_adm_score',
        'VMAF_integer_feature_adm2_score',
        'VMAF_integer_feature_adm_scale0_score',
        'VMAF_integer_feature_adm_scale1_score',
        'VMAF_integer_feature_adm_scale2_score',
        'VMAF_integer_feature_adm_scale3_score',
    ]
    for result in (results[0], results[1]):
        for key in adm_keys:
            self.assertAlmostEqual(result[key], 1.0, places=4)
def test_run_psnr_fextractor_proc(self):
    """PSNR with per-asset proc callbacks applied before feature extraction."""
    ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
    # 'identity' leaves the reference untouched while 'multiply' alters the
    # distorted input, so scores differ from the plain PSNR test.
    callback_dict = {
        'ref_proc_callback': 'identity',
        'dis_proc_callback': 'multiply',
    }
    for a in (asset, asset_original):
        a.asset_dict.update(callback_dict)
    self.fextractor = PsnrFeatureExtractor(
        [asset, asset_original],
        None, fifo_mode=False,
        result_store=None,
    )
    self.fextractor.run(parallelize=True)
    result_first, result_second = self.fextractor.results
    self.assertAlmostEqual(result_first['PSNR_feature_psnr_score'], 27.645446604166665, places=8)
    self.assertAlmostEqual(result_second['PSNR_feature_psnr_score'], 31.87683660416667, places=8)
def test_run_pypsnr_fextractor(self):
    """Per-channel (Y/U/V) pure-Python PSNR; second asset scores 60.0 on all channels."""
    ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
    self.fextractor = PypsnrFeatureExtractor(
        [asset, asset_original],
        None, fifo_mode=True,
        result_store=None
    )
    self.fextractor.run(parallelize=True)
    results = self.fextractor.results
    expected_first = {
        'Pypsnr_feature_psnry_score': 30.755063979166664,
        'Pypsnr_feature_psnru_score': 38.449441057158786,
        'Pypsnr_feature_psnrv_score': 40.9919102486235,
    }
    for key, value in expected_first.items():
        self.assertAlmostEqual(results[0][key], value, places=4)
    for key in expected_first:
        self.assertAlmostEqual(results[1][key], 60.0, places=4)
def test_run_pypsnr_fextractor_10bit(self):
    """Pure-Python PSNR on 10-bit input; second asset scores 72.0 on all channels."""
    ref_path, dis_path, asset, asset_original = set_default_576_324_10bit_videos_for_testing()
    self.fextractor = PypsnrFeatureExtractor(
        [asset, asset_original],
        None, fifo_mode=True,
        result_store=None
    )
    self.fextractor.run(parallelize=True)
    results = self.fextractor.results
    expected_first = {
        'Pypsnr_feature_psnry_score': 30.780573260053277,
        'Pypsnr_feature_psnru_score': 38.769832063651364,
        'Pypsnr_feature_psnrv_score': 41.28418847734209,
    }
    for key, value in expected_first.items():
        self.assertAlmostEqual(results[0][key], value, places=4)
    for key in expected_first:
        self.assertAlmostEqual(results[1][key], 72.0, places=4)
def test_run_pypsnr_fextractor_10bit_b(self):
    """Pure-Python PSNR on the alternate (b) 10-bit fixture pair."""
    ref_path, dis_path, asset, asset_original = set_default_576_324_10bit_videos_for_testing_b()
    self.fextractor = PypsnrFeatureExtractor(
        [asset, asset_original],
        None, fifo_mode=True,
        result_store=None
    )
    self.fextractor.run(parallelize=True)
    results = self.fextractor.results
    expected_first = {
        'Pypsnr_feature_psnry_score': 32.57145231892744,
        'Pypsnr_feature_psnru_score': 39.03859552689696,
        'Pypsnr_feature_psnrv_score': 41.28060001337217,
    }
    for key, value in expected_first.items():
        self.assertAlmostEqual(results[0][key], value, places=4)
    for key in expected_first:
        self.assertAlmostEqual(results[1][key], 72.0, places=4)
def test_run_pypsnr_fextractor_12bit(self):
    """Pure-Python PSNR on 12-bit input; second asset scores 84.0 on all channels."""
    ref_path, dis_path, asset, asset_original = set_default_576_324_12bit_videos_for_testing()
    self.fextractor = PypsnrFeatureExtractor(
        [asset, asset_original],
        None, fifo_mode=True,
        result_store=None
    )
    self.fextractor.run(parallelize=True)
    results = self.fextractor.results
    expected_first = {
        'Pypsnr_feature_psnry_score': 32.577817940053734,
        'Pypsnr_feature_psnru_score': 39.044961148023255,
        'Pypsnr_feature_psnrv_score': 41.28696563449846,
    }
    for key, value in expected_first.items():
        self.assertAlmostEqual(results[0][key], value, places=4)
    for key in expected_first:
        self.assertAlmostEqual(results[1][key], 84.0, places=4)
def test_run_pypsnr_fextractor_16bit(self):
    """Pure-Python PSNR on 16-bit input; second asset scores 108.0 on all channels."""
    ref_path, dis_path, asset, asset_original = set_default_576_324_16bit_videos_for_testing()
    self.fextractor = PypsnrFeatureExtractor(
        [asset, asset_original],
        None, fifo_mode=True,
        result_store=None
    )
    self.fextractor.run(parallelize=True)
    results = self.fextractor.results
    expected_first = {
        'Pypsnr_feature_psnry_score': 32.579806240311484,
        'Pypsnr_feature_psnru_score': 39.046949448281005,
        'Pypsnr_feature_psnrv_score': 41.288953934756215,
    }
    for key, value in expected_first.items():
        self.assertAlmostEqual(results[0][key], value, places=4)
    for key in expected_first:
        self.assertAlmostEqual(results[1][key], 108.0, places=4)
# Allow running this test module directly; verbosity=2 prints each test name.
if __name__ == '__main__':
unittest.main(verbosity=2)
from __future__ import absolute_import
import os
import unittest
import re
from vmaf.config import VmafConfig
from vmaf.core.feature_extractor import VmafFeatureExtractor, \
MomentFeatureExtractor, \
PsnrFeatureExtractor, SsimFeatureExtractor, MsSsimFeatureExtractor, \
VifFrameDifferenceFeatureExtractor, \
AnsnrFeatureExtractor, PypsnrFeatureExtractor, VmafIntegerFeatureExtractor
from vmaf.core.asset import Asset
from vmaf.core.result_store import FileSystemResultStore
from test.testutil import set_default_576_324_videos_for_testing, set_default_flat_1920_1080_videos_for_testing, \
set_default_576_324_10bit_videos_for_testing, set_default_576_324_12bit_videos_for_testing, \
set_default_576_324_16bit_videos_for_testing, set_default_576_324_10bit_videos_for_testing_b
__copyright__ = "Copyright 2016-2020, Netflix, Inc."
__license__ = "BSD+Patent"
class FeatureExtractorTest(unittest.TestCase):
def setUp(self) -> None:
self.verificationErrors = []
self.maxDiff = None
def tearDown(self):
if hasattr(self, 'fextractor'):
self.fextractor.remove_results()
pass
self.assertEqual([], self.verificationErrors)
def test_executor_id(self):
    """The executor id encodes the feature set name and its version."""
    asset = Asset(
        dataset="test", content_id=0, asset_id=1,
        ref_path="dir/refvideo.yuv", dis_path="dir/disvideo.yuv",
        asset_dict={'width': 720, 'height': 480},
    )
    extractor = VmafFeatureExtractor([asset], None)
    self.assertEqual(extractor.executor_id, "VMAF_feature_V0.2.7")
def test_get_log_file_path(self):
    """Log path combines workdir root, a run id, the executor id and an asset hash."""
    import hashlib
    asset = Asset(
        dataset="test", content_id=0, asset_id=1,
        ref_path="dir/refvideo.yuv", dis_path="dir/disvideo.yuv",
        asset_dict={'width': 720, 'height': 480},
        workdir_root="my_workdir_root",
    )
    extractor = VmafFeatureExtractor([asset], None)
    log_file_path = extractor._get_log_file_path(asset)
    # The asset string is SHA1-hashed into the final path component.
    digest = hashlib.sha1(
        "test_0_1_refvideo_720x480_vs_disvideo_720x480_q_720x480".encode("utf-8")
    ).hexdigest()
    pattern = r"^my_workdir_root/[a-zA-Z0-9-]+/VMAF_feature_V0.2.7_{}$".format(digest)
    self.assertTrue(re.match(pattern, log_file_path))
def test_run_vmaf_fextractor(self):
"""Legacy (float) VMAF feature extractor on the default 576x324 pair:
checks vif/adm (overall, num/den and per-scale), motion, ansnr and
anpsnr for the distorted pair (results[0]) and for the second asset
(results[1]), whose scores are perfect / equal to the reference."""
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = VmafFeatureExtractor(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# First asset: degraded pair.
self.assertAlmostEqual(results[0]['VMAF_feature_vif_score'], 0.4460930625, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion_score'], 4.04982535417, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion2_score'], 3.8953518541666665, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_motion0_score'], 0.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_score'], 0.9345148541666667, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm2_score'], 0.9345148541666667, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_ansnr_score'], 23.5095715208, places=4)
# Large numerator/denominator sums are only checked to integer precision.
self.assertAlmostEqual(results[0]['VMAF_feature_vif_num_score'], 712650.023478, places=0)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_den_score'], 1597314.95249, places=0)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_num_score'], 371.80645372916666, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_den_score'], 397.83378972916671, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_anpsnr_score'], 34.164776875, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale0_score'], 0.363420489439, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale1_score'], 0.766647542135, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale2_score'], 0.862854666902, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif_scale3_score'], 0.915971778036, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale0_score'], 0.90791933424090698, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale1_score'], 0.8938705209242691, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale2_score'], 0.9300123587874962, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm_scale3_score'], 0.9649663148179196, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_vif2_score'], 0.72722361912801026, places=4)
self.assertAlmostEqual(results[0]['VMAF_feature_adm3_score'], 0.9241841443734412, places=4)
# Second asset: vif/adm family scores are 1.0; motion matches the reference.
self.assertAlmostEqual(results[1]['VMAF_feature_vif_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion_score'], 4.04982535417, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_motion2_score'], 3.8953518541666665, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm2_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_ansnr_score'], 31.2714392708, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_num_score'], 1597314.86733, places=0)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_den_score'], 1597314.95249, places=0)
self.assertAlmostEqual(results[1]['VMAF_feature_adm_num_score'], 397.83378972916671, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm_den_score'], 397.83378972916671, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_anpsnr_score'], 41.9266444375, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale0_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale1_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale2_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif_scale3_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm_scale0_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm_scale1_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm_scale2_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm_scale3_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_vif2_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_feature_adm3_score'], 1.0, places=4)
def test_run_vmaf_integer_fextractor(self):
"""Integer (fixed-point) VMAF feature extractor on the default 576x324
pair. Expected values differ slightly from the float extractor's due to
integer arithmetic. Uses soft assertions collected in
self.verificationErrors and checked in tearDown."""
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = VmafIntegerFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
# First asset: degraded pair.
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_score'], 0.44642331250000006, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_motion_score'], 4.04982535417, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_motion2_score'], 3.8953518541666665, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_score'], 0.9345148541666667, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm2_score'], 0.9345148541666667, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_ansnr_score'], 23.5095715208, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
# Large numerator/denominator sums are only checked to integer precision.
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_num_score'], 713111.410502125, places=0)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_den_score'], 1597165.5464884583, places=0)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_num_score'], 371.8243668541666, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_den_score'], 397.8567857291667, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_anpsnr_score'], 34.164776875, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_scale0_score'], 0.3636620710647402, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_scale1_score'], 0.7674952820232231, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_scale2_score'], 0.8631077727416296, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif_scale3_score'], 0.9157200890843669, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale0_score'], 0.90791933424090698, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale1_score'], 0.8938705209242691, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale2_score'], 0.9300123587874962, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale3_score'], 0.9649663148179196, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_vif2_score'], 0.72749630372849, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm3_score'], 0.9241841443734412, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
# Second asset: vif/adm family scores are 1.0; motion matches the reference.
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_motion_score'], 4.04982535417, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_motion2_score'], 3.8953518541666665, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm2_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_ansnr_score'], 31.2714392708, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_num_score'], 1597165.34910075, places=0)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_den_score'], 1597165.5464884583, places=0)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_num_score'], 397.8576817708333, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_den_score'], 397.8567857291667, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_anpsnr_score'], 41.9266444375, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_scale0_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_scale1_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_scale2_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif_scale3_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale0_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale1_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale2_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale3_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_vif2_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm3_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
@unittest.skip("vifdiff alternative needed, vmaf_feature executable deprecated")
def test_run_vif_frame_difference_fextractor(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = VifFrameDifferenceFeatureExtractor(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
self.assertAlmostEqual(results[0]['VifDiff_feature_vifdiff_score'], 0.26745858333333333, places=4)
self.assertAlmostEqual(results[0]['VifDiff_feature_vifdiff_num_score'], 305412.7661844375, places=0)
self.assertAlmostEqual(results[0]['VifDiff_feature_vifdiff_den_score'], 1113927.6002349583, places=0)
self.assertAlmostEqual(results[1]['VifDiff_feature_vifdiff_score'], 0.9791655833333334, places=4)
self.assertAlmostEqual(results[1]['VifDiff_feature_vifdiff_num_score'], 1113926.2941030415, places=0)
self.assertAlmostEqual(results[1]['VifDiff_feature_vifdiff_den_score'], 1113927.6002349583, places=0)
def test_run_moment_fextractor(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = MomentFeatureExtractor(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
self.assertAlmostEqual(results[0]['Moment_feature_ref1st_score'], 59.788567297525134, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_ref2nd_score'], 4696.668388042269, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_refvar_score'], 1121.519917231203, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_dis1st_score'], 61.332006624999984, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_dis2nd_score'], 4798.659574041666, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_disvar_score'], 1036.837184348847, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_ref1st_score'], 59.788567297525134, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_ref2nd_score'], 4696.668388042269, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_refvar_score'], 1121.519917231203, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_dis1st_score'], 59.788567297525134, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_dis2nd_score'], 4696.668388042269, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_disvar_score'], 1121.519917231203, places=4)
def test_run_moment_fextractor_10bit(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_10bit_videos_for_testing()
self.fextractor = MomentFeatureExtractor(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
self.assertAlmostEqual(results[0]['Moment_feature_ref1st_score'], 59.788567297525134 * 4, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_ref2nd_score'], 4696.668388042269 * 16, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_refvar_score'], 1121.519917231203 * 16, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_dis1st_score'], 61.332006624999984 * 4, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_dis2nd_score'], 4798.659574041666 * 16, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_disvar_score'], 1036.837184348847 * 16, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_ref1st_score'], 59.788567297525134 * 4, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_ref2nd_score'], 4696.668388042269 * 16, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_refvar_score'], 1121.519917231203 * 16, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_dis1st_score'], 59.788567297525134 * 4, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_dis2nd_score'], 4696.668388042269 * 16, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_disvar_score'], 1121.519917231203 * 16, places=4)
def test_run_moment_fextractor_12bit(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_12bit_videos_for_testing()
self.fextractor = MomentFeatureExtractor(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
self.assertAlmostEqual(results[0]['Moment_feature_ref1st_score'], 979.6711819844536, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_ref2nd_score'], 1238135.8363054413, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_refvar_score'], 278292.25886465114, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_dis1st_score'], 996.2818072702333, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_dis2nd_score'], 1255533.4389574758, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_disvar_score'], 262952.8893540034, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_ref1st_score'], 979.6711819844536, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_ref2nd_score'], 1238135.8363054413, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_refvar_score'], 278292.25886465114, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_dis1st_score'], 979.6711819844536, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_dis2nd_score'], 1238135.8363054413, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_disvar_score'], 278292.25886465114, places=4)
def test_run_moment_fextractor_16bit(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_16bit_videos_for_testing()
self.fextractor = MomentFeatureExtractor(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
self.assertAlmostEqual(results[0]['Moment_feature_ref1st_score'], 979.6711819844536 * 16.0, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_ref2nd_score'], 1238135.8363054413 * 256.0, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_refvar_score'], 278292.25886465114 * 256.0, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_dis1st_score'], 996.2818072702333 * 16.0, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_dis2nd_score'], 1255533.4389574758 * 256.0, places=4)
self.assertAlmostEqual(results[0]['Moment_feature_disvar_score'], 262952.8893540034 * 256.0, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_ref1st_score'], 979.6711819844536 * 16.0, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_ref2nd_score'], 1238135.8363054413 * 256.0, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_refvar_score'], 278292.25886465114 * 256.0, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_dis1st_score'], 979.6711819844536 * 16.0, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_dis2nd_score'], 1238135.8363054413 * 256.0, places=4)
self.assertAlmostEqual(results[1]['Moment_feature_disvar_score'], 278292.25886465114 * 256.0, places=4)
def test_run_psnr_fextractor(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = PsnrFeatureExtractor(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
self.assertAlmostEqual(results[0]['PSNR_feature_psnr_score'], 30.755063979166664, places=4)
self.assertAlmostEqual(results[1]['PSNR_feature_psnr_score'], 60.0, places=4)
def test_run_ansnr_fextractor(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = AnsnrFeatureExtractor(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
self.assertAlmostEqual(results[0]['ANSNR_feature_ansnr_score'], 23.509571520833333, places=4)
self.assertAlmostEqual(results[0]['ANSNR_feature_anpsnr_score'], 34.16477641666666, places=4)
self.assertAlmostEqual(results[1]['ANSNR_feature_ansnr_score'], 31.271439270833337, places=4)
self.assertAlmostEqual(results[1]['ANSNR_feature_anpsnr_score'], 41.926644187499996, places=4)
def test_run_ssim_fextractor(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = SsimFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
self.assertAlmostEqual(results[0]['SSIM_feature_ssim_score'], 0.86322654166666657, places=4)
self.assertAlmostEqual(results[0]['SSIM_feature_ssim_l_score'], 0.9981474583333334, places=4)
self.assertAlmostEqual(results[0]['SSIM_feature_ssim_c_score'], 0.96126793750000006, places=4)
self.assertAlmostEqual(results[0]['SSIM_feature_ssim_s_score'], 0.89773633333333336, places=4)
self.assertAlmostEqual(results[1]['SSIM_feature_ssim_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['SSIM_feature_ssim_l_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['SSIM_feature_ssim_c_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['SSIM_feature_ssim_s_score'], 1.0, places=4)
def test_run_ssim_fextractor_flat(self):
ref_path, dis_path, asset, asset_original = set_default_flat_1920_1080_videos_for_testing()
self.fextractor = SsimFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
try: self.assertAlmostEqual(results[0]['SSIM_feature_ssim_score'], 0.9087330000000001, places=8)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['SSIM_feature_ssim_l_score'], 0.9087330000000001, places=8)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['SSIM_feature_ssim_c_score'], 1.0, places=8)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['SSIM_feature_ssim_s_score'], 1.0, places=8)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['SSIM_feature_ssim_score'], 1.0, places=8)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['SSIM_feature_ssim_l_score'], 1.0, places=8)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['SSIM_feature_ssim_c_score'], 1.0, places=8)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['SSIM_feature_ssim_s_score'], 1.0, places=8)
except AssertionError as e: self.verificationErrors.append(str(e))
def test_run_ms_ssim_fextractor(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = MsSsimFeatureExtractor(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_score'], 0.9632498125, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale0_score'], 0.9981474583333334, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale0_score'], 0.96126793750000006, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale0_score'], 0.89773633333333336, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale1_score'], 0.99899612500000001, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale1_score'], 0.9857694375, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale1_score'], 0.941185875, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale2_score'], 0.99923564583333324, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale2_score'], 0.997034020833, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale2_score'], 0.977992145833, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale3_score'], 0.99929210416666658, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale3_score'], 0.999588104167, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale3_score'], 0.99387125, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_l_scale4_score'], 0.99940356249999995, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_c_scale4_score'], 0.999907625, places=4)
self.assertAlmostEqual(results[0]['MS_SSIM_feature_ms_ssim_s_scale4_score'], 0.998222583333, places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale0_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale0_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale0_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale1_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale1_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale1_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale2_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale2_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale2_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale3_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale3_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale3_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_l_scale4_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_c_scale4_score'], 1., places=4)
self.assertAlmostEqual(results[1]['MS_SSIM_feature_ms_ssim_s_scale4_score'], 1., places=4)
def test_run_vmaf_integer_fextractor_checkerboard(self):
ref_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_0_0.yuv")
dis_path = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_10_0.yuv")
dis_path2 = VmafConfig.test_resource_path("yuv", "checkerboard_1920_1080_10_3_1_0.yuv")
asset = Asset(dataset="test", content_id=0, asset_id=0,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path,
asset_dict={'width': 1920, 'height': 1080})
asset_original = Asset(dataset="test", content_id=0, asset_id=1,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=ref_path,
asset_dict={'width': 1920, 'height': 1080})
asset2 = Asset(dataset="test", content_id=0, asset_id=2,
workdir_root=VmafConfig.workdir_path(),
ref_path=ref_path,
dis_path=dis_path2,
asset_dict={'width': 1920, 'height': 1080})
self.fextractor = VmafIntegerFeatureExtractor(
[asset, asset_original, asset2],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_score'], 0.053996333333333334, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm2_score'], 0.053996333333333334, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale0_score'], 0.23738393128710478, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale1_score'], 0.08524788663335138, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale2_score'], 0.024058909404945077, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale3_score'], 0.018034879735107798, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_motion_score'], 12.554711666666668, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[0]['VMAF_integer_feature_motion2_score'], 12.554711666666668, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm2_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale0_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale1_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale2_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale3_score'], 1.0, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_motion_score'], 12.554711666666668, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[1]['VMAF_integer_feature_motion2_score'], 12.554711666666668, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[2]['VMAF_integer_feature_adm_score'], 0.78533833333333336, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[2]['VMAF_integer_feature_adm2_score'], 0.7853384465157921, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[2]['VMAF_integer_feature_adm_scale0_score'], 0.72132189911792899, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[2]['VMAF_integer_feature_adm_scale1_score'], 0.69259738857522501, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[2]['VMAF_integer_feature_adm_scale2_score'], 0.80415911639244586, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[2]['VMAF_integer_feature_adm_scale3_score'], 0.82791889676239039, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[2]['VMAF_integer_feature_motion_score'], 12.554711666666668, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
try: self.assertAlmostEqual(results[2]['VMAF_integer_feature_motion2_score'], 12.554711666666668, places=4)
except AssertionError as e: self.verificationErrors.append(str(e))
def test_run_vmaf_integer_fextractor_flat(self):
ref_path, dis_path, asset, asset_original = set_default_flat_1920_1080_videos_for_testing()
self.fextractor = VmafIntegerFeatureExtractor(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm2_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale0_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale1_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale2_score'], 1.0, places=4)
self.assertAlmostEqual(results[0]['VMAF_integer_feature_adm_scale3_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm2_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale0_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale1_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale2_score'], 1.0, places=4)
self.assertAlmostEqual(results[1]['VMAF_integer_feature_adm_scale3_score'], 1.0, places=4)
def test_run_psnr_fextractor_proc(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
callback_dict = {
'ref_proc_callback': 'identity',
'dis_proc_callback': 'multiply',
}
asset.asset_dict.update(callback_dict)
asset_original.asset_dict.update(callback_dict)
self.fextractor = PsnrFeatureExtractor(
[asset, asset_original],
None, fifo_mode=False,
result_store=None,
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
self.assertAlmostEqual(results[0]['PSNR_feature_psnr_score'], 27.645446604166665, places=8)
self.assertAlmostEqual(results[1]['PSNR_feature_psnr_score'], 31.87683660416667, places=8)
def test_run_pypsnr_fextractor(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_videos_for_testing()
self.fextractor = PypsnrFeatureExtractor(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
self.assertAlmostEqual(results[0]['Pypsnr_feature_psnry_score'], 30.755063979166664, places=4)
self.assertAlmostEqual(results[0]['Pypsnr_feature_psnru_score'], 38.449441057158786, places=4)
self.assertAlmostEqual(results[0]['Pypsnr_feature_psnrv_score'], 40.9919102486235, places=4)
self.assertAlmostEqual(results[1]['Pypsnr_feature_psnry_score'], 60.0, places=4)
self.assertAlmostEqual(results[1]['Pypsnr_feature_psnru_score'], 60.0, places=4)
self.assertAlmostEqual(results[1]['Pypsnr_feature_psnrv_score'], 60.0, places=4)
def test_run_pypsnr_fextractor_10bit(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_10bit_videos_for_testing()
self.fextractor = PypsnrFeatureExtractor(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
self.assertAlmostEqual(results[0]['Pypsnr_feature_psnry_score'], 30.780573260053277, places=4)
self.assertAlmostEqual(results[0]['Pypsnr_feature_psnru_score'], 38.769832063651364, places=4)
self.assertAlmostEqual(results[0]['Pypsnr_feature_psnrv_score'], 41.28418847734209, places=4)
self.assertAlmostEqual(results[1]['Pypsnr_feature_psnry_score'], 72.0, places=4)
self.assertAlmostEqual(results[1]['Pypsnr_feature_psnru_score'], 72.0, places=4)
self.assertAlmostEqual(results[1]['Pypsnr_feature_psnrv_score'], 72.0, places=4)
def test_run_pypsnr_fextractor_10bit_b(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_10bit_videos_for_testing_b()
self.fextractor = PypsnrFeatureExtractor(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
self.assertAlmostEqual(results[0]['Pypsnr_feature_psnry_score'], 32.57145231892744, places=4)
self.assertAlmostEqual(results[0]['Pypsnr_feature_psnru_score'], 39.03859552689696, places=4)
self.assertAlmostEqual(results[0]['Pypsnr_feature_psnrv_score'], 41.28060001337217, places=4)
self.assertAlmostEqual(results[1]['Pypsnr_feature_psnry_score'], 72.0, places=4)
self.assertAlmostEqual(results[1]['Pypsnr_feature_psnru_score'], 72.0, places=4)
self.assertAlmostEqual(results[1]['Pypsnr_feature_psnrv_score'], 72.0, places=4)
def test_run_pypsnr_fextractor_12bit(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_12bit_videos_for_testing()
self.fextractor = PypsnrFeatureExtractor(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
self.assertAlmostEqual(results[0]['Pypsnr_feature_psnry_score'], 32.577817940053734, places=4)
self.assertAlmostEqual(results[0]['Pypsnr_feature_psnru_score'], 39.044961148023255, places=4)
self.assertAlmostEqual(results[0]['Pypsnr_feature_psnrv_score'], 41.28696563449846, places=4)
self.assertAlmostEqual(results[1]['Pypsnr_feature_psnry_score'], 84.0, places=4)
self.assertAlmostEqual(results[1]['Pypsnr_feature_psnru_score'], 84.0, places=4)
self.assertAlmostEqual(results[1]['Pypsnr_feature_psnrv_score'], 84.0, places=4)
def test_run_pypsnr_fextractor_16bit(self):
ref_path, dis_path, asset, asset_original = set_default_576_324_16bit_videos_for_testing()
self.fextractor = PypsnrFeatureExtractor(
[asset, asset_original],
None, fifo_mode=True,
result_store=None
)
self.fextractor.run(parallelize=True)
results = self.fextractor.results
self.assertAlmostEqual(results[0]['Pypsnr_feature_psnry_score'], 32.579806240311484, places=4)
self.assertAlmostEqual(results[0]['Pypsnr_feature_psnru_score'], 39.046949448281005, places=4)
self.assertAlmostEqual(results[0]['Pypsnr_feature_psnrv_score'], 41.288953934756215, places=4)
self.assertAlmostEqual(results[1]['Pypsnr_feature_psnry_score'], 108.0, places=4)
self.assertAlmostEqual(results[1]['Pypsnr_feature_psnru_score'], 108.0, places=4)
self.assertAlmostEqual(results[1]['Pypsnr_feature_psnrv_score'], 108.0, places=4)
# Run the full test suite when executed directly; verbosity=2 prints each test name.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| true | true |
f724bec965759ccd317b2b385268f2ab47cb4ab2 | 1,838 | py | Python | scripts/convert_protocols_to_exams.py | timptner/farafmb.de | 2b154278d8b44ea3adecafcb8554c1b0b0055e01 | [
"MIT"
] | null | null | null | scripts/convert_protocols_to_exams.py | timptner/farafmb.de | 2b154278d8b44ea3adecafcb8554c1b0b0055e01 | [
"MIT"
] | 1 | 2022-02-17T20:28:19.000Z | 2022-02-17T20:28:19.000Z | scripts/convert_protocols_to_exams.py | timptner/farafmb.de | 2b154278d8b44ea3adecafcb8554c1b0b0055e01 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import json
from pathlib import Path
def get_valid_file_path(file_path: str) -> Path:
    """Resolve *file_path* and return it as an absolute Path.

    Raises an Exception with a user-facing message when the resolved
    path is not an existing regular file.
    """
    resolved = Path(file_path).resolve()
    if resolved.is_file():
        return resolved
    raise Exception("No file found! Please check your path and try again.")
def convert_data(data: list) -> list:
    """Convert fixture to new format.

    Mutates each entry in place: sets the model to ``exams.exam``, renames
    the minute-related fields, and adds ``is_archived``. Returns the same
    list object.
    """
    print(f"Found {len(data)} entries, updating ... ", end='')
    renames = (
        ('author', 'minute_author'),
        ('file', 'minute_file'),
        ('submitted', 'submitted_on'),
    )
    for entry in data:
        entry['model'] = 'exams.exam'
        fields = entry['fields']
        for old_key, new_key in renames:
            fields[new_key] = fields.pop(old_key)
        fields['is_archived'] = False
    print('Done!')
    return data
def get_valid_folder_path(folder_path: str) -> Path:
    """Check if folder exists and return valid Path object.

    Bug fix: the original tested ``path.parent.is_dir()``, which accepted a
    destination folder that does not itself exist; ``main`` would then fail
    later with an opaque FileNotFoundError when writing ``exams.json``.
    Validate the folder itself, matching the documented contract.

    Raises an Exception with a user-facing message otherwise.
    """
    path = Path(folder_path).resolve()
    if not path.is_dir():
        raise Exception("No folder found! Please check your path and try again.")
    return path
def main():
    """Main entry-point for script.

    Interactively reads a protocols dump, converts it to the new ``exams``
    fixture format, and writes ``exams.json`` into a user-chosen folder.
    Refuses to overwrite an existing ``exams.json``.
    """
    source = input("Please specify a file path where the dump file can be found.\n> ")
    path = get_valid_file_path(source)
    data: list = json.loads(path.read_text())
    data = convert_data(data)
    destination = input("Please specify a folder path where the new dump file should be stored.\n> ")
    path = get_valid_folder_path(destination)
    file = path / 'exams.json'
    if file.exists():
        raise Exception("File 'exams.json' already exists! Please move or delete the existing file first.")
    # Fix: write through the already-computed `file` instead of rebuilding
    # `path / 'exams.json'` a second time (same path, one source of truth).
    file.write_text(json.dumps(data, ensure_ascii=False))
    print("New file 'exams.json' created!")
# Allow running the converter directly as a script.
if __name__ == '__main__':
    main()
| 31.689655 | 107 | 0.654516 |
import json
from pathlib import Path
def get_valid_file_path(file_path: str) -> Path:
    """Resolve *file_path* and return it as a Path, raising if it is not an existing file."""
    path = Path(file_path).resolve()
    if not path.is_file():
        raise Exception("No file found! Please check your path and try again.")
    return path
def convert_data(data: list) -> list:
    """Rewrite each fixture entry in place to the ``exams.exam`` model.

    Renames the minute-related fields (author/file/submitted) and adds an
    ``is_archived`` flag; returns the same list object.
    """
    print(f"Found {len(data)} entries, updating ... ", end='')
    for item in data:
        item['model'] = 'exams.exam'
        fields: dict = item['fields']
        fields['minute_author'] = fields.pop('author')
        fields['minute_file'] = fields.pop('file')
        fields['submitted_on'] = fields.pop('submitted')
        fields['is_archived'] = False
    print('Done!')
    return data
def get_valid_folder_path(folder_path: str) -> Path:
    """Resolve *folder_path* and return it as a Path.

    NOTE(review): this validates ``path.parent`` rather than ``path`` itself,
    so a not-yet-existing destination folder is accepted here and would only
    fail later when the file is written — confirm this is intended.
    """
    path = Path(folder_path).resolve()
    if not path.parent.is_dir():
        raise Exception("No folder found! Please check your path and try again.")
    return path
def main():
    """Interactively convert a protocols dump into an ``exams.json`` fixture."""
    source = input("Please specify a file path where the dump file can be found.\n> ")
    path = get_valid_file_path(source)
    data: list = json.loads(path.read_text())
    data = convert_data(data)
    destination = input("Please specify a folder path where the new dump file should be stored.\n> ")
    path = get_valid_folder_path(destination)
    file = path / 'exams.json'
    # Refuse to overwrite an existing dump so previous output is never lost.
    if file.exists():
        raise Exception("File 'exams.json' already exists! Please move or delete the existing file first.")
    else:
        (path / 'exams.json').write_text(json.dumps(data, ensure_ascii=False))
    print("New file 'exams.json' created!")
# Allow running the converter directly as a script.
if __name__ == '__main__':
    main()
| true | true |
f724bf1ecded89830c078e1879e035935da3e2ed | 6,858 | py | Python | doc/user-manual/conf.py | phadej/agda | 2fa8ede09451d43647f918dbfb24ff7b27c52edc | [
"BSD-3-Clause"
] | null | null | null | doc/user-manual/conf.py | phadej/agda | 2fa8ede09451d43647f918dbfb24ff7b27c52edc | [
"BSD-3-Clause"
] | null | null | null | doc/user-manual/conf.py | phadej/agda | 2fa8ede09451d43647f918dbfb24ff7b27c52edc | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
# -- Project information -----------------------------------------------------

project = 'Agda'
copyright = u'''2005-2018 remains with the authors.
Agda 2 was originally written by Ulf Norell,
partially based on code from Agda 1 by Catarina Coquand and Makoto Takeyama,
and from Agdalight by Ulf Norell and Andreas Abel.
Agda 2 is currently actively developed mainly by Andreas Abel,
Guillaume Allais, Jesper Cockx, Nils Anders Danielsson, Philipp
Hausmann, Fredrik Nordvall Forsberg, Ulf Norell, Víctor López Juan,
Andrés Sicard-Ramírez, and Andrea Vezzosi.
Further, Agda 2 has received contributions by, amongst others, Stevan
Andjelkovic, Marcin Benke, Jean-Philippe Bernardy, Guillaume Brunerie,
James Chapman, Dominique Devriese, Péter Diviánszki, Olle Fredriksson,
Adam Gundry, Daniel Gustafsson, Kuen-Bang Hou (favonia), Patrik
Jansson, Alan Jeffrey, Wolfram Kahl, Wen Kokke, John Leo, Fredrik Lindblad,
Francesco Mazzoli, Stefan Monnier, Darin Morrison, Guilhem Moulin,
Nicolas Pouillard, Benjamin Price, Nobuo Yamashita, Christian Sattler,
Makoto Takeyama and Tesla Ice Zhang. The full list of contributors is
available at https://github.com/agda/agda/graphs/contributors'''
author = u'The Agda Team'

# The short X.Y version
version = '2.6.0'
# The full version, including alpha/beta/rc tags
release = version

# -- General configuration ---------------------------------------------------

# Minimal Sphinx version required to build this manual.
#
# If you change the version here also change it in the
# `requirements.txt` file [Issue #1936].
needs_sphinx = '1.8.3'

# Sphinx extension modules, as strings. They can be extensions coming
# with Sphinx (named 'sphinx.ext.*') or custom ones.
extensions = [
    'sphinx.ext.imgmath',
]

# Paths that contain templates, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames. The literate-Agda suffix must
# come first so '.lagda.rst' files are not matched as plain '.rst'.
source_suffix = ['.lagda.rst', '.rst']

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The name of the Pygments (syntax highlighting) style to use, and the
# default language for literal blocks without an explicit language.
pygments_style = 'sphinx'
highlight_language = 'Agda'

# -- Options for HTML output -------------------------------------------------

# Use the Read the Docs theme instead of a builtin one.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'Agdadoc'

# -- Options for LaTeX output ------------------------------------------------

# Extra files copied next to the generated LaTeX sources.
#
# BUG FIX: this variable used to be assigned twice; the second
# assignment silently discarded "mystyle.sty", so the custom style
# never reached the LaTeX build. Both files belong in one list.
latex_additional_files = [
    "mystyle.sty",
    "unicode-symbols-sphinx.tex.txt",
]

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble: packages needed for the
    # Unicode symbols used throughout the Agda sources.
    #
    'preamble': r'''
% Customised setup for certain characters.
\usepackage{amsmath}
\usepackage{bbm}
\usepackage{mathtools}
\usepackage{stmaryrd}
\usepackage{pifont}
\usepackage{keystroke}
\input{unicode-symbols-sphinx.tex.txt}
''',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Agda.tex', u'Agda User Manual', u'The Agda Team', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'agda', 'Agda Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Agda', 'Agda Documentation',
     author, 'Agda', 'One line description of project.',
     'Miscellaneous'),
]

# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# -- Extension configuration -------------------------------------------------
| 31.897674 | 79 | 0.681248 |
# -- Project information -----------------------------------------------------

project = 'Agda'
copyright = u'''2005-2018 remains with the authors.
Agda 2 was originally written by Ulf Norell,
partially based on code from Agda 1 by Catarina Coquand and Makoto Takeyama,
and from Agdalight by Ulf Norell and Andreas Abel.
Agda 2 is currently actively developed mainly by Andreas Abel,
Guillaume Allais, Jesper Cockx, Nils Anders Danielsson, Philipp
Hausmann, Fredrik Nordvall Forsberg, Ulf Norell, Víctor López Juan,
Andrés Sicard-Ramírez, and Andrea Vezzosi.
Further, Agda 2 has received contributions by, amongst others, Stevan
Andjelkovic, Marcin Benke, Jean-Philippe Bernardy, Guillaume Brunerie,
James Chapman, Dominique Devriese, Péter Diviánszki, Olle Fredriksson,
Adam Gundry, Daniel Gustafsson, Kuen-Bang Hou (favonia), Patrik
Jansson, Alan Jeffrey, Wolfram Kahl, Wen Kokke, John Leo, Fredrik Lindblad,
Francesco Mazzoli, Stefan Monnier, Darin Morrison, Guilhem Moulin,
Nicolas Pouillard, Benjamin Price, Nobuo Yamashita, Christian Sattler,
Makoto Takeyama and Tesla Ice Zhang. The full list of contributors is
available at https://github.com/agda/agda/graphs/contributors'''
author = u'The Agda Team'

# The short X.Y version; the full release is identical to it.
version = '2.6.0'
release = version

# Minimal Sphinx version required to build this manual.
#
# BUG FIX: this line had been mangled to the meaningless assignment
# `sphinx = '1.8.3'`, which Sphinx ignores entirely and which therefore
# disabled the minimum-version check. The real option is `needs_sphinx`.
needs_sphinx = '1.8.3'

# Sphinx extension modules to enable.
extensions = [
    'sphinx.ext.imgmath',
]

# Paths that contain templates, relative to this directory.
templates_path = ['_templates']

# Recognised source suffixes; literate Agda first so '.lagda.rst' is
# not matched as plain '.rst'.
source_suffix = ['.lagda.rst', '.rst']

# The master toctree document.
master_doc = 'index'

# No explicit language for autogenerated content.
language = None

# Files and directories ignored when looking for source files.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# Syntax-highlighting style and default literal-block language.
pygments_style = 'sphinx'
highlight_language = 'Agda'

# -- Options for HTML output -------------------------------------------------

# Use the Read the Docs theme instead of a builtin one.
import sphinx_rtd_theme
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'Agdadoc'

# -- Options for LaTeX output ------------------------------------------------

# Extra files copied next to the generated LaTeX sources.
#
# BUG FIX: this variable used to be assigned twice; the second
# assignment silently discarded "mystyle.sty". Both files belong in
# one list.
latex_additional_files = [
    "mystyle.sty",
    "unicode-symbols-sphinx.tex.txt",
]

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble: packages needed for the
    # Unicode symbols used throughout the Agda sources.
    #
    'preamble': r'''
% Customised setup for certain characters.
\usepackage{amsmath}
\usepackage{bbm}
\usepackage{mathtools}
\usepackage{stmaryrd}
\usepackage{pifont}
\usepackage{keystroke}
\input{unicode-symbols-sphinx.tex.txt}
''',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Agda.tex', u'Agda User Manual', u'The Agda Team', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'agda', 'Agda Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Agda', 'Agda Documentation',
     author, 'Agda', 'One line description of project.',
     'Miscellaneous'),
]

# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# -- Extension configuration -------------------------------------------------
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.