input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
"""Create a new data feed.
Create a new data feed.
:param body: parameters to create a data feed.
:type body: ~azure.ai.metricsadvisor.models.DataFeedDetail
:keyword callable cls: A custom type or function that will be passed the direct response
:return: None, or the result of cls(response)
:rtype: None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.create_data_feed.metadata['url'] # type: ignore
path_format_arguments = {
'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(body, 'DataFeedDetail')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorCode, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['Location']=self._deserialize('str', response.headers.get('Location'))
if cls:
return cls(pipeline_response, None, response_headers)
create_data_feed.metadata = {'url': '/dataFeeds'} # type: ignore
async def get_data_feed_by_id(
    self,
    data_feed_id: str,
    **kwargs: Any
) -> "_models.DataFeedDetail":
    """Get a data feed by its id.

    :param data_feed_id: The data feed unique id.
    :type data_feed_id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DataFeedDetail, or the result of cls(response)
    :rtype: ~azure.ai.metricsadvisor.models.DataFeedDetail
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DataFeedDetail"]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Build the request URL from the operation's metadata template.
    url = self._client.format_url(
        self.get_data_feed_by_id.metadata['url'],  # type: ignore
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        dataFeedId=self._serialize.url("data_feed_id", data_feed_id, 'str'),
    )

    query_parameters = {}  # type: Dict[str, Any]
    header_parameters = {
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        # Map well-known status codes to typed exceptions first, then fall
        # back to a generic HttpResponseError carrying the service error body.
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorCode, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('DataFeedDetail', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_data_feed_by_id.metadata = {'url': '/dataFeeds/{dataFeedId}'}  # type: ignore
async def update_data_feed(
    self,
    data_feed_id: str,
    body: Any,
    **kwargs: Any
) -> "_models.DataFeedDetail":
    """Update a data feed.

    :param data_feed_id: The data feed unique id.
    :type data_feed_id: str
    :param body: parameters to update a data feed.
    :type body: any
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: DataFeedDetail, or the result of cls(response)
    :rtype: ~azure.ai.metricsadvisor.models.DataFeedDetail
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.DataFeedDetail"]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    # merge-patch semantics: only the fields present in the body are changed
    content_type = kwargs.pop("content_type", "application/merge-patch+json")
    accept = "application/json"

    # Build the request URL from the operation's metadata template.
    url = self._client.format_url(
        self.update_data_feed.metadata['url'],  # type: ignore
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        dataFeedId=self._serialize.url("data_feed_id", data_feed_id, 'str'),
    )

    query_parameters = {}  # type: Dict[str, Any]
    header_parameters = {
        'Content-Type': self._serialize.header("content_type", content_type, 'str'),
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    body_content_kwargs = {'content': self._serialize.body(body, 'object')}  # type: Dict[str, Any]
    request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorCode, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('DataFeedDetail', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
update_data_feed.metadata = {'url': '/dataFeeds/{dataFeedId}'}  # type: ignore
async def delete_data_feed(
    self,
    data_feed_id: str,
    **kwargs: Any
) -> None:
    """Delete a data feed.

    :param data_feed_id: The data feed unique id.
    :type data_feed_id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: None, or the result of cls(response)
    :rtype: None
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType[None]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Build the request URL from the operation's metadata template.
    url = self._client.format_url(
        self.delete_data_feed.metadata['url'],  # type: ignore
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        dataFeedId=self._serialize.url("data_feed_id", data_feed_id, 'str'),
    )

    query_parameters = {}  # type: Dict[str, Any]
    header_parameters = {
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.delete(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    # The service answers 204 No Content on a successful delete.
    if response.status_code != 204:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorCode, response)
        raise HttpResponseError(response=response, model=error)

    if cls:
        return cls(pipeline_response, None, {})
delete_data_feed.metadata = {'url': '/dataFeeds/{dataFeedId}'}  # type: ignore
async def get_metric_feedback(
    self,
    feedback_id: str,
    **kwargs: Any
) -> "_models.MetricFeedback":
    """Get a metric feedback by its id.

    :param feedback_id: the unique feedback ID.
    :type feedback_id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: MetricFeedback, or the result of cls(response)
    :rtype: ~azure.ai.metricsadvisor.models.MetricFeedback
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.MetricFeedback"]
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
    }
    error_map.update(kwargs.pop('error_map', {}))
    accept = "application/json"

    # Build the request URL from the operation's metadata template.
    url = self._client.format_url(
        self.get_metric_feedback.metadata['url'],  # type: ignore
        endpoint=self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
        feedbackId=self._serialize.url("feedback_id", feedback_id, 'str'),
    )

    query_parameters = {}  # type: Dict[str, Any]
    header_parameters = {
        'Accept': self._serialize.header("accept", accept, 'str'),
    }  # type: Dict[str, Any]

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code != 200:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorCode, response)
        raise HttpResponseError(response=response, model=error)

    deserialized = self._deserialize('MetricFeedback', pipeline_response)
    if cls:
        return cls(pipeline_response, deserialized, {})
    return deserialized
get_metric_feedback.metadata = {'url': '/feedback/metric/{feedbackId}'}  # type: ignore
def list_metric_feedbacks(
    self,
    body: "_models.MetricFeedbackFilter",
    skip: Optional[int] = None,
    maxpagesize: Optional[int] = None,
    **kwargs: Any
) -> AsyncIterable["_models.MetricFeedbackList"]:
    """List feedback on the given metric.

    :param body: metric feedback filter.
    :type body: ~azure.ai.metricsadvisor.models.MetricFeedbackFilter
    :param skip: for paging, skipped number.
    :type skip: int
    :param maxpagesize: the maximum number of items in one page.
    :type maxpagesize: int
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: An iterator like instance of either MetricFeedbackList or the result of cls(response)
    :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.ai.metricsadvisor.models.MetricFeedbackList]
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.MetricFeedbackList"]
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    content_type = "application/json"
    accept = "application/json"

    def prepare_request(next_link=None):
        # Build a POST request; the filter body is sent on both the first
        # page and every next-link page (the service is POST-paginated).
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        query_parameters = {}  # type: Dict[str, Any]
        if not next_link:
            url = self.list_metric_feedbacks.metadata['url']  # type: ignore
            path_format_arguments = {
                'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
            }
            # Paging options apply only to the initial request; the service
            # encodes them into the next link afterwards.
            if skip is not None:
                query_parameters['$skip'] = self._serialize.query("skip", skip, 'int')
            if maxpagesize is not None:
                query_parameters['$maxpagesize'] = self._serialize.query("maxpagesize", maxpagesize, 'int')
        else:
            url = '{nextLink}'  # FIXME: manually edited; was '/{nextLink}'
            path_format_arguments = {
                'endpoint': self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True),
                'nextLink': self._serialize.url("next_link", next_link, 'str', skip_quote=True),
            }
        url = self._client.format_url(url, **path_format_arguments)
        # Serialize the body once per request (previously duplicated in both branches).
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content_kwargs['content'] = self._serialize.body(body, 'MetricFeedbackFilter')
        return self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)

    async def extract_data(pipeline_response):
        deserialized = self._deserialize('MetricFeedbackList', pipeline_response)
        list_of_elem = deserialized.value
        if cls:
            list_of_elem = cls(list_of_elem)
        return deserialized.next_link or None, AsyncList(list_of_elem)

    async def get_next(next_link=None):
        request = prepare_request(next_link)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            # Fix: map well-known status codes first (consistent with the other
            # operations); only deserialize the error body for the generic path,
            # instead of doing it before map_error where it may be wasted.
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorCode, response)
            raise HttpResponseError(response=response, model=error)
        return pipeline_response

    return AsyncItemPaged(
        get_next, extract_data
    )
list_metric_feedbacks.metadata = {'url': '/feedback/metric/query'}  # type: ignore
async def create_metric_feedback(
| |
pass
def to_map(self):
    """Serialize the model into a dict, omitting fields that are ``None``."""
    result = dict()
    for field_name in ('req_msg_id', 'result_code', 'result_msg', 'data'):
        field_value = getattr(self, field_name)
        if field_value is not None:
            result[field_name] = field_value
    return result
def from_map(self, m: dict = None):
    """Populate the model from dict *m* (ignoring ``None`` values); return self."""
    m = m or dict()
    for field_name in ('req_msg_id', 'result_code', 'result_msg', 'data'):
        field_value = m.get(field_name)
        if field_value is not None:
            setattr(self, field_name, field_value)
    return self
class QueryLeaseRealpersonRequest(TeaModel):
    """Request model for querying a lease real-person authentication result."""

    # Serialized field names; wire key == attribute name for this model.
    _FIELDS = ('auth_token', 'product_instance_id', 'certify_id', 'outer_order_no')

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        certify_id: str = None,
        outer_order_no: str = None,
    ):
        # Authorization token under the OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Unique identifier of the real-person authentication.
        self.certify_id = certify_id
        # Unique merchant-side request id: 32 alphanumeric characters, typically a
        # merchant prefix followed by a random or incrementing sequence (a UUID also works).
        self.outer_order_no = outer_order_no

    def validate(self):
        self.validate_required(self.certify_id, 'certify_id')
        self.validate_required(self.outer_order_no, 'outer_order_no')

    def to_map(self):
        # Only fields that were actually set are serialized.
        return {name: getattr(self, name) for name in self._FIELDS if getattr(self, name) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in self._FIELDS:
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class QueryLeaseRealpersonResponse(TeaModel):
    """Response model for the lease real-person authentication query."""

    # Serialized field names; wire key == attribute name for this model.
    _FIELDS = ('req_msg_id', 'result_code', 'result_msg', 'data')

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        data: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally indicates a successful call.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Business payload, JSON-formatted string.
        self.data = data

    def validate(self):
        pass

    def to_map(self):
        # Only fields that were actually set are serialized.
        return {name: getattr(self, name) for name in self._FIELDS if getattr(self, name) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in self._FIELDS:
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class QueryBaiOcrRequest(TeaModel):
    """Request model for the BAI OCR service."""

    # (attribute, wire key) pairs; note that source_base_64 maps to the wire
    # key 'source_base64' — this asymmetry is part of the wire format.
    _FIELD_MAP = (
        ('auth_token', 'auth_token'),
        ('product_instance_id', 'product_instance_id'),
        ('app_key', 'app_key'),
        ('ocr_type', 'ocr_type'),
        ('source_type', 'source_type'),
        ('source_base_64', 'source_base64'),
        ('source_config_side', 'source_config_side'),
    )

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        app_key: str = None,
        ocr_type: str = None,
        source_type: str = None,
        source_base_64: str = None,
        source_config_side: str = None,
    ):
        # Authorization token under the OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Caller identity, indicating the origin of the request.
        self.app_key = app_key
        # OCR business type; currently supported:
        #   businessLicense - business license recognition
        #   idCard          - identity card recognition
        #   bankCard        - bank card recognition
        #   VATInvoice      - VAT invoice recognition
        self.ocr_type = ocr_type
        # Resource type of the request; currently supported: image, pdf.
        self.source_type = source_type
        # Base64-encoded content of the image or PDF.
        self.source_base_64 = source_base_64
        # Extra resource attribute, e.g. for id cards: face (portrait side)
        # or back (national-emblem side).
        self.source_config_side = source_config_side

    def validate(self):
        self.validate_required(self.app_key, 'app_key')
        self.validate_required(self.ocr_type, 'ocr_type')
        self.validate_required(self.source_type, 'source_type')
        self.validate_required(self.source_base_64, 'source_base_64')

    def to_map(self):
        # Only fields that were actually set are serialized.
        return {wire: getattr(self, attr) for attr, wire in self._FIELD_MAP if getattr(self, attr) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for attr, wire in self._FIELD_MAP:
            if m.get(wire) is not None:
                setattr(self, attr, m.get(wire))
        return self
class QueryBaiOcrResponse(TeaModel):
    """Response model for the BAI OCR service."""

    # Serialized field names; wire key == attribute name for this model.
    _FIELDS = ('req_msg_id', 'result_code', 'result_msg', 'data')

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        data: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally indicates a successful call.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Result payload, JSON string.
        self.data = data

    def validate(self):
        pass

    def to_map(self):
        # Only fields that were actually set are serialized.
        return {name: getattr(self, name) for name in self._FIELDS if getattr(self, name) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in self._FIELDS:
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class QueryIotplatformPurchaseorderRequest(TeaModel):
    """Request model for querying an IoT-platform purchase order by device serial."""

    # Serialized field names; wire key == attribute name for this model.
    _FIELDS = ('auth_token', 'product_instance_id', 'tenant_id', 'serial_number')

    def __init__(
        self,
        auth_token: str = None,
        product_instance_id: str = None,
        tenant_id: str = None,
        serial_number: str = None,
    ):
        # Authorization token under the OAuth mode.
        self.auth_token = auth_token
        self.product_instance_id = product_instance_id
        # Tenant id.
        self.tenant_id = tenant_id
        # Device serial number.
        self.serial_number = serial_number

    def validate(self):
        self.validate_required(self.tenant_id, 'tenant_id')
        self.validate_required(self.serial_number, 'serial_number')

    def to_map(self):
        # Only fields that were actually set are serialized.
        return {name: getattr(self, name) for name in self._FIELDS if getattr(self, name) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in self._FIELDS:
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class QueryIotplatformPurchaseorderResponse(TeaModel):
    """Response model describing a purchase order recorded on the IoT platform."""

    # Serialized field names; wire key == attribute name for this model.
    _FIELDS = (
        'req_msg_id',
        'result_code',
        'result_msg',
        'goods_from_chain',
        'purchase_time',
        'lease_name',
        'purchase_order_price',
        'goods_name',
    )

    def __init__(
        self,
        req_msg_id: str = None,
        result_code: str = None,
        result_msg: str = None,
        goods_from_chain: bool = None,
        purchase_time: str = None,
        lease_name: str = None,
        purchase_order_price: str = None,
        goods_name: str = None,
    ):
        # Unique request id, used for tracing and troubleshooting.
        self.req_msg_id = req_msg_id
        # Result code; "OK" generally indicates a successful call.
        self.result_code = result_code
        # Textual description of the error, if any.
        self.result_msg = result_msg
        # Whether the goods were purchased on-chain: True = yes, False = no.
        self.goods_from_chain = goods_from_chain
        # Purchase time (ISO-8601 string, validated below).
        self.purchase_time = purchase_time
        # Purchaser (lease company) name.
        self.lease_name = lease_name
        # Purchase price.
        self.purchase_order_price = purchase_order_price
        # Goods name.
        self.goods_name = goods_name

    def validate(self):
        # purchase_time must look like an ISO-8601 timestamp with a timezone.
        if self.purchase_time is not None:
            self.validate_pattern(self.purchase_time, 'purchase_time', '\\d{4}[-]\\d{1,2}[-]\\d{1,2}[T]\\d{2}:\\d{2}:\\d{2}([Z]|([\\.]\\d{1,9})?[\\+]\\d{2}[\\:]?\\d{2})')

    def to_map(self):
        # Only fields that were actually set are serialized.
        return {name: getattr(self, name) for name in self._FIELDS if getattr(self, name) is not None}

    def from_map(self, m: dict = None):
        m = m or dict()
        for name in self._FIELDS:
            if m.get(name) is not None:
                setattr(self, name, m.get(name))
        return self
class ImportIotplatformMeshidRequest(TeaModel):
def __init__(
self,
auth_token: str = None,
product_instance_id: str = None,
tenant_id: str = None,
company_name: str = None,
mesh_id: str = None,
device_sn: str = None,
type: str = None,
agent_name: str = None,
):
# OAuth模式下的授权token
self.auth_token = auth_token
self.product_instance_id = product_instance_id
# 客户的金融云租户ID
self.tenant_id = tenant_id
# 客户的公司名称
self.company_name = company_name
# 设备按照MeshAgent后获取的MeshId
self.mesh_id = mesh_id
# 客户自定义的业务上使用的设备标识,需要与租赁业务上使用的ID进行对应
self.device_sn = device_sn
# 设备类型字段
self.type = type
# 代理商名称,用于二级代理模式
self.agent_name = agent_name
def validate(self):
self.validate_required(self.tenant_id, 'tenant_id')
self.validate_required(self.company_name, 'company_name')
self.validate_required(self.mesh_id, 'mesh_id')
self.validate_required(self.device_sn, 'device_sn')
def to_map(self):
result = dict()
if self.auth_token is not None:
result['auth_token'] = self.auth_token
if self.product_instance_id is not None:
result['product_instance_id'] = self.product_instance_id
if self.tenant_id is not None:
result['tenant_id'] = self.tenant_id
if self.company_name is not None:
result['company_name'] = self.company_name
if self.mesh_id is not None:
result['mesh_id'] = self.mesh_id
if self.device_sn is not None:
result['device_sn'] = self.device_sn
if self.type is not None:
result['type'] = self.type
if self.agent_name is not None:
result['agent_name'] = self.agent_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('auth_token') is not None:
self.auth_token = m.get('auth_token')
if m.get('product_instance_id') is not None:
self.product_instance_id = m.get('product_instance_id')
if m.get('tenant_id') is not None:
self.tenant_id = m.get('tenant_id')
if m.get('company_name') is not None:
self.company_name = m.get('company_name')
if m.get('mesh_id') is not None:
self.mesh_id = m.get('mesh_id')
if m.get('device_sn') is not None:
self.device_sn = m.get('device_sn')
if m.get('type') is not None:
| |
#!/usr/bin/env python3
import base64
import getpass
import os
import pprint
import shutil
import sys
import time
from contextlib import suppress
from pathlib import Path
from time import sleep
from typing import Dict, List
import psutil
from broker import cfg
from broker._utils import _log
from broker._utils._log import br, log, ok
from broker._utils.tools import _remove, exit_after, mkdirs, pid_exists, read_json
from broker._utils.web3_tools import get_tx_status
from broker.config import env
from broker.errors import QuietExit
from broker.imports import connect
from broker.lib import (
calculate_size,
eblocbroker_function_call,
is_dir,
remove_files,
run,
run_stdout_to_file,
state,
subprocess_call,
)
from broker.libs import _git, eudat, gdrive, slurm
from broker.utils import (
WHERE,
StorageID,
byte_to_mb,
bytes32_to_ipfs,
eth_address_to_md5,
is_dir_empty,
print_tb,
read_file,
remove_empty_files_and_folders,
)
# Module-level handles shared by all storage classes below.
ipfs = cfg.ipfs  # IPFS client taken from the broker configuration
Ebb = cfg.Ebb  # broker/contract facade used for on-chain calls
connect()  # NOTE: establishes the connection at import time (side effect)
class Common:
    """Prevent 'Class' to have attribute 'method' as mypy warnings."""

    def __init__(self) -> None:
        # Default (empty) values; subclasses and ENDCODE fill these in.
        self.requester_home_path = Path("")
        self.results_folder: Path = Path("")
        self.results_folder_prev: Path = Path("")
        self.patch_file: Path = Path("")
        self.patch_upload_fn = ""
        self.requester_gpg_fingerprint: str = ""
        self.data_transfer_out: int = 0

    @exit_after(900)
    def _get_tx_status(self, tx_hash):
        """Fetch the transaction status; aborted by the decorator after 900s."""
        get_tx_status(tx_hash)

    def initialize(self):
        """Hook for storage-specific setup; no-op by default."""
class Ipfs(Common):
    def upload(self, *_):
        """No-op: plain IPFS results require no explicit upload step."""
        return None
class IpfsGPG(Common):
    def upload(self, *_):
        """GPG-encrypt the patch file for the requester once patching is done."""
        try:
            provider_fingerprint = ipfs.get_gpg_fingerprint(env.GMAIL).upper()
            ipfs.gpg_encrypt(provider_fingerprint, self.requester_gpg_fingerprint, self.patch_file)
        except Exception as e:
            # don't leave a partial/unencrypted patch behind on failure
            _remove(self.patch_file)
            raise e
class Eudat(Common):
    """Storage backend that uploads result patches to EUDAT shares."""

    def __init__(self) -> None:
        # fix: chain to Common.__init__ so shared attributes such as
        # patch_file, patch_upload_fn and data_transfer_out are initialized
        # even when Eudat is used on its own.
        super().__init__()
        self.encoded_share_tokens = {}  # type: Dict[str, str]
        self.patch_dir: Path = Path("")

    def initialize(self):
        """Log in to the EUDAT client (best effort) and fetch share tokens."""
        with suppress(Exception):
            eudat.login(env.OC_USER, env.LOG_PATH.joinpath(".eudat_client.txt"), env.OC_CLIENT)

        try:
            self.get_shared_tokens()
        except Exception as e:
            print_tb(e)
            raise e

    def upload(self, code_hash, *_):
        """Upload the patch for *code_hash*, skipping when already uploaded.

        :param code_hash: source-code hash the patch belongs to
        """
        with suppress(Exception):  # first time uploading
            uploaded_file_size = eudat.get_size(f_name=f"{code_hash}/{self.patch_upload_fn}")
            size_in_bytes = calculate_size(self.patch_file, _type="bytes")
            # identical size on the remote means the upload already happened
            if uploaded_file_size == float(size_in_bytes):
                log(f"==> {self.patch_file} is already uploaded")
                return

        try:
            _data_transfer_out = calculate_size(self.patch_file)
            log(f"==> {br(code_hash)}.data_transfer_out={_data_transfer_out}MB")
            self.data_transfer_out += _data_transfer_out
            eudat.upload_results(
                self.encoded_share_tokens[code_hash], self.patch_upload_fn, self.patch_dir, max_retries=5
            )
        except Exception as e:
            raise e
class Gdrive(Common):
    def upload(self, key, is_job_key):
        """Upload generated result into gdrive.

        :param key: key of the shared gdrive file
        :param is_job_key: when falsy, *key* is remapped via the folder's data-key ids
        :raises Exception: when the key has no match or the upload fails
        """
        # fix: meta_data is interpolated into the error message below, but was
        # previously unbound (NameError) when is_job_key is truthy and the
        # gdrive info call raised — masking the real error.
        meta_data = None
        try:
            if not is_job_key:
                meta_data = gdrive.get_data_key_ids(self.results_folder_prev)
                key = meta_data[key]

            cmd = [env.GDRIVE, "info", "--bytes", key, "-c", env.GDRIVE_METADATA]
            gdrive_info = subprocess_call(cmd, 5, sleep_time=30)
        except Exception as e:
            raise Exception(f"{WHERE(1)} E: {key} does not have a match. meta_data={meta_data}. {e}") from e

        mime_type = gdrive.get_file_info(gdrive_info, "Mime")
        log(f"mime_type=[magenta]{mime_type}", "bold")
        self.data_transfer_out += calculate_size(self.patch_file)
        log(f"data_transfer_out={self.data_transfer_out} MB =>" f" rounded={int(self.data_transfer_out)} MB", "bold")
        # folders get a new child upload; archives update the existing file
        if "folder" in mime_type:
            cmd = [env.GDRIVE, "upload", "--parent", key, self.patch_file, "-c", env.GDRIVE_METADATA]
        elif "gzip" in mime_type or "/zip" in mime_type:
            cmd = [env.GDRIVE, "update", key, self.patch_file, "-c", env.GDRIVE_METADATA]
        else:
            raise Exception("Files could not be uploaded")

        try:
            log(subprocess_call(cmd, 5))
        except Exception as e:
            print_tb(e)
            raise Exception("gdrive could not upload the file") from e
class ENDCODE(IpfsGPG, Ipfs, Eudat, Gdrive):
def __init__(self, **kwargs) -> None:
    """Prepare the end-of-job environment for a completed slurm job.

    Pops the job identifiers from *kwargs*, fetches the on-chain job and
    requester info, lays out the results/patch folders, and blocks until any
    still-running job-state process for this (job_key, index) finishes.
    Exits the process on unrecoverable setup errors.
    """
    # only the values are logged; the keys are intentionally ignored
    args = " ".join(["{!r}".format(v) for k, v in kwargs.items()])
    self.job_key: str = kwargs.pop("job_key")
    self.index = int(kwargs.pop("index"))
    self.received_bn: int = kwargs.pop("received_bn")
    self.folder_name: str = kwargs.pop("folder_name")
    self.slurm_job_id: int = kwargs.pop("slurm_job_id")
    self.share_tokens = {}  # type: Dict[str, str]
    self.requester_id_address = ""
    self.data_transfer_in = 0
    self.data_transfer_out = 0
    self.elapsed_time = 0
    self.code_hashes_to_process: List[str] = []
    self.code_hashes: List[str] = []
    self.result_ipfs_hash: str = ""
    self.requester_gpg_fingerprint: str = ""
    self.end_timestamp = ""
    self.modified_date = None
    self.encoded_share_tokens = {}  # type: Dict[str, str]
    #: Set environment variables: https://stackoverflow.com/a/5971326/2402577
    os.environ["IPFS_PATH"] = str(env.HOME.joinpath(".ipfs"))
    # per-job log file: <LOG_PATH>/end_code_output/<job_key>_<index>.log
    _log.ll.LOG_FILENAME = Path(env.LOG_PATH) / "end_code_output" / f"{self.job_key}_{self.index}.log"
    self.job_id: int = 0  # TODO: should be mapped to slurm_job_id
    log(f"{env.EBLOCPATH}/broker/end_code.py {args}", "bold blue", is_code=True)
    log(f"==> slurm_job_id={self.slurm_job_id}")
    # NOTE(review): job_key is a str and index is an int at this point, so this
    # equality can never hold as written — confirm whether the intent was to
    # compare string forms before index was cast to int.
    if self.job_key == self.index:
        log("E: Given key and index are equal to each other")
        sys.exit(1)

    try:
        # retried fetch of the job info snapshot taken at received_bn
        self.job_info = eblocbroker_function_call(
            lambda: Ebb.get_job_info(
                env.PROVIDER_ID,
                self.job_key,
                self.index,
                self.job_id,
                self.received_bn,
            ),
            max_retries=10,
        )
        self.storage_ids = self.job_info["cloudStorageID"]
        requester_id = self.job_info["job_owner"]
        # md5 of the requester address is used as the per-requester folder name
        self.requester_id_address = eth_address_to_md5(requester_id)
        self.requester_info = Ebb.get_requester_info(requester_id)
    except Exception as e:
        log(f"E: {e}")
        sys.exit(1)

    # folder layout: <PROGRAM_PATH>/<requester_md5>/<job_key>_<index>/JOB_TO_RUN
    self.requester_home_path = env.PROGRAM_PATH / self.requester_id_address
    self.results_folder_prev: Path = self.requester_home_path / f"{self.job_key}_{self.index}"
    self.results_folder = self.results_folder_prev / "JOB_TO_RUN"
    if not is_dir(self.results_folder) and not is_dir(self.results_folder_prev):
        # nothing to collect — the job folders were never created
        sys.exit(1)

    self.results_data_link = Path(self.results_folder_prev) / "data_link"
    self.results_data_folder = Path(self.results_folder_prev) / "data"
    self.private_dir = Path(env.PROGRAM_PATH) / self.requester_id_address / "cache"
    self.patch_dir = Path(self.results_folder_prev) / "patch"
    self.patch_dir_ipfs = Path(self.results_folder_prev) / "patch_ipfs"
    mkdirs([self.patch_dir, self.patch_dir_ipfs])
    remove_empty_files_and_folders(self.results_folder)
    log(f"==> whoami={getpass.getuser()} | id={os.getegid()}")
    log(f"==> home={env.HOME}")
    log(f"==> pwd={os.getcwd()}")
    log(f"==> results_folder={self.results_folder}")
    log(f"==> provider_id={env.PROVIDER_ID}")
    log(f"==> job_key={self.job_key}")
    log(f"==> index={self.index}")
    log(f"==> storage_ids={self.storage_ids}")
    log(f"==> folder_name=[white]{self.folder_name}")
    log(f"==> requester_id_address={self.requester_id_address}")
    log(f"==> received={self.job_info['received']}")
    # wait for the job_state_running process (if any) to release the job
    self.job_state_running_pid = Ebb.mongo_broker.get_job_state_running_pid(self.job_key, self.index)
    with suppress(Exception):
        p = psutil.Process(int(self.job_state_running_pid))
        log(p)

    while True:
        if not pid_exists(self.job_state_running_pid):
            break
        else:
            log("#> job_state_running() is still running, sleeping for 15 seconds")
            sleep(15)

    self.job_state_running_tx = Ebb.mongo_broker.get_job_state_running_tx(self.job_key, self.index)
    log(f"==> job_state_running_tx={self.job_state_running_tx}")
def get_shared_tokens(self):
    """Collect share tokens for every code hash to process.

    Tokens are read from the cached ``<job_key>_share_id.json`` file when
    present; on a cache miss the mongo broker is queried as a fallback.
    Each token is also stored base64-encoded (``token:`` form) in
    ``self.encoded_share_tokens`` for later use as an HTTP basic-auth value.

    :raises Exception: re-raised when a token cannot be found in either source.
    """

    def _encode(token) -> str:
        # basic-auth style "<token>:" pair, base64-encoded
        return base64.b64encode((f"{token}:").encode("utf-8")).decode("utf-8")

    share_ids = {}
    with suppress(Exception):
        # best-effort read; fall back to the mongo broker below on any failure
        share_ids = read_json(f"{self.private_dir}/{self.job_key}_share_id.json")

    for source_code_hash in self.code_hashes_to_process:
        try:
            share_token = share_ids[source_code_hash]["share_token"]
        except KeyError:  # was a bare `except:`; only the dict lookups can fail here
            try:
                shared_id = Ebb.mongo_broker.find_shareid_item(f"{self.job_key}_{self.requester_id_address[:16]}")
                share_token = shared_id["share_token"]
            except Exception as e:
                log(f"E: share_id cannot be detected from key={self.job_key}")
                raise e

        self.share_tokens[source_code_hash] = share_token
        self.encoded_share_tokens[source_code_hash] = _encode(share_token)

    for key in share_ids:
        value = share_ids[key]
        try:
            encoded_value = self.encoded_share_tokens[key]
        except KeyError:  # was a bare `except:`; dict lookup can only raise KeyError
            encoded_value = _encode(share_ids[key]["share_token"])

        log(f"## shared_tokens: {key} => {value['share_token']} | encoded={encoded_value}")
def get_cloud_storage_class(self, _id):
    """Return cloud storage used for the id of the data."""
    # Map each known storage backend id onto its handler class.
    backends = {
        StorageID.IPFS: Ipfs,
        StorageID.IPFS_GPG: IpfsGPG,
        StorageID.EUDAT: Eudat,
        StorageID.GDRIVE: Gdrive,
    }
    storage_class = backends.get(self.storage_ids[_id])
    if storage_class is None:
        raise Exception(f"corresponding storage_id_class={self.storage_ids[_id]} does not exist")

    return storage_class
def set_code_hashes_to_process(self):
    """Decode each raw code hash into its processable form.

    IPFS-backed entries are converted from bytes32 to an IPFS hash; all
    other entries are decoded as plain text via web3.
    """
    for idx, raw_hash in enumerate(self.code_hashes):
        if self.storage_ids[idx] in (StorageID.IPFS, StorageID.IPFS_GPG):
            decoded = bytes32_to_ipfs(raw_hash)
        else:
            decoded = cfg.w3.toText(raw_hash)

        self.code_hashes_to_process.append(decoded)
def _ipfs_add_folder(self, folder_path):
    """Add *folder_path* to IPFS, pin it, and account its size as outbound transfer.

    Sets ``self.result_ipfs_hash`` and adds the folder's cumulative size
    (in MB) to ``self.data_transfer_out``.
    """
    try:
        self.result_ipfs_hash = ipfs.add(folder_path)
        log(f"==> result_ipfs_hash={self.result_ipfs_hash}")
        ipfs.pin(self.result_ipfs_hash)
        size_in_bytes = ipfs.get_cumulative_size(self.result_ipfs_hash)
    except Exception as e:
        print_tb(e)
        raise e

    self.data_transfer_out += byte_to_mb(size_in_bytes)
def process_payment_tx(self):
    """Submit the processPayment transaction for this job and return its tx hash.

    Exits the process with status 1 if the transaction cannot be submitted.
    """
    payment_args = (
        self.job_key,
        self.index,
        self.job_id,
        self.elapsed_time,
        self.result_ipfs_hash,
        self.storage_ids,
        self.end_timestamp,
        self.data_transfer_in,
        self.data_transfer_out,
        self.job_info["core"],
        self.job_info["run_time"],
        self.received_bn,
    )
    try:
        tx_hash = Ebb.process_payment(*payment_args)
    except Exception as e:
        print_tb(e)
        sys.exit(1)

    log(f"==> [white]process_payment {self.job_key} {self.index}")
    return tx_hash
def clean_before_upload(self) -> None:
    """Delete leftover node-xmlhttprequest temp files from the results folder."""
    leftover_pattern = f"{self.results_folder}/.node-xmlhttprequest*"
    remove_files(leftover_pattern)
def remove_source_code(self) -> None:
    """Client's initial downloaded files are removed.

    Uses ``find`` to select files in the results folder that are not newer
    than the recorded ``timestamp.txt`` (i.e. the originally downloaded
    inputs), logs them, then deletes them so only job-produced files remain.
    """
    timestamp_fn = f"{self.results_folder_prev}/timestamp.txt"
    # base `find` selection; reused below with "-delete" appended
    cmd = ["find", self.results_folder, "-type", "f", "!", "-newer", timestamp_fn]
    try:
        files_to_remove = run(cmd)
        if files_to_remove:
            log(f"## Files to be removed: \n{files_to_remove}\n")
    except Exception as e:
        print_tb(e)
        sys.exit(1)  # was sys.exit() (exit status 0); a failure should exit non-zero

    run(cmd + ["-delete"])
def git_diff_patch_and_upload(self, source_fn: Path, name, storage_class, is_job_key):
    """Create a git diff patch for *source_fn* and upload it via *storage_class*.

    :param source_fn: folder to diff (the job's results folder or a data folder)
    :param name: job key (when is_job_key) or data file name; used to name the patch
    :param storage_class: backend class (Ipfs, IpfsGPG, Eudat, Gdrive) providing upload()
    :param is_job_key: True when patching the main source code rather than a data file
    :raises Exception: wraps any failure from diffing or uploading
    """
    if is_job_key:
        log(f"==> base_patch={self.patch_dir}")
        log(f"==> source_code_patch={name}")
    else:
        log(f"==> datafile_patch={name}")

    try:
        # IPFS-backed patches are collected separately so they can all be
        # added to IPFS in one batch after patching completes (see upload_driver)
        if storage_class is Ipfs or storage_class is IpfsGPG:
            target_path = self.patch_dir_ipfs
        else:
            target_path = self.patch_dir

        self.patch_upload_fn, self.patch_file, is_file_empty = _git.diff_patch(
            source_fn, name, self.index, target_path, self.requester_home_path
        )
        # an empty patch means nothing changed; skip the upload entirely
        if not is_file_empty:
            try:
                storage_class.upload(self, name, is_job_key)
            except Exception as e:
                print_tb(e)
                raise e
    except Exception as e:
        print_tb(e)
        raise Exception("Problem on the git_diff_patch_and_upload() function") from e
def upload_driver(self):
    """Patch and upload the job's results: source code first, then each data file.

    Index 0 of ``code_hashes_to_process`` is the source code (identified by
    the job key); remaining indices are data files. Entries whose storage id
    is ``StorageID.NONE`` are skipped. Any IPFS-destined patches are added
    to IPFS in one batch at the end.
    """
    self.clean_before_upload()
    # source code patch (index 0) — identified by the job key
    storage_class = self.get_cloud_storage_class(0)
    self.git_diff_patch_and_upload(self.results_folder, self.job_key, storage_class, is_job_key=True)
    for idx, name in enumerate(self.code_hashes_to_process[1:], 1):
        # starting from 1st index for data files
        try:
            if self.storage_ids[idx] != StorageID.NONE:
                storage_class = self.get_cloud_storage_class(idx)
                self.git_diff_patch_and_upload(
                    self.results_data_folder / name, name, storage_class, is_job_key=False
                )
        except Exception as e:
            print_tb(e)
            raise

    if not is_dir_empty(self.patch_dir_ipfs):
        # it will upload files after all the patchings are completed
        # in case any file is created via ipfs
        self._ipfs_add_folder(self.patch_dir_ipfs)
def sacct_result(self):
    """Return sacct output for the job.

    CPUTime = NCPUS * Elapsed
    To get stats about real CPU usage you need to look at SystemCPU and
    UserCPU, but the docs warns that it only measure CPU time for the parent
    process and not for child processes.

    Writes two sacct report sections into ``slurm_job_info.out``, then
    rewrites the file with dashes replaced (when a ``--`` separator row is
    present) so downstream formatting is not broken.
    """
    slurm_log_output_fn = f"{self.results_folder}/slurm_job_info.out"
    slurm_log_output_fn_temp = f"{self.results_folder}/slurm_job_info.out~"
    cmd = ["sacct", "-X", "--job", self.slurm_job_id, "--format"]
    cmd.append("jobID,jobname,user,account,group,cluster,allocCPUS,REQMEM,TotalCPU,elapsed")
    run_stdout_to_file(cmd, slurm_log_output_fn)
    with open(slurm_log_output_fn, "a") as f:
        f.write("\n\n")

    # second report section with node/task level columns, appended to the same file
    cmd.pop()
    cmd.append("NNodes,NTasks,ncpus,CPUTime,State,ExitCode,End,CPUTime,MaxRSS")
    run_stdout_to_file(cmd, slurm_log_output_fn, mode="a")
    with open(slurm_log_output_fn, "a") as f:
        f.write("\n")

    shutil.move(slurm_log_output_fn, slurm_log_output_fn_temp)
    # "w" mode truncates/creates the file, so the previous explicit
    # open(fn, "w").close() pre-create step was redundant and is removed
    with open(slurm_log_output_fn_temp) as f1, open(slurm_log_output_fn, "w") as f2:
        line = f1.read().strip()
        if "--" in line:
            # NOTE: replaces every "-" in the output, not only the "--" separator row
            line = line.replace("-", "=")

        f2.write(line)

    os.remove(slurm_log_output_fn_temp)
def get_job_info(self, is_print=False, is_log_print=True) -> None:
    """Refresh ``self.job_info`` from the blockchain via a single retried call."""

    def _fetch():
        # thin closure so eblocbroker_function_call controls the retry policy
        return Ebb.get_job_info(
            env.PROVIDER_ID,
            self.job_key,
            self.index,
            self.job_id,
            self.received_bn,
            is_print=is_print,
            is_log_print=is_log_print,
        )

    self.job_info = eblocbroker_function_call(_fetch, max_retries=1)
def attemp_get_job_info(self):
is_print = True
sleep_time = 30
for attempt in range(10):
# log(self.job_info)
if self.job_info["stateCode"] == state.code["RUNNING"]:
# it will come here eventually, when setJob() is deployed. Wait
# until does values updated on the blockchain
log("## job has been started")
return
if self.job_info["stateCode"] == state.code["COMPLETED"]:
# detects an error on the slurm side
log("warning: job is already completed and its money is received")
self.get_job_info()
raise QuietExit
try:
self.job_info = Ebb.get_job_info(
env.PROVIDER_ID, self.job_key, self.index, self.job_id, self.received_bn, is_print
)
is_print = False
except Exception as e:
print_tb(e)
# sys.exit(1)
# sleep here so this loop | |
import io
import logging
import os
import textwrap
from base64 import b64decode
from timeit import default_timer as timer
import coloredlogs
import discord
import humanize
import nltk
from discord.ext import commands
from dotenv import load_dotenv
from nltk.tokenize import sent_tokenize
from pathvalidate import sanitize_filename
from profanity_check import predict_prob
from wamp_http import NoBackendAvailable
from wamp_http import request as wamp_request
# Channel name -> model-selection prefix used when routing generation requests.
CHANNELS = {"nsfw": "MLP|NSFW|", "sfw": "MLP|SFW|"}
# Story prompt length bounds (characters).
MIN_CHARACTERS = 150
MAX_CHARACTERS = 700
# Voice (TTS) prompt length bounds (characters).
TEXT_MIN_CHARACTERS = 20
TEXT_MAX_CHARACTERS = 300
# Per-role cooldowns in seconds between story generations.
GPT2_DELAYS = {
    "Admin": 1,
    "Drone": 15,
    "Tech": 15,
    "Supporter": 10,
    "@everyone": 30,
}
# Per-role cooldowns in seconds between voice generations.
VOICE_DELAYS = {
    "Admin": 1,
    "Drone": 5,
    "Tech": 5,
    "Supporter": 10,
    "@everyone": 30,
}
# Which roles may use story generation over DM.
GPT2_DM_ENABLED = {
    "Admin": True,
    "Drone": True,
    "Tech": True,
    "Supporter": False,
    "@everyone": False,
}
# Which roles may use the NSFW model.
GPT2_NSFW_ENABLED = {
    "Admin": True,
    "Drone": True,
    "Tech": True,
    "Supporter": False,
    "@everyone": False,
}
# Which roles may explicitly pick a model (see !pstory "model" param).
GPT2_SELECT_MODEL = {
    "Admin": True,
    "Drone": False,
    "Tech": False,
    "Supporter": False,
    "@everyone": False,
}
MAX_MSG_LENGTH = 800
# user id -> (last request time, timeout) cooldown trackers for stories/voice.
USER_STORY_TIME = dict()
USER_VOICE_TIME = dict()
logger = logging.getLogger(__name__)
coloredlogs.install(level="INFO")
load_dotenv()
token = os.getenv("DISCORD_TOKEN")
# sentence tokenizer data needed by nltk.sent_tokenize (downloads on first run)
nltk.download("punkt")
bot = commands.Bot(command_prefix="!")
# Populated in on_ready() with the bot's home guild.
GUILD = None
def clear_expired():
    """Drop story-cooldown entries whose timeout has already elapsed."""
    now = timer()
    expired_users = [
        user
        for user, (last_time, timeout) in USER_STORY_TIME.items()
        if now - last_time > timeout
    ]
    for user in expired_users:
        del USER_STORY_TIME[user]
def clear_expired_voice():
    """Drop voice-cooldown entries whose timeout has already elapsed."""
    now = timer()
    expired_users = [
        user
        for user, (last_time, timeout) in USER_VOICE_TIME.items()
        if now - last_time > timeout
    ]
    for user in expired_users:
        del USER_VOICE_TIME[user]
@bot.event
async def on_ready():
    """Log the successful connection and cache the bot's home guild."""
    logger.info(f"{bot.user} has connected to Discord!")
    global GUILD
    # hard-coded guild id of the bot's home server; used to resolve members for DMs
    GUILD = bot.get_guild(670866322619498507)
"""
@bot.event
async def on_message(message):
if message.channel.name in ['sfw', 'nsfw']:
await bot.process_commands(message)
"""
@bot.command(
    name="pstory",
    help="""
Same as !story but allows to set special generation params, use as "!pstory temperature=value&top_p=value&top_k=value&penalty=value text".
**Temperature** makes the text more diverse as it increases, may either generate interesting stories or devolve into total chaos. Default temperature is 0.85. Accepted values [0.1, 1.0], try a step of by 0.01.
**Top-k** selects tokens(i.e. words) from a list of K most likely tokens. Works good for some inputs but not so great for 'narrow distribution', i.e. where one token is much more likely to be used than any other. Accepted values [5, 40].
**Top-p** select tokens from a list cut of tokens above specific probability value P. Works good for 'narrow distribution' but may limit choices for 'broad distribution'. Accepted values [0.1, 1.0], try a step of by 0.01.
You can only set either top_p or top_k. By default top_p of 0.9 is used.
**Penalty** helps to stop model from looping (i.e. repeateg the same text over and over) on low temperatures. Accepted values [0.1, 1.0]
You can read more at https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277
If you DM the bot you can also provide **model** parameter to select between 'sfw' and 'nsfw' models.
Here are some good starting points.
* temperature=0.85&top_p=0.9 - Good starting point, high temperature for diverse results with top-p sampling.
* temperature=0.3&top_p=0.9&=penalty=0.85 - Same as above but prevent looping at lower temperature by using repetition penalty.
* temperature=0.3&top_k=40&=penalty=0.85 - Now use top-k sampling, may or may not be better than top-k depending on input.
There are no 'best' values that we've discovered so far, quality of story depends on these parameters but also length of user prompt and amount of model training. Please experiment and report back any good values!""",
)
async def pstory(ctx, *, prompt: str):
    """Generate a story with user-supplied sampling parameters.

    If the first whitespace-separated token contains ``=`` it is parsed as a
    ``&``-joined list of key=value pairs (temperature, top_p, top_k, penalty,
    model) and the rest of the message is used as the story prompt. Validated
    values are forwarded to :func:`gen_story`.
    """
    data = {}
    # partition() never raises, unlike the previous split(" ", 1) unpacking
    # which crashed on single-word prompts with no space at all
    params, _, text = prompt.partition(" ")
    if "=" in params and text:
        prompt = text
        try:
            data = dict(item.split("=") for item in params.split("&"))
        except ValueError:
            # malformed pair such as "a" or "a=b=c"
            await ctx.send("Sorry, can't parse the params.")
            return

    logger.debug("pstory params: %s", data)
    # temperature stays a string here; it is converted/validated below only
    # when present, preserving the original truthiness-based flow
    temperature = data.get("temperature")
    requested_model = data.get("model")
    try:
        top_p = float(data.get("top_p", 0))
        top_k = int(data.get("top_k", 0))
        penalty = float(data.get("penalty", 0))
    except ValueError:
        # non-numeric values previously raised an uncaught ValueError
        await ctx.send("Sorry, can't parse the params.")
        return

    if top_k > 0 and top_p > 0:
        await ctx.send("Sorry, can't set top_k and top_p at the same time.")
        return

    if "top_k" in data and (top_k < 5 or top_k > 40):
        await ctx.send("Sorry, wrong 'top_k' value, expected [5, 40]")
        return

    if temperature:
        try:
            temperature = float(temperature)
        except ValueError:
            await ctx.send("Sorry, can't parse the params.")
            return
        if temperature < 0.1 or temperature > 1.0:
            await ctx.send("Sorry, wrong 'temperature' value, expected [0.1, 1.0]")
            return

    if "penalty" in data and (penalty < 0.1 or penalty > 1.0):
        await ctx.send("Sorry, wrong 'penalty' value, expected [0.1, 1.0]")
        return

    if "top_p" in data and (top_p < 0.1 or top_p > 1.0):
        await ctx.send("Sorry, wrong 'top_p' value, expected [0.1, 1.0]")
        return

    await gen_story(
        ctx,
        prompt,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        penalty=penalty,
        requested_model=requested_model,
    )
@bot.command(
    name="say",
    help="Generate a voice clip using Twilight Sparkle's Tacotron2+WaveGlow model from Pony Preservation Project",
)
async def say(ctx: discord.ext.commands.Context, *, prompt: str):
    """Synthesize *prompt* as speech via the WAMP TTS backend and post it as an .ogg file.

    Rejects prompts outside the length bounds or with a profanity score above 0.3.
    """
    if len(prompt) < TEXT_MIN_CHARACTERS:
        response = (
            f"Sorry, prompt must be at least {TEXT_MIN_CHARACTERS} characters, got %s"
            % len(prompt)
        )
        await ctx.send(response)
        return
    if len(prompt) > TEXT_MAX_CHARACTERS:
        response = (
            f"Sorry, prompt must be less than {TEXT_MAX_CHARACTERS} characters, got %s"
            % len(prompt)
        )
        await ctx.send(response)
        return
    # profanity_check returns a probability in [0, 1]; threshold is stricter than !story
    profanity_score = predict_prob([prompt])[0]
    if profanity_score > 0.3:
        response = f"Sorry, profanity score is too high: {profanity_score:.3f}"
        await ctx.send(response)
        return
    msg = f"<@{ctx.author.id}> Got it, working on a voice sample for you!"
    await ctx.send(msg)
    # WAMP procedure URI for the TTS backend (the f-prefix here is inert)
    path = f"com.purplesmart.router.api"
    data = {"query": prompt, "method": "tts/v1", "kernel": None, "params": {}}
    try:
        resp = (await wamp_request(path, data))["args"][0]
    except NoBackendAvailable as e:
        logging.exception(f"Generation error: {e}")
        await ctx.send(f"{e}")
        return
    except Exception:
        response = "Sorry, I crashed!\n"
        logging.exception("Generation error")
        await ctx.send(response)
        return
    output = resp["output"]
    # audio arrives base64-encoded; decode into an in-memory file for discord
    sound = io.BytesIO(b64decode(output["generated_data"]))
    # sound = io.BytesIO(resp["data"])
    filename = sanitize_filename(prompt).lower()[:40]
    file = discord.File(sound, filename="%s.ogg" % filename)
    embed = discord.Embed(title="A message from Twilight")
    arpabet = output["generated_text"] or "no arphabet data"
    embed.add_field(name="Arpabet", value=arpabet)
    await ctx.send(file=file, embed=embed)
@bot.command(name="story", help="Give me a prompt, get a story")
async def story(ctx, *, prompt: str):
    """Generate a story from *prompt* with default sampling params (see !pstory for tunables)."""
    await gen_story(ctx, prompt)
# Punctuation that marks a completed sentence.
SENTENCE_END = frozenset({".", "!", "?"})


def remove_last_sentence(text: str) -> str:
    """Trim a trailing incomplete sentence from generated *text*.

    If the text already ends with sentence-ending punctuation it is returned
    unchanged; otherwise the last (presumably cut-off) sentence of the final
    paragraph is dropped.
    """
    if not text:
        # guard: the original crashed with IndexError on empty input
        return text
    if text[-1] in SENTENCE_END:
        return text
    paragraphs = [p for p in text.split("\n\n") if p]
    if not paragraphs:
        # guard: whitespace-only input used to crash popping an empty list
        return ""
    last_paragraph = paragraphs.pop()
    last_paragraph = " ".join(sent_tokenize(last_paragraph)[:-1])
    paragraphs.append(last_paragraph)
    return "\n\n".join(paragraphs)
async def gen_story(
ctx: discord.ext.commands.Context,
prompt,
temperature=None,
top_p=None,
top_k=None,
penalty=None,
requested_model=None,
):
forced_model = None
is_owner = await ctx.bot.is_owner(ctx.author)
if ctx.guild is None:
# Direct Message
member = GUILD.get_member(ctx.author.id)
if not member:
response = (
"Sorry, you need to be a member of the AskPonyAi channel to DM the bot."
)
await ctx.send(response)
return
roles = member.roles
else:
roles = ctx.author.roles
can_select_model = max(
filter(
lambda x: x is not None,
[GPT2_SELECT_MODEL.get(role.name) for role in roles],
)
)
if requested_model is not None:
if can_select_model:
forced_model = requested_model
else:
response = "Sorry, you can only select the model when you DM."
await ctx.send(response)
return
if ctx.guild is None:
can_use_dm = max(
filter(
lambda x: x is not None,
[GPT2_DM_ENABLED.get(role.name) for role in roles],
)
)
if not is_owner and not can_use_dm:
response = "Sorry, DM based story generation is not yet available."
await ctx.send(response)
return
timeout_for_role = min(
filter(lambda x: x is not None, [GPT2_DELAYS.get(role.name) for role in roles])
)
try:
if len(prompt) < MIN_CHARACTERS:
response = (
f"Sorry, prompt must be at least {MIN_CHARACTERS} characters, got %s"
% len(prompt)
)
await ctx.send(response)
return
if len(prompt) > MAX_CHARACTERS:
response = (
f"Sorry, prompt must be less than {MAX_CHARACTERS} characters, got %s"
% len(prompt)
)
await ctx.send(response)
return
clear_expired()
start = timer()
last_story = None
if ctx.author.id in USER_STORY_TIME:
last_story, timeout = USER_STORY_TIME[ctx.author.id]
if last_story and start - last_story < timeout:
wait_for = humanize.naturaldelta(timeout - (start - last_story))
await ctx.send(
f"<@{<EMAIL>}> Cooldown active, please wait {wait_for}."
)
return
profanity_score = predict_prob([prompt])[0]
if profanity_score > 0.15 and str(ctx.channel) != "nsfw":
response = f"Sorry, profanity score is too high: {profanity_score}"
await ctx.send(response)
return
USER_STORY_TIME[ctx.author.id] = (start, timeout_for_role)
data = {}
if top_p and top_p > 0:
data["top_p"] = top_p
if top_k and top_k > 0:
data["top_k"] = top_k
if penalty and penalty > 0:
data["penalize"] = penalty
if temperature and temperature > 0:
data["temperature"] = temperature
data["max_length"] = 300
if ctx.guild is None:
if requested_model is not None:
if requested_model not in CHANNELS.keys():
response = "Sorry, you can only select the model when you DM."
await ctx.send(response)
return
else:
inference_model = CHANNELS[requested_model]
else:
inference_model = CHANNELS["sfw"]
else:
if forced_model:
inference_model = CHANNELS.get(forced_model)
else:
inference_model = CHANNELS.get(str(ctx.channel))
if not inference_model:
response = "Sorry, I only operate in [%s] channels." % ", ".join(
[k for k in CHANNELS.keys()]
)
await ctx.send(response)
return
msg = f"<@{ctx.author.id}> Got it, working on a | |
Reason: Error connecting to SlashNext Cloud'
return action_result.set_status(phantom.APP_ERROR, msg)
# Return success
elif response['errorNo'] == 0:
msg = 'Test Connectivity Successful'
self.save_progress(msg)
return action_result.set_status(phantom.APP_SUCCESS)
# If there is an error then return the exact error message
else:
self.save_progress('Test Connectivity Failed')
msg = 'Error Reason: {0}'.format(response['errorMsg'])
return action_result.set_status(phantom.APP_ERROR, msg)
def _handle_api_quota(self, param):
    """Placeholder handler for the 'api quota' action; not implemented yet."""
    self.save_progress('In action handler for: {0}'.format(self.get_action_identifier()))
    # Record the input parameters on the action result.
    action_result = self.add_action_result(ActionResult(dict(param)))
    msg = 'Coming Soon...'
    self.save_progress(msg)
    return action_result.set_status(phantom.APP_SUCCESS, msg)
def _handle_host_reputation(self, param):
    """Run the 'host reputation' action: query SlashNext for a verdict on a host."""
    self.save_progress('In action handler for: {0}'.format(self.get_action_identifier()))
    # Record the input parameters on the action result.
    action_result = self.add_action_result(ActionResult(dict(param)))
    # 'host' is a required action parameter.
    target_host = param['host']
    api_params = dict(authkey=self._api_key, host=target_host)
    ret_val, response = self._make_rest_call(
        HOST_REPUTE_API, action_result, method='post', params=api_params, headers=None)

    if phantom.is_fail(ret_val):
        # The SlashNext cloud did not answer with status 200.
        msg = 'Host Reputation Failed, Error Reason: Error connecting to SlashNext Cloud'
        self.save_progress(msg)
        action_result.update_summary({'State': 'Connection Error'})
        return action_result.set_status(phantom.APP_ERROR, msg)

    if response['errorNo'] == 0:
        msg = 'Host Reputation Successful'
        self.save_progress(msg)
        action_result.add_data(response)
        action_result.update_summary({
            'State': 'Reputation Fetched',
            'Verdict': response['threatData']['verdict'],
        })
        return action_result.set_status(phantom.APP_SUCCESS, msg)

    # Non-zero errorNo: surface the API's own error message.
    msg = 'Host Reputation Failed, Error Reason: {0}'.format(response['errorMsg'])
    self.save_progress(msg)
    action_result.add_data(response)
    action_result.update_summary({'State': 'API Error'})
    return action_result.set_status(phantom.APP_ERROR, msg)
def _handle_host_urls(self, param):
    """Run the 'host urls' action: fetch the latest scanned URLs for a host.

    :param param: action parameters; requires 'host', accepts optional 'limit'
        (max URLs to return, default 10).
    :return: action_result status (APP_SUCCESS / APP_ERROR).
    """
    # Saving action progress
    self.save_progress('In action handler for: {0}'.format(self.get_action_identifier()))
    # Adding input parameters to the action results
    action_result = self.add_action_result(ActionResult(dict(param)))
    # Accessing action parameters passed in the 'param' dictionary
    # Required values can be accessed directly
    host = param['host']
    # Optional values should use the .get() function
    limit = param.get('limit', 10)
    # Populate the API parameter dictionary
    ep_params = {
        'authkey': self._api_key,
        'host': host,
        'page': 1,
        'rpp': limit
    }
    # Make rest API call
    ret_val, response = self._make_rest_call(
        HOST_REPORT_API, action_result, method='post', params=ep_params, headers=None)
    # Server did not return status code: 200, return error
    if phantom.is_fail(ret_val):
        msg = 'Host URLs Failed, Error Reason: Error connecting to SlashNext Cloud'
        self.save_progress(msg)
        action_result.update_summary({
            'State': 'Connection Error'
        })
        return action_result.set_status(phantom.APP_ERROR, msg)
    # Return success
    elif response['errorNo'] == 0:
        msg = 'Host URLs Successful'
        self.save_progress(msg)
        action_result.add_data(response)
        action_result.update_summary({
            'State': 'URLs Fetched',
            'URLs Found': len(response['urlDataList'])
        })
        return action_result.set_status(phantom.APP_SUCCESS, msg)
    # If there is an error then return the exact error message
    else:
        msg = 'Host URLs Failed, Error Reason: {0}'.format(response['errorMsg'])
        self.save_progress(msg)
        action_result.add_data(response)
        action_result.update_summary({
            'State': 'API Error'
        })
        return action_result.set_status(phantom.APP_ERROR, msg)
def _handle_host_report(self, param):
    """Run the 'host report' action: reputation + latest URL + forensics for a host.

    Chains up to three SlashNext API calls: host reputation, host report
    (latest scanned URL), and - when that URL has no scan ID yet - a
    synchronous URL scan. Finishes by downloading forensics data for the
    resolved scan ID.

    :param param: action parameters; requires 'host'.
    :return: action_result status (APP_SUCCESS / APP_ERROR).
    """
    # Saving action progress
    self.save_progress('In action handler for: {0}'.format(self.get_action_identifier()))
    # Adding input parameters to the action results
    action_result = self.add_action_result(ActionResult(dict(param)))
    # Accessing action parameters passed in the 'param' dictionary
    # Required values can be accessed directly
    host = param['host']
    # --------------------------- Host Reputation ---------------------------
    # Populate the API parameter dictionary
    ep_params = {
        'authkey': self._api_key,
        'host': host
    }
    # Make rest API call
    ret_val, response = self._make_rest_call(
        HOST_REPUTE_API, action_result, method='post', params=ep_params, headers=None)
    # Server did not return status code: 200, return error
    if phantom.is_fail(ret_val):
        msg = 'Host Reputation Failed, Error Reason: Error connecting to SlashNext Cloud'
        self.save_progress(msg)
        action_result.update_summary({
            'State': 'Connection Error'
        })
        return action_result.set_status(phantom.APP_ERROR, msg)
    # Return success
    elif response['errorNo'] == 0:
        self.save_progress('Host Report Successful')
        action_result.add_data(response)
        action_result.update_summary({
            'State': 'Report Fetched',
            'Verdict': response['threatData']['verdict']
        })
        # An 'Unrated' verdict means nothing more to report; stop early.
        if response.get('threatData').get('verdict').startswith('Unrated'):
            msg = 'Host Reputation Returned: {0}'.format(response.get('threatData').get('verdict'))
            self.save_progress(msg)
            return action_result.set_status(phantom.APP_SUCCESS, msg)
    # If there is an error then return the exact error message
    else:
        msg = 'Host Reputation Failed, Error Reason: {0}'.format(response['errorMsg'])
        self.save_progress(msg)
        action_result.add_data(response)
        action_result.update_summary({
            'State': 'API Error'
        })
        return action_result.set_status(phantom.APP_ERROR, msg)
    # --------------------------- Host Report ---------------------------
    # Populate the API parameter dictionary (rpp=1: only the latest URL is needed)
    ep_params = {
        'authkey': self._api_key,
        'host': host,
        'page': 1,
        'rpp': 1
    }
    # Make rest API call
    ret_val, response = self._make_rest_call(
        HOST_REPORT_API, action_result, method='post', params=ep_params, headers=None)
    # Server did not return status code: 200, return error
    if phantom.is_fail(ret_val):
        msg = 'Host URLs Failed, Error Reason: Error connecting to SlashNext Cloud'
        self.save_progress(msg)
        action_result.update_summary({
            'State': 'Connection Error'
        })
        return action_result.set_status(phantom.APP_ERROR, msg)
    # Return success
    elif response['errorNo'] == 0:
        self.save_progress('Host URLs Successful')
        first_url = response['urlDataList'][0]
        latest_url = first_url['url']
        latest_url_scanid = str(first_url['scanId'])
        # Perform a URL scan if there exists no Scan ID for the URL
        if latest_url_scanid == 'N/A':
            # --------------------------- URL Scan Sync ---------------------------
            # Populate the API parameter dictionary
            ep_params = {
                'authkey': self._api_key,
                'url': latest_url
            }
            # Make rest API call
            ret_val, response = self._make_rest_call(
                URL_SCANSYNC_API, action_result, method='post', params=ep_params, headers=None)
            # Server did not return status code: 200, return error
            if phantom.is_fail(ret_val):
                msg = 'URL Synchronous Scan Failed, Error Reason: Error connecting to SlashNext Cloud'
                self.save_progress(msg)
                action_result.update_summary({
                    'State': 'Connection Error'
                })
                return action_result.set_status(phantom.APP_ERROR, msg)
            # Return success
            elif response['errorNo'] == 0:
                self.save_progress('URL Synchronous Scan Successful')
                action_result.add_data(response)
                # If there is landing URL available, get its forensics instead
                if 'landingUrl' in response['urlData']:
                    # Set the Scan ID to landing URL if it exists
                    latest_url_scanid = response['urlData']['landingUrl']['scanId']
                else:
                    # Otherwise set it to the scanned URL's scan ID
                    latest_url_scanid = response['urlData']['scanId']
            # If there is an error then return the exact error message
            else:
                msg = 'URL Synchronous Scan Failed, Error Reason: {0}'.format(response['errorMsg'])
                self.save_progress(msg)
                action_result.add_data(response)
                action_result.update_summary({
                    'State': 'API Error'
                })
                return action_result.set_status(phantom.APP_ERROR, msg)
        else:
            # If there is landing URL available, get its forensics instead
            if 'landingUrl' in first_url and first_url['landingUrl']['scanId'] != 'N/A':
                latest_url_scanid = first_url['landingUrl']['scanId']
            # Add the result of the Host Report
            action_result.add_data(response)
    # If there is an error then return the exact error message
    else:
        msg = 'Host URLs Failed, Error Reason: {0}'.format(response['errorMsg'])
        self.save_progress(msg)
        action_result.add_data(response)
        action_result.update_summary({
            'State': 'API Error'
        })
        return action_result.set_status(phantom.APP_ERROR, msg)
    # --------------------------- Forensics Data ---------------------------
    # Calling the function to collectively download screenshot, HTML and text data
    msg = 'Host Report Successful'
    if response.get('swlData') is None:
        # NOTE(review): the value returned by _download_forensics() is
        # discarded here, so this branch returns None - confirm whether its
        # status should be returned like the else-branch below.
        self._download_forensics(action_result, latest_url_scanid, msg)
    else:
        return action_result.set_status(phantom.APP_SUCCESS, msg)
def _handle_url_scan(self, param):
    """Run the 'url scan' action: submit a URL for scanning and report the verdict.

    A pending scan (errorNo == 1) is reported as success with a retry hint;
    a completed scan adds the verdict (preferring the landing URL's verdict)
    and, when 'extended_info' is set, also downloads forensics data.

    :param param: action parameters; requires 'url', accepts optional
        'extended_info' (bool, default False).
    :return: action_result status (APP_SUCCESS / APP_ERROR).
    """
    # Saving action progress
    self.save_progress('In action handler for: {0}'.format(self.get_action_identifier()))
    # Adding input parameters to the action results
    action_result = self.add_action_result(ActionResult(dict(param)))
    # Accessing action parameters passed in the 'param' dictionary
    # Required values can be accessed directly
    url = param['url']
    # Optional values should use the .get() function
    extended_info = param.get('extended_info', False)
    # Populate the API parameter dictionary
    ep_params = {
        'authkey': self._api_key,
        'url': url
    }
    # Make rest API call
    ret_val, response = self._make_rest_call(
        URL_SCAN_API, action_result, method='post', params=ep_params, headers=None)
    # Server did not return status code: 200, return error
    if phantom.is_fail(ret_val):
        msg = 'URL Scan Failed, Error Reason: Error connecting to SlashNext Cloud'
        self.save_progress(msg)
        action_result.update_summary({
            'State': 'Connection Error'
        })
        return action_result.set_status(phantom.APP_ERROR, msg)
    # Return success (errorNo == 1 means the scan is still in progress)
    elif response['errorNo'] == 1:
        msg = 'Your URL Scan request is submitted to the cloud and may take up-to 60 seconds to complete.\n' \
              'Please check back later using "scan report" action with Scan ID = {0} or '\
              'running the same "url scan" action one more time'.format(response['urlData']['scanId'])
        self.save_progress(msg)
        action_result.add_data(response)
        action_result.update_summary({
            'State': 'Pending, Retry'
        })
        return action_result.set_status(phantom.APP_SUCCESS, msg)
    # Return success
    elif response['errorNo'] == 0:
        msg = 'URL Scan Successful'
        self.save_progress(msg)
        action_result.add_data(response)
        # Check to see if there is a landing URL so that correct verdict is added
        if response['urlData'].get('landingUrl') is None:
            verdict = response['urlData']['threatData']['verdict']
        else:
            verdict = response['urlData']['landingUrl']['threatData']['verdict']
        action_result.update_summary({
            'State': 'Scan Completed',
            'Verdict': verdict
        })
        # Download the detailed forensics data if extended_info parameter is True
        if extended_info and response.get('swlData') is None:
            self.save_progress('Downloading Forensics Data')
            # If there is landing URL available, get its forensics instead
            if 'landingUrl' in response['urlData']:
                url_scanid = response['urlData']['landingUrl']['scanId']
            else:
                url_scanid = response['urlData']['scanId']
            # NOTE(review): the value returned by _download_forensics() is
            # discarded, so this path returns None - confirm whether its
            # status should be returned like the else-branch below.
            self._download_forensics(action_result, url_scanid, msg)
        else:
            return action_result.set_status(phantom.APP_SUCCESS, msg)
    # If there is an error then return the exact error message
    else:
        msg = 'URL Scan Failed, Error Reason: {0}'.format(response['errorMsg'])
        self.save_progress(msg)
        action_result.add_data(response)
        action_result.update_summary({
            'State': 'API Error'
        })
        return action_result.set_status(phantom.APP_ERROR, msg)
def _handle_url_scan_sync(self, param):
# Saving action progress
self.save_progress('In action handler for: {0}'.format(self.get_action_identifier()))
self.save_progress('With parameters: {0}'.format(param))
# Adding input parameters to the action results
action_result = self.add_action_result(ActionResult(dict(param)))
# Accessing action parameters passed in the 'param' | |
<reponame>jaescalo/cli-global-traffic-manager
#!/usr/bin/python
# DISCLAIMER:
"""
This script is for demo purposes only which provides customers with programming information regarding the Developer APIs. This script is supplied "AS IS" without any warranties and support.
We assume no responsibility or liability for the use of the script, convey no license or title under any patent or copyright.
We reserve the right to make changes in the script without notification and make no representation or warranty that such application will be suitable for the specified use without further testing or modification.
"""
# USAGE:
"""
usage: gtm_tool.py [--version] ...
Global Traffic Manager Tools
optional arguments:
--version show program's version number and exit
Commands:
help Show available help
search Search for an IP Address, FQDN, CNAME handout in all GTM
properties
show Show a GTM property details
update Modify and activate a property
Example #1: Find a server name or IP in all GTM properties
$ python3 gtm_tool.py search --value secret.origin.com
Example #2: Get a property details
$ python3 gtm_tool.py show --property www.example.com.akadns.net
Example #3: Update a property by turning ON/OFF a Data Center and activate.
$ python3 gtm_tool.py update --property www.example.com.akadns.net --datacenter Dallas --state ON
Example #4: Clone a property and change its data center, server and property name.
python3 gtm_tool.py clone --property www.example.com.akadns.net --datacenter Santiago --server 172.16.31.10 --new_property weighted --key F-AC-788308
"""
import argparse
import configparser
import json
import logging
import os
import re
import subprocess
import sys
import time
import urllib

import requests
from akamai.edgegrid import EdgeGridAuth, EdgeRc

if sys.version_info[0] < 3:
    from urlparse import urljoin
else:
    from urllib.parse import urljoin
class MyArgumentParser(argparse.ArgumentParser):
    """ArgumentParser that shows the full help on error and exits with status 0."""

    def error(self, message):
        # Print the complete help text to stderr, then exit successfully
        # (argparse's default error handler exits with status 2).
        self.print_help(sys.stderr)
        self.exit(0, f"{self.prog}: error: {message}\n")
# Initialization of section and edgerc.
def init_config(edgerc_file, section):
    """Resolve .edgerc credentials and build an authenticated session.

    :param edgerc_file: path to the .edgerc file; falls back to the
        AKAMAI_EDGERC env var, then ~/.edgerc
    :param section: section name inside .edgerc; falls back to the
        AKAMAI_EDGERC_SECTION env var, then "default"
    :return: (baseurl, session); also stored as module globals
    """
    global baseurl, session
    # Resolve the edgerc file: explicit argument > env var > home default.
    if not edgerc_file:
        edgerc_file = os.getenv("AKAMAI_EDGERC") or os.path.join(
            os.path.expanduser("~"), '.edgerc')
    if not os.access(edgerc_file, os.R_OK):
        print("Unable to read edgerc file \"%s\"" % edgerc_file)
        exit(1)
    # Resolve the section: explicit argument > env var > "default".
    if not section:
        section = os.getenv("AKAMAI_EDGERC_SECTION") or "default"
    try:
        edgerc = EdgeRc(edgerc_file)
        baseurl = 'https://%s' % edgerc.get(section, 'host')
        session = requests.Session()
        session.auth = EdgeGridAuth.from_edgerc(edgerc, section)
        return (baseurl, session)
    except configparser.NoSectionError:
        # BUG FIX: configparser was never imported at module level, so this
        # handler previously raised NameError instead of reporting the
        # missing section; configparser is now imported with the other
        # module imports.
        print("Edgerc section \"%s\" not found" % section)
        exit(1)
    except Exception:
        print("Unknown error occurred trying to read edgerc file (%s)" % edgerc_file)
        exit(1)
# Function to get a list of all the GTM domains
def gtm_domains(accountKey_unique):
    """Return the name of every GTM domain visible to the credentials."""
    api_endpoint = urljoin(baseurl, '/config-gtm/v1/domains/' + accountKey_unique)
    logging.info('API Endpoint: ' + api_endpoint)
    response = session.get(api_endpoint)
    # Decode the JSON body and collect every ['items'][n]['name'] entry.
    payload = json.loads(response.text)
    return [item['name'] for item in payload['items']]
# Function to get GTM property details, and capture the server names or IPs.
def gtm_domain_properties(domain_name, accountKey_unique):
    """Fetch the full configuration of a single GTM domain as a dict."""
    print(domain_name)
    api_endpoint = urljoin(baseurl, '/config-gtm/v1/domains/' + domain_name + accountKey_unique)
    logging.info('API Endpoint: ' + api_endpoint)
    response = session.get(api_endpoint)
    dict_response = json.loads(response.text)
    # Domains that contain an 'asmapping' property reject the default media
    # type with a 406; retry the same URL with the server-advertised minimum
    # media type, e.g. "Accept: application/vnd.config-gtm.v1.1+json".
    if response.status_code == 406:
        headers = {'Accept': str(dict_response['minimumMediaTypeRequired'])}
        logging.info('API Endpoint: ' + api_endpoint)
        response = session.get(api_endpoint, headers=headers)
        dict_response = json.loads(response.text)
    return (dict_response)
# Function to extract the traffic targets from each GTM property
def gtm_traffic_targets(domain_name, domains_with_properties):
    """Record servers and handout CNAMEs per property into the module dicts.

    Populates the module-level d_server / d_cname dictionaries keyed by the
    fully qualified property name, then publishes both under d_full.
    """
    # Outer loop: build the fully qualified property name (property + domain).
    for index, prop in enumerate(domains_with_properties['properties']):
        full_property_name = json.loads(json.dumps(prop['name'])) + '.' + domain_name
        # Middle loop: a property may carry several 'trafficTargets' sections.
        for target in domains_with_properties['properties'][index]['trafficTargets']:
            servers = json.loads(json.dumps(target['servers']))
            handout_cname = json.loads(json.dumps(target['handoutCName']))
            d_cname.setdefault(full_property_name, []).append(handout_cname)
            # Inner loop: append every server of this section, building up the
            # property -> server-list mapping one cycle at a time.
            for server in servers:
                d_server.setdefault(full_property_name, []).append(server)
    d_full['server-name'] = d_server
    d_full['handout-cname'] = d_cname
    return ()
# Function that searches for a string in our created dictionary 'd'
def gtm_search_server(ip_fqdn_cname):
    """Print every GTM property whose servers or handout CNAMEs match."""
    found = False
    print('\nDomains that contain your IP, FQDN or Handout CNAME ' + ip_fqdn_cname + ':')
    # d_full maps search-kind -> {property-name -> [values]}.
    for search_kind, properties_map in d_full.items():
        for prop_name, values in properties_map.items():
            for value in values:
                if value == ip_fqdn_cname:
                    print(search_kind + ' in:', prop_name)
                    found = True
    if not found:
        print('\n*** Entry Not Found ***\n')
    return ()
# Function that splits the input property name into property and domain.
def gtm_property_and_domain(gtm_property, domains, accountKey_unique):
    """Split a fully qualified GTM property name into (property, domain).

    :param gtm_property: fully qualified name, e.g. 'www.example.com.akadns.net'
    :param domains: list of known GTM domain names to match against
    :param accountKey_unique: unused; kept for signature compatibility
    :return: tuple (property_name, domain_name); ('Not_Found', '') when no
        known domain matches gtm_property
    """
    for domain in domains:
        domain_matchstring = '.' + domain
        if domain_matchstring in gtm_property:
            property_name = gtm_property.replace(domain_matchstring, '')
            return (property_name, domain)
    # BUG FIX: previously fell through and returned whatever domain happened
    # to be last in the list (or raised NameError on an empty list) even when
    # nothing matched; now an explicit not-found sentinel is returned.
    return ('Not_Found', '')
# Function used to search for GTM properties
def gtm_property_details(property_name, domain_name, accountKey_unique):
    """GET a single GTM property and return the raw requests response."""
    url = urljoin(baseurl, '/config-gtm/v1/domains/' + domain_name + '/properties/' + property_name + accountKey_unique)
    logging.info('API Endpoint: ' + url)
    return session.get(url)
# Get the data centers IDs associated to the requested domain
def gtm_data_centers(domain_name, accountKey_unique):
    """Fetch, pretty-print and return the data centers of a GTM domain."""
    url = urljoin(baseurl, '/config-gtm/v1/domains/' + domain_name + '/datacenters' + accountKey_unique)
    logging.info('API Endpoint: ' + url)
    response = session.get(url)
    # Echo the raw payload for the operator before returning it.
    print(json.dumps(response.json(), indent=4, sort_keys=True))
    return json.loads(response.text)
# Correlate the data center IDs with the data center names
def match_datacenter_name(data_centers, datacenter_name=None):
    """Look up a data center ID by its nickname.

    :param data_centers: dict as returned by gtm_data_centers()
    :param datacenter_name: nickname to match; defaults to the global
        args.datacenter for backward compatibility with existing callers
    :return: the matching datacenterId, or the string 'NOT FOUND'
    """
    if datacenter_name is None:
        # Generalization: the nickname no longer has to come from the
        # module-level argparse namespace.
        datacenter_name = args.datacenter
    for data_center in data_centers['items']:
        if data_center['nickname'] == datacenter_name:
            return data_center['datacenterId']
    return 'NOT FOUND'
# Get a GTM full property name
def get_property_details(user_property_name, accountKey_unique):
    """Resolve a user-supplied property name and fetch its details."""
    all_domains = gtm_domains(accountKey_unique)
    prop, domain = gtm_property_and_domain(user_property_name, all_domains, accountKey_unique)
    details = gtm_property_details(prop, domain, accountKey_unique)
    return (details, domain, prop)
# Upload a GTM property
def gtm_property_upload(property_details_json, domain_name, property_name, accountKey_unique):
    """PUT a property JSON body back to GTM and report the outcome."""
    url = urljoin(baseurl, '/config-gtm/v1/domains/' + domain_name + '/properties/' + property_name + accountKey_unique)
    logging.info('API Endpoint: ' + url)
    response = session.put(url, data=property_details_json,
                           headers={'content-type': 'application/json'})
    # Echo the API response so the operator can inspect the result.
    print(json.dumps(response.json(), indent=4, sort_keys=True))
    # 201 signals the property was created or updated successfully.
    if response.status_code == 201:
        print("Property", property_name, 'succesfully updated/created')
    return ()
# Main function
def main():
global args
parser = MyArgumentParser(
description='Global Traffic Manager Tools', add_help=False
)
parser.add_argument('--version', action='version', version='GTM Tool v1.0')
subparsers = parser.add_subparsers(title='Commands', dest='command', metavar="")
create_parser = subparsers.add_parser('help', help='Show available help').add_argument('args', metavar="", nargs=argparse.REMAINDER)
parser_search = subparsers.add_parser('search', help='Search for an IP Address, FQDN, CNAME handout in all GTM properties', add_help=False)
parser_show = subparsers.add_parser('show', help='Show a GTM property details', add_help=False)
parser_update = subparsers.add_parser('update', help='Modify and activate a property', add_help=False)
parser_clone = subparsers.add_parser('clone', help='Clone a property with new Data Center Name and Servers', add_help=False)
mandatory_search = parser_search.add_argument_group('required arguments')
mandatory_search.add_argument('--value', required=True, help='Search for an IP Address, FQDN, CNAME handout')
optional_search = parser_search.add_argument_group('optional arguments')
optional_search.add_argument('-e', '--edgerc', help='Config file [default: ~/.edgerc]')
optional_search.add_argument('-s', '--section', help='Config section in .edgerc [default: cloudlets]')
optional_search.add_argument('-k', '--key', help='Account Switch Key')
optional_search.add_argument('-v', '--verbose', action='store_true', help='Enable verbose mode')
optional_search.add_argument('-vv', '--debug', action='store_true', help='Enable verbose mode')
mandatory_show = parser_show.add_argument_group('required arguments')
mandatory_show.add_argument('--property', required=True, help='Property name to search')
optional_show = parser_show.add_argument_group('optional arguments')
optional_show.add_argument('-e', '--edgerc', help='Config file [default: ~/.edgerc]')
optional_show.add_argument('-s', '--section', help='Config section in .edgerc [default: cloudlets]')
optional_show.add_argument('-k', '--key', help='Account Switch Key')
optional_show.add_argument('-v', '--verbose', action='store_true', help='Enable verbose mode')
optional_show.add_argument('-vv', '--debug', action='store_true', help='Enable verbose mode')
mandatory_update = parser_update.add_argument_group('required arguments')
mandatory_update.add_argument('--property', required=True, help='Property name to update')
mandatory_update.add_argument('--datacenter', required=True, help='Data Center name to update')
mandatory_update.add_argument('--state', choices={'ON', 'OFF'}, required=True, help='Update a DC state')
optional_update = parser_update.add_argument_group('optional arguments')
optional_update.add_argument('-e', '--edgerc', help='Config file [default: ~/.edgerc]')
optional_update.add_argument('-s', '--section', help='Config section in .edgerc [default: cloudlets]')
optional_update.add_argument('-k', '--key', help='Account Switch Key')
optional_update.add_argument('-v', '--verbose', action='store_true', help='Enable verbose mode')
optional_update.add_argument('-vv', '--debug', action='store_true', help='Enable verbose mode')
mandatory_clone = parser_clone.add_argument_group('required arguments')
mandatory_clone.add_argument('--property', required=True, help='Property name to clone')
mandatory_clone.add_argument('--datacenter', required=True, help='Data Center Name')
mandatory_clone.add_argument('--server', required=True, help='Server name or IP address')
mandatory_clone.add_argument('--new_property', required=True, help='New GTM property name')
optional_clone = parser_clone.add_argument_group('optional arguments')
optional_clone.add_argument('-e', '--edgerc', help='Config file [default: ~/.edgerc]')
optional_clone.add_argument('-s', '--section', help='Config section in .edgerc [default: cloudlets]')
optional_clone.add_argument('-k', '--key', help='Account Switch Key')
optional_clone.add_argument('-v', '--verbose', action='store_true', help='Enable verbose mode')
optional_clone.add_argument('-vv', '--debug', action='store_true', help='Enable verbose mode')
args = parser.parse_args()
if len(sys.argv) <= 1:
parser.print_help()
return 0
accountKey_append = accountKey_unique = ''
if args.key:
accountKey_unique = '?accountSwitchKey=' + args.key
accountKey_append = '&accountSwitchKey=' + args.key
global baseurl, session
# Dictionary variables, d_full will be made of d_server and d_cname
global d_server, d_cname, d_full
d_server = {}
d_cname = {}
d_full = {}
if args.verbose:
logging.basicConfig(level=logging.INFO)
elif args.debug:
logging.basicConfig(level=logging.DEBUG)
if args.command == 'help':
if len(args.args) > 0:
if args.args[0] == 'update':
parser_update.print_help()
else:
parser.print_help()
return 0
| |
"""
Skyline functions
These are shared functions that are required in multiple modules.
"""
import logging
from os.path import dirname, join, abspath, isfile
from os import path
from time import time
import socket
import datetime
import errno
import traceback
import json
import requests
# Python 2/3 compatibility: prefer the py2 module names and fall back to
# their py3 locations.
try:
    import urlparse
except ImportError:
    import urllib.parse
try:
    import urllib2
except ImportError:
    import urllib.request
    import urllib.error
import settings
try:
    # @modified 20190518 - Branch #3002: docker
    # from settings import GRAPHITE_HOST
    # Carbon host used for submitting metrics; default to '' when the
    # deployment's settings.py does not define it.
    from settings import CARBON_HOST
except:
    # @modified 20190518 - Branch #3002: docker
    # GRAPHITE_HOST = ''
    CARBON_HOST = ''
try:
    from settings import CARBON_PORT
except:
    CARBON_PORT = ''
# @added 20191007 - Feature #3250: Allow Skyline to send metrics to another Carbon host
# Optional override: Skyline's own metrics may be sent to a dedicated carbon
# host/port; fall back to the primary CARBON_HOST/CARBON_PORT when unset.
try:
    from settings import SKYLINE_METRICS_CARBON_HOST
    skyline_metrics_carbon_host = SKYLINE_METRICS_CARBON_HOST
except:
    skyline_metrics_carbon_host = CARBON_HOST
try:
    from settings import SKYLINE_METRICS_CARBON_PORT
    skyline_metrics_carbon_port = SKYLINE_METRICS_CARBON_PORT
except:
    skyline_metrics_carbon_port = CARBON_PORT
# Panorama MySQL connection parameters assembled from settings.
# NOTE(review): the password value below reads `<PASSWORD>` - it appears to
# have been redacted/scrubbed and is not valid Python as-is; restore the
# original settings reference - TODO confirm against upstream settings.py.
config = {'user': settings.PANORAMA_DBUSER,
          'password': <PASSWORD>,
          'host': settings.PANORAMA_DBHOST,
          'port': settings.PANORAMA_DBPORT,
          'database': settings.PANORAMA_DATABASE,
          'raise_on_warnings': True}
def send_graphite_metric(current_skyline_app, metric, value):
    """
    Sends the skyline_app metrics to the `GRAPHITE_HOST` if a graphite
    host is defined.

    :param current_skyline_app: the skyline app using this function
    :param metric: the metric namespace
    :param value: the metric value (as a str not an int)
    :type current_skyline_app: str
    :type metric: str
    :type value: str
    :return: ``True`` or ``False``
    :rtype: boolean
    """
    # @added 20190805 - Task #2828: Skyline - Python 3.7
    # Lazily determine the interpreter major version once per call.
    try:
        python_version
    except:
        from sys import version_info
        python_version = int(version_info[0])
    # @added 20190518 - Branch #3002: docker
    # If the carbon host is still the shipped placeholder there is nothing
    # to send metrics to.
    if skyline_metrics_carbon_host == 'YOUR_GRAPHITE_HOST.example.com':
        current_skyline_app_logger = str(current_skyline_app) + 'Log'
        current_logger = logging.getLogger(current_skyline_app_logger)
        current_logger.info('CARBON_HOST is not configured in settings.py no CARBON_HOST to send metrics to')
        return False
    if skyline_metrics_carbon_host != '':
        sock = socket.socket()
        sock.settimeout(10)
        # Handle connection error to Graphite #116 @etsy
        # Fixed as per https://github.com/etsy/skyline/pull/116 and
        # mlowicki:etsy_handle_connection_error_to_graphite
        try:
            sock.connect((skyline_metrics_carbon_host, skyline_metrics_carbon_port))
            sock.settimeout(None)
        except socket.error:
            sock.settimeout(None)
            # BUG FIX: close the socket on a failed connect instead of
            # leaking the file descriptor.
            sock.close()
            endpoint = '%s:%d' % (skyline_metrics_carbon_host, skyline_metrics_carbon_port)
            current_skyline_app_logger = str(current_skyline_app) + 'Log'
            current_logger = logging.getLogger(current_skyline_app_logger)
            current_logger.error(
                'error :: cannot connect to Graphite at %s' % endpoint)
            return False
        try:
            # @modified 20190805 - Task #2828: Skyline - Python 3.7
            if python_version == 2:
                sock.sendall('%s %s %i\n' % (metric, value, time()))
            if python_version == 3:
                message = '%s %s %i\n' % (metric, value, time())
                sock.sendall(message.encode())
            return True
        except:
            endpoint = '%s:%d' % (skyline_metrics_carbon_host, skyline_metrics_carbon_port)
            current_skyline_app_logger = str(current_skyline_app) + 'Log'
            current_logger = logging.getLogger(current_skyline_app_logger)
            current_logger.error(
                'error :: could not send data to Graphite at %s' % endpoint)
            return False
        finally:
            # BUG FIX: previously the socket was only closed on the success
            # path, leaking it whenever sendall raised.
            sock.close()
    return False
def mkdir_p(path):
    """
    Create nested directories.

    :param path: directory path to create
    :type path: str
    :return: ``True`` when the directory exists after the call (created now
        or already present); re-raises any other OSError
    """
    # File-style lazy import guard used throughout this module.
    try:
        os.getpid()
    except:
        import os
    try:
        os.makedirs(path, mode=0o755)
        return True
    # Python >2.5
    except OSError as exc:
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            # BUG FIX: previously fell through and returned None here,
            # despite the documented contract of returning True.
            return True
        raise
def load_metric_vars(current_skyline_app, metric_vars_file):
    """
    Import the metric variables for a check from a metric check variables file
    :param current_skyline_app: the skyline app using this function
    :param metric_vars_file: the path and filename to the metric variables files
    :type current_skyline_app: str
    :type metric_vars_file: str
    :return: the metric_vars module object or ``False``
    :rtype: object or boolean
    """
    # File-style lazy import guards: probing the name raises NameError when
    # the module is not yet in scope, triggering the local import.
    try:
        os.getpid()
    except:
        import os
    try:
        imp
    except:
        import imp
    metric_vars = False
    metric_vars_got = False
    if os.path.isfile(metric_vars_file):
        current_skyline_app_logger = str(current_skyline_app) + 'Log'
        current_logger = logging.getLogger(current_skyline_app_logger)
        current_logger.info(
            'loading metric variables from import - metric_check_file - %s' % (
                str(metric_vars_file)))
        # Bug #1460: panorama check file fails
        # global metric_vars
        # current_logger.info('set global metric_vars')
        with open(metric_vars_file) as f:
            try:
                # NOTE(review): imp.load_source is called with '' as the
                # pathname and the open file handle; imp is deprecated since
                # Python 3.4 and removed in 3.12 - TODO confirm this still
                # loads on the deployed interpreter.
                metric_vars = imp.load_source('metric_vars', '', f)
                metric_vars_got = True
            except:
                current_logger.info(traceback.format_exc())
                msg = 'failed to import metric variables - metric_check_file'
                current_logger.error(
                    'error :: %s - %s' % (msg, str(metric_vars_file)))
                metric_vars = False
        if settings.ENABLE_DEBUG and metric_vars_got:
            current_logger.info(
                'metric_vars determined - metric variable - metric - %s' % str(metric_vars.metric))
    else:
        # NOTE(review): current_logger appears to be defined only inside the
        # isfile branch above; if so this error path raises NameError when
        # the check file is missing - TODO confirm against upstream layout.
        current_logger.error('error :: metric_vars_file not found - %s' % (str(metric_vars_file)))
    return metric_vars
def write_data_to_file(current_skyline_app, write_to_file, mode, data):
    """
    Write data to a file, creating the parent directory if needed.

    :param current_skyline_app: the skyline app using this function
    :param write_to_file: the path and filename to write the data into
    :param mode: ``w`` to overwrite, ``a`` to append
    :param data: the data to write to the file
    :type current_skyline_app: str
    :type write_to_file: str
    :type mode: str
    :type data: str
    :return: ``True`` or ``False``
    :rtype: boolean
    """
    # File-style lazy import / version guards used throughout this module.
    try:
        os.getpid()
    except:
        import os
    try:
        python_version
    except:
        from sys import version_info
        python_version = int(version_info[0])
    file_dir = os.path.dirname(write_to_file)
    if not os.path.exists(file_dir):
        try:
            os.makedirs(file_dir, mode=0o755)
        # Python >2.5
        except OSError as exc:
            # BUG FIX: the EEXIST re-check previously tested `path` (the
            # os.path module imported at file level via `from os import
            # path`), which raised TypeError; it must test the directory we
            # just tried to create.
            if exc.errno == errno.EEXIST and os.path.isdir(file_dir):
                pass
            else:
                raise
    if not os.path.exists(file_dir):
        current_skyline_app_logger = str(current_skyline_app) + 'Log'
        current_logger = logging.getLogger(current_skyline_app_logger)
        current_logger.error(
            'error :: could not create directory - %s' % (str(file_dir)))
    try:
        with open(write_to_file, mode) as fh:
            fh.write(data)
        if python_version == 2:
            os.chmod(write_to_file, 0o644)
        if python_version == 3:
            os.chmod(write_to_file, mode=0o644)
        return True
    except:
        return False
def fail_check(current_skyline_app, failed_check_dir, check_file_to_fail):
    """
    Move a failed check file into the failed checks directory.

    :param current_skyline_app: the skyline app using this function
    :param failed_check_dir: the directory where failed checks are moved to
    :param check_file_to_fail: failed check file to move
    :type current_skyline_app: str
    :type failed_check_dir: str
    :type check_file_to_fail: str
    :return: ``True``, ``False``
    :rtype: boolean
    """
    # File-style lazy import / version guards used throughout this module.
    try:
        os.getpid()
    except:
        import os
    try:
        shutil
    except:
        import shutil
    try:
        python_version
    except:
        from sys import version_info
        python_version = int(version_info[0])
    current_logger = logging.getLogger(str(current_skyline_app) + 'Log')
    # Make sure the destination directory exists before attempting the move.
    if not os.path.exists(failed_check_dir):
        try:
            mkdir_p(failed_check_dir)
            current_logger.info(
                'created failed_check_dir - %s' % str(failed_check_dir))
        except:
            current_logger.info(traceback.format_exc())
            current_logger.error(
                'error :: failed to create failed_check_dir - %s' %
                str(failed_check_dir))
            return False
    check_file_name = os.path.basename(str(check_file_to_fail))
    failed_check_file = '%s/%s' % (failed_check_dir, check_file_name)
    try:
        shutil.move(check_file_to_fail, failed_check_file)
        if python_version == 2:
            os.chmod(failed_check_file, 0o644)
        if python_version == 3:
            os.chmod(failed_check_file, mode=0o644)
        current_logger.info('moved check file to - %s' % failed_check_file)
        return True
    except OSError:
        current_logger.info(traceback.format_exc())
        msg = 'failed to move check file to -%s' % failed_check_file
        current_logger.error('error :: %s' % msg)
    return False
def get_graphite_metric(
current_skyline_app, metric, from_timestamp, until_timestamp, data_type,
output_object):
"""
Fetch data from graphite and return it as object or save it as file
:param current_skyline_app: the skyline app using this function
:param metric: metric name
:param from_timestamp: unix timestamp
:param until_timestamp: unix timestamp
:param data_type: image or json
:param output_object: object or path and filename to save data as, if set to
object, the object is returned
:type current_skyline_app: str
:type metric: str
:type from_timestamp: str
:type until_timestamp: str
:type data_type: str
:type output_object: str
:return: timeseries string, ``True``, ``False``
:rtype: str or boolean
"""
try:
os.getpid()
except:
import os
try:
python_version
except:
from sys import version_info
python_version = int(version_info[0])
try:
quote(current_skyline_app, safe='')
except:
from requests.utils import quote
try:
time.time()
except:
import time
try:
re
except:
import re
current_skyline_app_logger = str(current_skyline_app) + 'Log'
current_logger = logging.getLogger(current_skyline_app_logger)
# if settings.ENABLE_DEBUG:
current_logger.info('graphite_metric - %s' % (metric))
# @added 20160803 - Unescaped Graphite target - https://github.com/earthgecko/skyline/issues/20
# bug1546: Unescaped Graphite target
new_metric_namespace = metric.replace(':', '\:')
metric_namespace = new_metric_namespace.replace('(', '\(')
metric = metric_namespace.replace(')', '\)')
| |
<reponame>bshaffer/google-cloud-sdk
# -*- coding: utf-8 -*- #
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""argparse Actions for use with calliope.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import argparse
import io
import os
import sys
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import markdown
from googlecloudsdk.calliope import parser_errors
from googlecloudsdk.core import log
from googlecloudsdk.core import metrics
from googlecloudsdk.core import properties
from googlecloudsdk.core.console import console_io
from googlecloudsdk.core.document_renderers import render_document
import six
class _AdditionalHelp(object):
"""Simple class for passing additional help messages to Actions."""
def __init__(self, label, message):
self.label = label
self.message = message
def GetArgparseBuiltInAction(action):
  """Get an argparse.Action from a string.

  This function takes one of the supplied argparse.Action strings (see below)
  and returns the corresponding argparse.Action class.

  This "work around" is (e.g. hack) is necessary due to the fact these required
  action mappings are only exposed through subclasses of
  argparse._ActionsContainer as opposed to a static function or global variable.

  Args:
    action: string, one of the following supplied argparse.Action names:
      'store', 'store_const', 'store_false', 'append', 'append_const', 'count',
      'version', 'parsers'.

  Returns:
    argparse.Action, the action class to use.

  Raises:
    ValueError: For unknown action string.
  """
  # pylint:disable=protected-access
  # argparse only exposes the name -> Action registry on _ActionsContainer
  # instances, so build a throwaway container just to query it.
  container = argparse._ActionsContainer(
      description=None,
      prefix_chars=None,
      argument_default=None,
      conflict_handler='error')
  action_cls = container._registry_get('action', action)
  if action_cls is None:
    raise ValueError('unknown action "{0}"'.format(action))
  return action_cls
# pylint:disable=protected-access
def FunctionExitAction(func):
  """Get an argparse.Action that runs the provided function, and exits.

  Args:
    func: func, the function to execute.

  Returns:
    argparse.Action, the action to use.
  """

  class Action(argparse.Action):
    """Zero-argument action: log the command, run func, then exit 0."""

    def __init__(self, **kwargs):
      # This action consumes no values on the command line.
      kwargs['nargs'] = 0
      super(Action, self).__init__(**kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
      base.LogCommand(parser.prog, namespace)
      metrics.Loaded()
      func()
      sys.exit(0)

  return Action
def StoreProperty(prop):
  """Get an argparse action that stores a value in a property.

  Also stores the value in the namespace object, like the default action. The
  value is stored in the invocation stack, rather than persisted permanently.

  Args:
    prop: properties._Property, The property that should get the invocation
      value.

  Returns:
    argparse.Action, An argparse action that routes the value correctly.
  """

  class Action(argparse.Action):
    """The action created for StoreProperty."""

    # store_property is referenced in calliope.parser_arguments.add_argument
    store_property = (prop, None, None)

    def __init__(self, *args, **kwargs):
      super(Action, self).__init__(*args, **kwargs)
      option_strings = kwargs.get('option_strings')
      option_string = option_strings[0] if option_strings else None
      properties.VALUES.SetInvocationValue(prop, None, option_string)
      if '_ARGCOMPLETE' in os.environ:
        # argcomplete inspects the action's class; masquerade as the stock
        # store action so completion treats this flag normally.
        self._orig_class = argparse._StoreAction  # pylint:disable=protected-access

    def __call__(self, parser, namespace, values, option_string=None):
      properties.VALUES.SetInvocationValue(prop, values, option_string)
      setattr(namespace, self.dest, values)

  return Action
def StoreBooleanProperty(prop):
  """Get an argparse action that stores a value in a Boolean property.

  Handles auto-generated --no-* inverted flags by inverting the value.

  Also stores the value in the namespace object, like the default action. The
  value is stored in the invocation stack, rather than persisted permanently.

  Args:
    prop: properties._Property, The property that should get the invocation
      value.

  Returns:
    argparse.Action, An argparse action that routes the value correctly.
  """

  class Action(argparse.Action):
    """The action created for StoreBooleanProperty."""

    # store_property is referenced in calliope.parser_arguments.add_argument
    store_property = (prop, 'bool', None)

    def __init__(self, *args, **kwargs):
      kwargs = dict(kwargs)
      # Bool flags don't take any args. There is one legacy one that needs to
      # so only do this if the flag doesn't specifically register nargs.
      if 'nargs' not in kwargs:
        kwargs['nargs'] = 0
      option_strings = kwargs.get('option_strings')
      if option_strings:
        option_string = option_strings[0]
      else:
        option_string = None
      if option_string and option_string.startswith('--no-'):
        # Auto-generated inverted flag: takes no value and no choices.
        self._inverted = True
        kwargs['nargs'] = 0
        kwargs['const'] = None
        kwargs['choices'] = None
      else:
        self._inverted = False
      super(Action, self).__init__(*args, **kwargs)
      properties.VALUES.SetInvocationValue(prop, None, option_string)
      if '_ARGCOMPLETE' in os.environ:
        self._orig_class = argparse._StoreAction  # pylint:disable=protected-access

    def __call__(self, parser, namespace, values, option_string=None):
      if self._inverted:
        if values in ('true', []):
          values = 'false'
        else:
          # BUG FIX: an explicit falsy value supplied to an inverted --no-*
          # flag must invert to 'true'; previously both branches stored
          # 'false', making the inverted flag impossible to negate.
          values = 'true'
      elif values == []:  # pylint: disable=g-explicit-bool-comparison, need exact [] equality test
        values = 'true'
      properties.VALUES.SetInvocationValue(prop, values, option_string)
      setattr(namespace, self.dest, values)

  return Action
def StoreConstProperty(prop, const):
  """Get an argparse action that stores a constant in a property.

  Also stores the constant in the namespace object, like the store_true
  action. The const is stored in the invocation stack, rather than persisted
  permanently.

  Args:
    prop: properties._Property, The property that should get the invocation
      value.
    const: str, The constant that should be stored in the property.

  Returns:
    argparse.Action, An argparse action that routes the value correctly.
  """

  class Action(argparse.Action):
    """The action created for StoreConstProperty."""

    # store_property is referenced in calliope.parser_arguments.add_argument
    store_property = (prop, 'value', const)

    def __init__(self, *args, **kwargs):
      kwargs = dict(kwargs)
      # Constant-storing flags consume no command-line values.
      kwargs['nargs'] = 0
      super(Action, self).__init__(*args, **kwargs)
      if '_ARGCOMPLETE' in os.environ:
        # Masquerade as the stock store_const action for argcomplete.
        self._orig_class = argparse._StoreConstAction  # pylint:disable=protected-access

    def __call__(self, parser, namespace, values, option_string=None):
      properties.VALUES.SetInvocationValue(prop, const, option_string)
      setattr(namespace, self.dest, const)

  return Action
# pylint:disable=pointless-string-statement
""" Some example short help outputs follow.
$ gcloud -h
usage: gcloud [optional flags] <group | command>
group is one of auth | components | config | dns | sql
command is one of init | interactive | su | version
Google Cloud Platform CLI/API.
optional flags:
-h, --help Print this help message and exit.
--project PROJECT Google Cloud Platform project to use for this
invocation.
--quiet, -q Disable all interactive prompts when running gcloud
commands. If input is required, defaults will be used,
or an error will be raised.
groups:
auth Manage oauth2 credentials for the Google Cloud SDK.
components Install, update, or remove the tools in the Google
Cloud SDK.
config View and edit Google Cloud SDK properties.
dns Manage Cloud DNS.
sql Manage Cloud SQL databases.
commands:
init Initialize a gcloud workspace in the current directory.
interactive Use this tool in an interactive python shell.
su Switch the user account.
version Print version information for Cloud SDK components.
$ gcloud auth -h
usage: gcloud auth [optional flags] <command>
command is one of activate_git_p2d | activate_refresh_token |
activate_service_account | list | login | revoke
Manage oauth2 credentials for the Google Cloud SDK.
optional flags:
-h, --help Print this help message and exit.
commands:
activate_git_p2d Activate an account for git push-to-deploy.
activate_refresh_token
Get credentials via an existing refresh token.
activate_service_account
Get credentials via the private key for a service
account.
list List the accounts for known credentials.
login Get credentials via Google's oauth2 web flow.
revoke Revoke authorization for credentials.
$ gcloud sql instances create -h
usage: gcloud sql instances create
[optional flags] INSTANCE
Creates a new Cloud SQL instance.
optional flags:
-h, --help Print this help message and exit.
--authorized-networks AUTHORIZED_NETWORKS
The list of external networks that are allowed to
connect to the instance. Specified in CIDR notation,
also known as 'slash' notation (e.g. 192.168.100.0/24).
--authorized-gae-apps AUTHORIZED_GAE_APPS
List of App Engine app ids that can access this
instance.
--activation-policy ACTIVATION_POLICY; default="ON_DEMAND"
The activation policy for this instance. This specifies
when the instance should be activated and is applicable
only when the instance state is RUNNABLE. Defaults to
ON_DEMAND.
--follow-gae-app FOLLOW_GAE_APP
The App Engine app this instance should follow. It must
be in the same region as the instance.
--backup-start-time BACKUP_START_TIME
Start time for the daily backup configuration in UTC
timezone,in the 24 hour format - HH:MM.
--gce-zone GCE_ZONE The preferred Compute Engine zone (e.g. us-central1-a,
us-central1-b, etc.).
--pricing-plan PRICING_PLAN, -p PRICING_PLAN; default="PER_USE"
The pricing plan for this instance. Defaults to
PER_USE.
--region REGION; default="us-east1"
The geographical region. Can be us-east1 or europe-
west1. Defaults to us-east1.
--replication REPLICATION; default="SYNCHRONOUS"
The type of replication this instance uses. Defaults to
SYNCHRONOUS.
--tier TIER, -t TIER; default="D0"
The tier of service for this instance, for example D0,
D1. Defaults to D0.
--assign-ip Specified if the instance must be assigned an IP
address.
--enable-bin-log Specified if binary log must be enabled. If backup
configuration is disabled, binary log must be disabled
as well.
--no-backup Specified if daily backup must be disabled.
positional arguments:
INSTANCE Cloud SQL instance ID.
"""
# pylint:disable=pointless-string-statement
"""
$ gcloud auth activate-service-account -h
usage: gcloud | |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Modified from wenet(https://github.com/wenet-e2e/wenet)
"""U2 ASR Model
Unified Streaming and Non-streaming Two-pass End-to-end Model for Speech Recognition
(https://arxiv.org/pdf/2012.05481.pdf)
"""
import sys
import time
from collections import defaultdict
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
import paddle
from paddle import jit
from paddle import nn
from paddlespeech.s2t.decoders.scorers.ctc import CTCPrefixScorer
from paddlespeech.s2t.frontend.utility import IGNORE_ID
from paddlespeech.s2t.frontend.utility import load_cmvn
from paddlespeech.s2t.models.asr_interface import ASRInterface
from paddlespeech.s2t.modules.cmvn import GlobalCMVN
from paddlespeech.s2t.modules.ctc import CTCDecoderBase
from paddlespeech.s2t.modules.decoder import TransformerDecoder
from paddlespeech.s2t.modules.encoder import ConformerEncoder
from paddlespeech.s2t.modules.encoder import TransformerEncoder
from paddlespeech.s2t.modules.initializer import DefaultInitializerContext
from paddlespeech.s2t.modules.loss import LabelSmoothingLoss
from paddlespeech.s2t.modules.mask import make_pad_mask
from paddlespeech.s2t.modules.mask import mask_finished_preds
from paddlespeech.s2t.modules.mask import mask_finished_scores
from paddlespeech.s2t.modules.mask import subsequent_mask
from paddlespeech.s2t.utils import checkpoint
from paddlespeech.s2t.utils import layer_tools
from paddlespeech.s2t.utils.ctc_utils import remove_duplicates_and_blank
from paddlespeech.s2t.utils.log import Log
from paddlespeech.s2t.utils.tensor_utils import add_sos_eos
from paddlespeech.s2t.utils.tensor_utils import pad_sequence
from paddlespeech.s2t.utils.tensor_utils import th_accuracy
from paddlespeech.s2t.utils.utility import log_add
from paddlespeech.s2t.utils.utility import UpdateConfig
__all__ = ["U2Model", "U2InferModel"]
logger = Log(__name__).getlog()
class U2BaseModel(ASRInterface, nn.Layer):
"""CTC-Attention hybrid Encoder-Decoder model"""
    def __init__(self,
                 vocab_size: int,
                 encoder: TransformerEncoder,
                 decoder: TransformerDecoder,
                 ctc: CTCDecoderBase,
                 ctc_weight: float=0.5,
                 ignore_id: int=IGNORE_ID,
                 lsm_weight: float=0.0,
                 length_normalized_loss: bool=False,
                 **kwargs):
        """Build a hybrid CTC/attention encoder-decoder ASR model.

        Args:
            vocab_size: output vocabulary size; the last id doubles as
                both sos and eos.
            encoder: acoustic encoder module.
            decoder: attention decoder module.
            ctc: CTC decoder head.
            ctc_weight: weight of the CTC loss in [0, 1]; the attention
                loss is weighted by (1 - ctc_weight).
            ignore_id: label id ignored when computing losses.
            lsm_weight: label smoothing weight.
            length_normalized_loss: normalize the attention loss by token
                count instead of batch size.
        """
        assert 0.0 <= ctc_weight <= 1.0, ctc_weight
        nn.Layer.__init__(self)
        # note that eos is the same as sos (equivalent ID)
        self.sos = vocab_size - 1
        self.eos = vocab_size - 1
        self.vocab_size = vocab_size
        self.ignore_id = ignore_id
        self.ctc_weight = ctc_weight
        self.encoder = encoder
        self.decoder = decoder
        self.ctc = ctc
        # Label-smoothed cross entropy for the attention branch.
        self.criterion_att = LabelSmoothingLoss(
            size=vocab_size,
            padding_idx=ignore_id,
            smoothing=lsm_weight,
            normalize_length=length_normalized_loss, )
    def forward(
            self,
            speech: paddle.Tensor,
            speech_lengths: paddle.Tensor,
            text: paddle.Tensor,
            text_lengths: paddle.Tensor,
    ) -> Tuple[Optional[paddle.Tensor], Optional[paddle.Tensor], Optional[
            paddle.Tensor]]:
        """Frontend + Encoder + Decoder + Calc loss

        Args:
            speech: (Batch, Length, ...)
            speech_lengths: (Batch, )
            text: (Batch, Length)
            text_lengths: (Batch,)
        Returns:
            total_loss, attention_loss, ctc_loss; a branch's loss is None
            when its weight disables that branch entirely.
        """
        assert text_lengths.dim() == 1, text_lengths.shape
        # Check that batch_size is unified
        assert (speech.shape[0] == speech_lengths.shape[0] == text.shape[0] ==
                text_lengths.shape[0]), (speech.shape, speech_lengths.shape,
                                         text.shape, text_lengths.shape)
        # 1. Encoder
        start = time.time()
        encoder_out, encoder_mask = self.encoder(speech, speech_lengths)
        encoder_time = time.time() - start
        #logger.debug(f"encoder time: {encoder_time}")
        #TODO(<NAME>): sum not support bool type
        #encoder_out_lens = encoder_mask.squeeze(1).sum(1) #[B, 1, T] -> [B]
        # Cast the boolean mask to int64 before summing to get valid lengths.
        encoder_out_lens = encoder_mask.squeeze(1).cast(paddle.int64).sum(
            1)  #[B, 1, T] -> [B]
        # 2a. Attention-decoder branch
        # Skipped when training pure CTC (ctc_weight == 1.0).
        loss_att = None
        if self.ctc_weight != 1.0:
            start = time.time()
            loss_att, acc_att = self._calc_att_loss(encoder_out, encoder_mask,
                                                    text, text_lengths)
            decoder_time = time.time() - start
            #logger.debug(f"decoder time: {decoder_time}")
        # 2b. CTC branch
        # Skipped when training pure attention (ctc_weight == 0.0).
        loss_ctc = None
        if self.ctc_weight != 0.0:
            start = time.time()
            loss_ctc = self.ctc(encoder_out, encoder_out_lens, text,
                                text_lengths)
            ctc_time = time.time() - start
            #logger.debug(f"ctc time: {ctc_time}")
        # Weighted combination of both branch losses.
        if loss_ctc is None:
            loss = loss_att
        elif loss_att is None:
            loss = loss_ctc
        else:
            loss = self.ctc_weight * loss_ctc + (1 - self.ctc_weight) * loss_att
        return loss, loss_att, loss_ctc
def _calc_att_loss(
self,
encoder_out: paddle.Tensor,
encoder_mask: paddle.Tensor,
ys_pad: paddle.Tensor,
ys_pad_lens: paddle.Tensor, ) -> Tuple[paddle.Tensor, float]:
"""Calc attention loss.
Args:
encoder_out (paddle.Tensor): [B, Tmax, D]
encoder_mask (paddle.Tensor): [B, 1, Tmax]
ys_pad (paddle.Tensor): [B, Umax]
ys_pad_lens (paddle.Tensor): [B]
Returns:
Tuple[paddle.Tensor, float]: attention_loss, accuracy rate
"""
ys_in_pad, ys_out_pad = add_sos_eos(ys_pad, self.sos, self.eos,
self.ignore_id)
ys_in_lens = ys_pad_lens + 1
# 1. Forward decoder
decoder_out, _ = self.decoder(encoder_out, encoder_mask, ys_in_pad,
ys_in_lens)
# 2. Compute attention loss
loss_att = self.criterion_att(decoder_out, ys_out_pad)
acc_att = th_accuracy(
decoder_out.view(-1, self.vocab_size),
ys_out_pad,
ignore_label=self.ignore_id, )
return loss_att, acc_att
def _forward_encoder(
self,
speech: paddle.Tensor,
speech_lengths: paddle.Tensor,
decoding_chunk_size: int=-1,
num_decoding_left_chunks: int=-1,
simulate_streaming: bool=False,
) -> Tuple[paddle.Tensor, paddle.Tensor]:
"""Encoder pass.
Args:
speech (paddle.Tensor): [B, Tmax, D]
speech_lengths (paddle.Tensor): [B]
decoding_chunk_size (int, optional): chuck size. Defaults to -1.
num_decoding_left_chunks (int, optional): nums chunks. Defaults to -1.
simulate_streaming (bool, optional): streaming or not. Defaults to False.
Returns:
Tuple[paddle.Tensor, paddle.Tensor]:
encoder hiddens (B, Tmax, D),
encoder hiddens mask (B, 1, Tmax).
"""
# Let's assume B = batch_size
# 1. Encoder
if simulate_streaming and decoding_chunk_size > 0:
encoder_out, encoder_mask = self.encoder.forward_chunk_by_chunk(
speech,
decoding_chunk_size=decoding_chunk_size,
num_decoding_left_chunks=num_decoding_left_chunks
) # (B, maxlen, encoder_dim)
else:
encoder_out, encoder_mask = self.encoder(
speech,
speech_lengths,
decoding_chunk_size=decoding_chunk_size,
num_decoding_left_chunks=num_decoding_left_chunks
) # (B, maxlen, encoder_dim)
return encoder_out, encoder_mask
    def recognize(
            self,
            speech: paddle.Tensor,
            speech_lengths: paddle.Tensor,
            beam_size: int=10,
            decoding_chunk_size: int=-1,
            num_decoding_left_chunks: int=-1,
            simulate_streaming: bool=False, ) -> paddle.Tensor:
        """ Apply beam search on attention decoder

        Args:
            speech (paddle.Tensor): (batch, max_len, feat_dim)
            speech_lengths (paddle.Tensor): (batch, )
            beam_size (int): beam size for beam search
            decoding_chunk_size (int): decoding chunk for dynamic chunk
                trained model.
                <0: for decoding, use full chunk.
                >0: for decoding, use fixed chunk size as set.
                0: used for training, it's prohibited here
            num_decoding_left_chunks (int): number of left context chunks
                used during chunked decoding.
            simulate_streaming (bool): whether do encoder forward in a
                streaming fashion
        Returns:
            paddle.Tensor: decoding result, (batch, max_result_len)
        """
        assert speech.shape[0] == speech_lengths.shape[0]
        assert decoding_chunk_size != 0
        device = speech.place
        batch_size = speech.shape[0]
        # Let's assume B = batch_size and N = beam_size
        # 1. Encoder
        encoder_out, encoder_mask = self._forward_encoder(
            speech, speech_lengths, decoding_chunk_size,
            num_decoding_left_chunks,
            simulate_streaming)  # (B, maxlen, encoder_dim)
        maxlen = encoder_out.shape[1]
        encoder_dim = encoder_out.shape[2]
        running_size = batch_size * beam_size
        # Tile the encoder memory so every beam hypothesis attends to the
        # same utterance.
        encoder_out = encoder_out.unsqueeze(1).repeat(1, beam_size, 1, 1).view(
            running_size, maxlen, encoder_dim)  # (B*N, maxlen, encoder_dim)
        encoder_mask = encoder_mask.unsqueeze(1).repeat(
            1, beam_size, 1, 1).view(running_size, 1,
                                     maxlen)  # (B*N, 1, max_len)
        # Every hypothesis starts from <sos>.
        hyps = paddle.ones(
            [running_size, 1], dtype=paddle.long).fill_(self.sos)  # (B*N, 1)
        # log scale score: only the first beam per utterance starts alive;
        # the rest start at -inf so they cannot win the first topk.
        scores = paddle.to_tensor(
            [0.0] + [-float('inf')] * (beam_size - 1), dtype=paddle.float)
        scores = scores.to(device).repeat(batch_size).unsqueeze(1).to(
            device)  # (B*N, 1)
        end_flag = paddle.zeros_like(scores, dtype=paddle.bool)  # (B*N, 1)
        cache: Optional[List[paddle.Tensor]] = None
        # 2. Decoder forward step by step
        for i in range(1, maxlen + 1):
            # Stop if all batch and all beam produce eos
            # TODO(<NAME>): if end_flag.sum() == running_size:
            if end_flag.cast(paddle.int64).sum() == running_size:
                break
            # 2.1 Forward decoder step
            hyps_mask = subsequent_mask(i).unsqueeze(0).repeat(
                running_size, 1, 1).to(device)  # (B*N, i, i)
            # logp: (B*N, vocab)
            logp, cache = self.decoder.forward_one_step(
                encoder_out, encoder_mask, hyps, hyps_mask, cache)
            # 2.2 First beam prune: select topk best prob at current time
            top_k_logp, top_k_index = logp.topk(beam_size)  # (B*N, N)
            # Finished beams are frozen: their score stays put and they can
            # only emit <eos>.
            top_k_logp = mask_finished_scores(top_k_logp, end_flag)
            top_k_index = mask_finished_preds(top_k_index, end_flag, self.eos)
            # 2.3 Second beam prune: select topk score with history
            scores = scores + top_k_logp  # (B*N, N), broadcast add
            scores = scores.view(batch_size, beam_size * beam_size)  # (B, N*N)
            scores, offset_k_index = scores.topk(k=beam_size)  # (B, N)
            scores = scores.view(-1, 1)  # (B*N, 1)
            # 2.4. Compute base index in top_k_index,
            # regard top_k_index as (B*N*N),regard offset_k_index as (B*N),
            # then find offset_k_index in top_k_index
            base_k_index = paddle.arange(batch_size).view(-1, 1).repeat(
                1, beam_size)  # (B, N)
            base_k_index = base_k_index * beam_size * beam_size
            best_k_index = base_k_index.view(-1) + offset_k_index.view(
                -1)  # (B*N)
            # 2.5 Update best hyps
            best_k_pred = paddle.index_select(
                top_k_index.view(-1), index=best_k_index, axis=0)  # (B*N)
            best_hyps_index = best_k_index // beam_size
            last_best_k_hyps = paddle.index_select(
                hyps, index=best_hyps_index, axis=0)  # (B*N, i)
            hyps = paddle.cat(
                (last_best_k_hyps, best_k_pred.view(-1, 1)),
                dim=1)  # (B*N, i+1)
            # 2.6 Update end flag
            end_flag = paddle.eq(hyps[:, -1], self.eos).view(-1, 1)
        # 3. Select best of best
        scores = scores.view(batch_size, beam_size)
        # TODO: length normalization
        best_index = paddle.argmax(scores, axis=-1).long()  # (B)
        best_hyps_index = best_index + paddle.arange(
            batch_size, dtype=paddle.long) * beam_size
        best_hyps = paddle.index_select(hyps, index=best_hyps_index, axis=0)
        # Drop the leading <sos> before returning.
        best_hyps = best_hyps[:, 1:]
        return best_hyps
def ctc_greedy_search(
self,
speech: paddle.Tensor,
speech_lengths: paddle.Tensor,
decoding_chunk_size: int=-1,
num_decoding_left_chunks: int=-1,
simulate_streaming: bool=False, ) -> List[List[int]]:
""" Apply CTC greedy search
Args:
speech (paddle.Tensor): (batch, max_len, feat_dim)
speech_length (paddle.Tensor): (batch, )
beam_size (int): beam size for beam search
decoding_chunk_size (int): decoding chunk for dynamic chunk
trained model.
<0: for decoding, use full chunk.
>0: for decoding, use fixed chunk size as set.
0: used for training, it's prohibited here
simulate_streaming (bool): whether do encoder forward in a
streaming fashion
Returns:
List[List[int]]: best path result
"""
assert speech.shape[0] == speech_lengths.shape[0]
assert decoding_chunk_size != 0
batch_size = speech.shape[0]
# Let's assume B = batch_size
# encoder_out: (B, maxlen, encoder_dim)
# encoder_mask: (B, 1, Tmax)
encoder_out, encoder_mask = self._forward_encoder(
speech, speech_lengths, decoding_chunk_size,
num_decoding_left_chunks, simulate_streaming)
maxlen = encoder_out.shape[1]
# (TODO <NAME>): bool no | |
import warnings
from complexity_considerations_package.binary_layer import BinaryConv2D
import config
if config.tf:
from tensorflow.keras.layers import (GlobalAveragePooling2D, GlobalMaxPooling2D, Dense,
multiply, add, Permute, Conv2D,
Reshape, BatchNormalization, ELU, MaxPooling2D, Dropout, Lambda)
import tensorflow.keras.backend as K
else:
from keras.layers import (GlobalAveragePooling2D, GlobalMaxPooling2D, Dense,
multiply, add, Permute, Conv2D,
Reshape, BatchNormalization, ELU, MaxPooling2D, Dropout, Lambda)
import keras.backend as K
from tensorflow.keras.regularizers import l2
__authors__ = "<NAME>, <NAME> and <NAME>"
__copyright__ = "Machine Listeners Valencia"
__credits__ = ["Machine Listeners Valencia"]
__license__ = "MIT License"
__version__ = "0.5.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Dev"
__date__ = "2020"
def _obtain_input_shape(input_shape,
default_size,
min_size,
data_format,
require_flatten,
weights=None):
"""Internal utility to compute/validate a model's tensor shape.
# Arguments
input_shape: Either None (will return the default network input shape),
or a user-provided shape to be validated.
default_size: Default input width/height for the model.
min_size: Minimum input width/height accepted by the model.
data_format: Image data format to use.
require_flatten: Whether the model is expected to
be linked to a classifier via a Flatten layer.
weights: One of `None` (random initialization)
or 'imagenet' (pre-training on ImageNet).
If weights='imagenet' input channels must be equal to 3.
# Returns
An integer shape tuple (may include None entries).
# Raises
ValueError: In case of invalid argument values.
"""
if weights != 'imagenet' and input_shape and len(input_shape) == 3:
if data_format == 'channels_first':
if input_shape[0] not in {1, 3}:
warnings.warn(
'This model usually expects 1 or 3 input channels. '
'However, it was passed an input_shape with {input_shape}'
' input channels.'.format(input_shape=input_shape[0]))
default_shape = (input_shape[0], default_size, default_size)
else:
if input_shape[-1] not in {1, 3}:
warnings.warn(
'This model usually expects 1 or 3 input channels. '
'However, it was passed an input_shape with {n_input_channels}'
' input channels.'.format(n_input_channels=input_shape[-1]))
default_shape = (default_size, default_size, input_shape[-1])
else:
if data_format == 'channels_first':
default_shape = (3, default_size, default_size)
else:
default_shape = (default_size, default_size, 3)
if weights == 'imagenet' and require_flatten:
if input_shape is not None:
if input_shape != default_shape:
raise ValueError('When setting `include_top=True` '
'and loading `imagenet` weights, '
'`input_shape` should be {default_shape}.'.format(default_shape=default_shape))
return default_shape
if input_shape:
if data_format == 'channels_first':
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError(
'`input_shape` must be a tuple of three integers.')
if input_shape[0] != 3 and weights == 'imagenet':
raise ValueError('The input must have 3 channels; got '
'`input_shape={input_shape}`'.format(input_shape=input_shape))
if ((input_shape[1] is not None and input_shape[1] < min_size) or
(input_shape[2] is not None and input_shape[2] < min_size)):
raise ValueError('Input size must be at least {min_size}x{min_size};'
' got `input_shape={input_shape}`'.format(min_size=min_size,
input_shape=input_shape))
else:
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError(
'`input_shape` must be a tuple of three integers.')
if input_shape[-1] != 3 and weights == 'imagenet':
raise ValueError('The input must have 3 channels; got '
'`input_shape={input_shape}`'.format(input_shape=input_shape))
if ((input_shape[0] is not None and input_shape[0] < min_size) or
(input_shape[1] is not None and input_shape[1] < min_size)):
raise ValueError('Input size must be at least {min_size}x{min_size};'
' got `input_shape={input_shape}`'.format(min_size=min_size,
input_shape=input_shape))
else:
if require_flatten:
input_shape = default_shape
else:
if data_format == 'channels_first':
input_shape = (3, None, None)
else:
input_shape = (None, None, 3)
if require_flatten:
if None in input_shape:
raise ValueError('If `include_top` is True, '
'you should specify a static `input_shape`. '
'Got `input_shape={input_shape}`'.format(input_shape=input_shape))
return input_shape
def _tensor_shape(tensor):
    """Return the static shape of *tensor* for the active backend."""
    if config.tf:
        return tensor.get_shape()
    # Plain-Keras tensors expose their shape via the _keras_shape attribute.
    return getattr(tensor, '_keras_shape')
def squeeze_excite_block(input_tensor, index, ratio=16, trident=None):
    """ Create a channel-wise squeeze-excite block

    Args:
        input_tensor: input Keras tensor
        index: integer used to build unique layer names
        ratio: reduction ratio of the bottleneck Dense layer
        trident: optional suffix appended to layer names
    Returns: a Keras tensor
    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    """
    suffix = '' if trident is None else trident
    channel_axis = 1 if K.image_data_format() == "channels_first" else -1
    n_filters = _tensor_shape(input_tensor)[channel_axis]
    # Squeeze: global pooling, then a bottleneck MLP ending in a sigmoid gate.
    gate = GlobalAveragePooling2D()(input_tensor)
    gate = Reshape((1, 1, n_filters))(gate)
    gate = Dense(n_filters // ratio, activation='relu', kernel_initializer='he_normal',
                 use_bias=False, name='dense_ratio_' + str(index) + suffix)(gate)
    gate = Dense(n_filters, activation='sigmoid', kernel_initializer='he_normal',
                 use_bias=False, name='dense_sigmoid_' + str(index) + suffix)(gate)
    if K.image_data_format() == 'channels_first':
        gate = Permute((3, 1, 2))(gate)
    # Excite: rescale every channel of the input by its gate value.
    return multiply([input_tensor, gate])
def spatial_squeeze_excite_block(input_tensor, index, binary_layer=False, trident=None):
    """ Create a spatial squeeze-excite block

    Args:
        input_tensor: input Keras tensor
        index: integer used to build unique layer names
        binary_layer: use BinaryConv2D instead of Conv2D
        trident: optional suffix appended to layer names
    Returns: a Keras tensor
    References
    -   [Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks](https://arxiv.org/abs/1803.02579)
    """
    suffix = '' if trident is None else trident
    # A 1x1 convolution with sigmoid produces a per-pixel attention map.
    if binary_layer is True:
        attention = BinaryConv2D(1, kernel_size=1, activation='sigmoid', use_bias=False,
                                 kernel_initializer='he_normal',
                                 name='conv1d_' + str(index) + suffix)(input_tensor)
    else:
        attention = Conv2D(1, (1, 1), activation='sigmoid', use_bias=False,
                           kernel_initializer='he_normal',
                           name='conv1d_' + str(index) + suffix)(input_tensor)
    return multiply([input_tensor, attention])
def channel_spatial_squeeze_excite(input_tensor, index, ratio=16, binary_layer=False, trident=None):
    """ Create a concurrent channel + spatial squeeze-excite block

    Args:
        input_tensor: input Keras tensor
        index: integer used to build unique layer names
        ratio: reduction ratio of the channel squeeze-excite bottleneck
        binary_layer: use BinaryConv2D in the spatial branch
        trident: optional suffix appended to layer names
    Returns: a Keras tensor
    References
    -   [Squeeze and Excitation Networks](https://arxiv.org/abs/1709.01507)
    -   [Concurrent Spatial and Channel Squeeze & Excitation in Fully Convolutional Networks](https://arxiv.org/abs/1803.02579)
    """
    # Sum the channel-wise and spatial attention branches.
    channel_branch = squeeze_excite_block(input_tensor, index, ratio, trident=trident)
    spatial_branch = spatial_squeeze_excite_block(input_tensor, index,
                                                  binary_layer=binary_layer, trident=trident)
    return add([channel_branch, spatial_branch])
def conv_standard_post(inp, nfilters, ratio, index, pre_act=False, shortcut='conv', binary_layer=False, trident=None):
    """ Module presented in https://ieeexplore.ieee.org/abstract/document/9118879

    Residual double-convolution block followed by concurrent channel/spatial
    squeeze-excitation, with a second residual addition after the attention.

    :param inp: input tensor
    :param nfilters: number of filter of convolutional layers
    :param ratio: parameter for squeeze-excitation module
    :param index: integer used to build unique layer names
    :param pre_act: if True use BN-ELU-Conv (pre-activation) ordering,
        otherwise Conv-BN-ELU (post-activation)
    :param shortcut: residual branch type: 'conv' (1x1 conv + BN),
        'global_avg' or 'global_max' (pooled and broadcast); any other value
        leaves the input untouched as the shortcut
    :param binary_layer: use BinaryConv2D instead of Conv2D
    :param trident: optional suffix appended to layer names
    :return: tensor
    """
    # x1 carries the residual (shortcut) branch.
    x1 = inp
    bn_name = 'bn_' + str(index)
    elu_name = 'elu_' + str(index)
    conv_name = 'conv_' + str(index)
    trident_suffix = '' if trident is None else trident
    if pre_act:
        # Pre-activation ordering: BN -> ELU -> Conv -> BN -> Conv.
        x = BatchNormalization(name=bn_name + '_a' + trident_suffix)(inp)
        x = ELU(name=elu_name + trident_suffix)(x)
        if binary_layer is True:
            x = BinaryConv2D(nfilters, kernel_size=3, use_bias=False, padding='same',
                             name=conv_name + '_a' + trident_suffix)(x)
        else:
            x = Conv2D(nfilters, 3, padding='same', name=conv_name + '_a' + trident_suffix)(x)
        x = BatchNormalization(name=bn_name + '_b' + trident_suffix)(x)
        if binary_layer is True:
            x = BinaryConv2D(nfilters, kernel_size=3, use_bias=False, padding='same',
                             name=conv_name + '_b' + trident_suffix)(x)
        else:
            x = Conv2D(nfilters, 3, padding='same', name=conv_name + '_b' + trident_suffix)(x)
    else:
        # Post-activation ordering: Conv -> BN -> ELU -> Conv -> BN.
        if binary_layer is True:
            x = BinaryConv2D(nfilters, kernel_size=3, use_bias=False, padding='same',
                             name=conv_name + '_a' + trident_suffix)(inp)
        else:
            x = Conv2D(nfilters, 3, padding='same', name=conv_name + '_a' + trident_suffix)(inp)
        x = BatchNormalization(name=bn_name + '_a' + trident_suffix)(x)
        x = ELU(name=elu_name + trident_suffix)(x)
        if binary_layer is True:
            x = BinaryConv2D(nfilters, kernel_size=3, use_bias=False, padding='same',
                             name=conv_name + '_b' + trident_suffix)(x)
        else:
            x = Conv2D(nfilters, 3, padding='same', name=conv_name + '_b' + trident_suffix)(x)
        x = BatchNormalization(name=bn_name + '_b' + trident_suffix)(x)
    if shortcut == 'conv':
        # Project the shortcut to the same channel count with a 1x1 conv.
        if binary_layer is True:
            x1 = BinaryConv2D(nfilters, kernel_size=1, use_bias=False, padding='same',
                              name=conv_name + '_shortcut' + trident_suffix)(x1)
        else:
            x1 = Conv2D(nfilters, 1, padding='same', name=conv_name + '_shortcut' + trident_suffix)(x1)
        x1 = BatchNormalization(name=bn_name + '_shortcut' + trident_suffix)(x1)
    elif shortcut == 'global_avg' or shortcut == 'global_max':
        # Replace the shortcut with its globally pooled, broadcast version.
        x1 = Lambda(pad_matrix_global, arguments={'type': shortcut},
                    name='lambda_padding_' + str(index) + trident_suffix)(x1)
    # Residual addition before and after the squeeze-excitation stage.
    x = module_addition(x, x1, index, 'a' + trident_suffix)
    x = ELU(name=elu_name + '_after_addition' + trident_suffix)(x)
    x = channel_spatial_squeeze_excite(x, index, ratio=ratio, binary_layer=binary_layer, trident=trident)
    x = module_addition(x, x1, index, 'b' + trident_suffix)
    return x
def network_module(inp, nfilters, ratio, pool_size, dropout_rate, index, pre_act=False, shortcut='conv',
                   binary_layer=False, trident=None):
    """ Implementation presented in https://ieeexplore.ieee.org/abstract/document/9118879

    Residual squeeze-excite convolution block followed by pooling and dropout.

    :param inp: input tensor
    :param nfilters: number of filter of convolutional layers
    :param ratio: parameter for squeeze-excitation module
    :param pool_size: size of the pool
    :param dropout_rate: rate for dropout
    :param index: integer used to build unique layer names
    :param pre_act: pre_activation flag
    :param shortcut: residual branch type (see conv_standard_post)
    :param binary_layer: use BinaryConv2D instead of Conv2D
    :param trident: optional suffix appended to layer names
    :return: tensor
    """
    suffix = '' if trident is None else trident
    features = conv_standard_post(inp, nfilters, ratio, index, pre_act=pre_act,
                                  shortcut=shortcut, binary_layer=binary_layer,
                                  trident=trident)
    features = MaxPooling2D(pool_size=pool_size, name='pool_' + str(index) + suffix)(features)
    return Dropout(dropout_rate, name='dropout_' + str(index) + suffix)(features)
def module_addition(inp1, inp2, index, suffix):
    """Add two tensors, channel-tiling *inp2* when its depth is smaller.

    :param inp1: tensor whose channel count sets the target depth
    :param inp2: tensor tiled along the channel axis when depths differ
    :param index: integer used to build unique layer names
    :param suffix: extra string appended to the Lambda layer name
    :return: tensor
    """
    depth1 = K.int_shape(inp1)[3]
    depth2 = K.int_shape(inp2)[3]
    if depth1 == depth2:
        return add([inp1, inp2])
    # Repeat inp2's channels so both operands have the same depth.
    tiled = Lambda(lambda y: K.repeat_elements(y, rep=int(depth1 // depth2), axis=3),
                   name='lambda_add_' + str(index) + '_' + str(suffix))(inp2)
    return add([inp1, tiled])
def pad_matrix_global(inp, type='global_avg'):
    """Globally pool *inp* and broadcast the pooled vector over every pixel.

    :param inp: 4D tensor (batch, h, w, channels) — channels-last assumed,
        matching the final permute below
    :param type: pooling mode, 'global_avg' or 'global_max'
    :return: tensor of shape (batch, h, w, channels) where each spatial
        position holds the pooled channel vector
    :raises ValueError: if *type* is not a supported pooling mode
    """
    h = K.int_shape(inp)[1]
    w = K.int_shape(inp)[2]
    if type == 'global_avg':
        x1 = GlobalAveragePooling2D()(inp)
    elif type == 'global_max':
        x1 = GlobalMaxPooling2D()(inp)
    else:
        # Previously an unknown mode fell through and raised a confusing
        # NameError on x1; fail fast with a clear message instead.
        raise ValueError("Unsupported pooling type: {type!r}; "
                         "expected 'global_avg' or 'global_max'".format(type=type))
    # Tile the pooled (batch, channels) vector to every spatial position.
    x1_rep = K.repeat(x1, h * w)
    x1_rep = Reshape((K.int_shape(x1)[1], h, w))(x1_rep)
    x1_rep = K.permute_dimensions(x1_rep, (0, 2, 3, 1))
    return x1_rep
def freq_split(inp, n_split_freqs, f_split_freqs):
"""
:param inp:
:param n_split_freqs:
:param f_split_freqs:
:return:
"""
if n_split_freqs == 2:
x1 = inp[:, 0:f_split_freqs[0], :, :]
x2 = inp[:, f_split_freqs[0]:, :, :]
return [x1, x2]
if | |
<reponame>bnb32/wholesome_bot
from wholesomebot.environment.emotes import emotes
import wholesomebot.environment.clean_info as cinfo
import wholesomebot.environment.settings as cfg
import wholesomebot.misc as misc
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
import re
import random
import pkg_resources
from symspellpy import SymSpell
from collections import defaultdict
from tqdm import tqdm
from emoji import demojize
from googletrans import Translator
import dask.dataframe as dd
# Shared NLP resources used by the cleaning helpers below.
stop_words = set(stopwords.words('english'))
wordnet_lemmatizer = WordNetLemmatizer()
logger = misc.get_logger()
translator = Translator()
# Spell corrector: English unigram dictionary, edit distance up to 2.
sym_spell = SymSpell(max_dictionary_edit_distance=2, prefix_length=7)
dictionary_path = pkg_resources.resource_filename("symspellpy", "frequency_dictionary_en_82_765.txt")
sym_spell.load_dictionary(dictionary_path, term_index=0, count_index=1)
# Word segmenter: edit distance 0 so it only splits, never corrects.
sym_spell_seg = SymSpell(max_dictionary_edit_distance=0, prefix_length=7)
bigram_path = pkg_resources.resource_filename("symspellpy", "frequency_bigramdictionary_en_243_342.txt")
sym_spell_seg.load_dictionary(dictionary_path, term_index=0, count_index=1)
# NOTE(review): the bigram dictionary is loaded into sym_spell, not
# sym_spell_seg — confirm this is intentional.
sym_spell.load_bigram_dictionary(bigram_path, term_index=0, count_index=2)
# Read in data
def read_data(dfile):
    """Load the labeled-message CSV lazily with dask.

    :param dfile: path to a CSV with 'text' and 'is_offensive' columns
    :return: (texts, labels) dask series pair
    """
    logger.info('Reading in data: %s' % dfile)
    # data = pd.read_csv(dfile)
    frame = dd.read_csv(dfile)
    return frame['text'].astype(str), frame['is_offensive']
def correct_msg(line):
    """Normalize one chat message: drop usernames, odd chars and repeats."""
    # tmp=filterEmotes(tmp)
    msg = misc.delete_usernames(line)
    msg = misc.prune_chars(msg)  # re.sub('[^A-Za-z0-9 ]+','',tmp)
    return misc.remove_reps(msg, sym_spell.words)
def segment_words(line):
    """Insert word boundaries into a run-together string via SymSpell."""
    if line == "":
        return line
    return sym_spell_seg.word_segmentation(line.strip()).segmented_string
def my_tokenizer(s):
    """Lowercase, split, drop stopwords, prune chars, then lemmatize."""
    tokens = [misc.prune_chars(word)
              for word in s.lower().split()
              if word not in stop_words]
    return my_lemmatizer(tokens)
def remove_stopwords(words):
    """Return *words* with English stopwords filtered out."""
    return [w for w in words if w not in stop_words]
def my_lemmatizer(words):
    """Lemmatize every word with the shared WordNet lemmatizer."""
    return list(map(wordnet_lemmatizer.lemmatize, words))
def preproc_words(texts):
    """Tokenize and lemmatize every text; returns a list of token lists."""
    return list(map(my_tokenizer, texts))
def join_words(lines):
    """Join each token list back into a space-separated string."""
    return list(map(' '.join, lines))
def filter_emotes(line):
    """Strip every known emote pattern (case-insensitive) from *line*."""
    cleaned = line
    for pattern in emotes:
        cleaned = re.sub(pattern, '', cleaned, flags=re.I)
    return cleaned.rstrip().lstrip()
def filter_all_emotes(texts, y):
    """Remove emotes from every message, dropping rows left empty."""
    kept_texts = []
    kept_labels = []
    for text, label in zip(texts, y):
        filtered = filter_emotes(text)
        # Keep the row only if something remains after emote removal.
        if filtered.strip() != '':
            kept_texts.append(filtered)
            kept_labels.append(label)
    return kept_texts, kept_labels
def write_data(outfile, texts, y):
    """Write labeled messages to a CSV file, offensive rows first.

    Rows with no ASCII letters at all are dropped. Output columns are
    'is_offensive,text' with the text double-quoted.

    :param outfile: destination CSV path
    :param texts: iterable of message strings
    :param y: iterable of labels (0/1, int or str) parallel to *texts*
    """
    logger.info(f"Writing data: {outfile}")
    # Filter once up front instead of re-running the regex per pass.
    rows = [(t, int(v)) for t, v in zip(texts, y)
            if re.sub('[^A-Za-z]+', '', t) != ""]
    # 'with' guarantees the file is closed even if a write fails
    # (previously the handle leaked on any exception).
    with open(outfile, 'w', encoding='utf-8') as f:
        f.write('is_offensive,text\n')
        # Offensive (1) rows are written before clean (0) rows.
        for wanted in (1, 0):
            for t, v in rows:
                if v == wanted:
                    f.write(f'{v},"{t}"\n')
def check_msgs(line, checks):
    """True when any check phrase occurs in *line*, case-insensitively.

    The final character of each phrase may repeat (e.g. 'bad' also matches
    'baddd') and trailing question marks are tolerated.
    """
    return any(
        re.search(r'\b{}*\?*\b'.format(cp + cp[-1]), line, re.I)
        for cp in checks)
def trim_log(texts, y, frac):
    """Randomly keep a fraction of clean messages; keep all offensive ones.

    :param texts: iterable of message strings
    :param y: parallel labels (0 = clean, 1 = offensive)
    :param frac: probability of keeping each clean (label 0) message
    :return: (kept_texts, kept_labels)
    """
    kept_texts = []
    kept_labels = []
    for text, label in zip(texts, y):
        value = int(label)
        if value == 1:
            # Offensive rows are always kept.
            kept_texts.append(text)
            kept_labels.append(label)
        elif value == 0 and random.choices([False, True],
                                           weights=[1 - frac, frac])[0]:
            kept_texts.append(text)
            kept_labels.append(label)
    return kept_texts, kept_labels
def contains_link(text):
    """True when *text* contains any known link substring."""
    return any(link in text for link in cinfo.lstrings)
class LogCleaning:
    """Turn a raw chatty-format chat log into a labeled clean/offensive CSV.

    Messages are grouped per user via MsgMemory, pre-sorted using
    moderation actions, an optional classifier and black/gray lists, and
    ambiguous ones are rated interactively by the operator.
    """

    def __init__(self, rawfile, cleanfile, wc, review_decisions=False):
        # rawfile: chatty log to read; cleanfile: labeled CSV to write.
        # wc: optional classifier exposing predict_prob(); None disables it.
        # review_decisions: only re-check messages this bot itself moderated.
        self.rawfile = rawfile
        self.cleanfile = cleanfile
        self.lines = None  # raw log lines, populated by prep_log()
        self.mem = MsgMemory()
        self.wc = wc
        self.review_decisions = review_decisions

    @staticmethod
    def is_valid_line(line):
        """Keep chat messages ('<user> ...') and moderation action lines."""
        line_starts = ['BAN:', 'MOD_ACTION:', 'DELETED:']
        if line.startswith(tuple(line_starts + ['<'])):
            return True
        else:
            return False

    def read_log(self):
        """Read the raw log file and return its lines, left-stripped."""
        # read raw log
        logger.info('Reading log: %s' % self.rawfile)
        f = open(self.rawfile)
        lines = f.readlines()
        f.close()
        lines = [line.lstrip() for line in lines]
        return lines

    def prep_log(self):
        """Return valid lines, usernames removed and double quotes escaped."""
        # prep log
        self.lines = self.read_log()
        return [misc.delete_usernames(re.sub('"', '\'', line)).rstrip('\n').rstrip().lstrip() for line in self.lines if
                self.is_valid_line(line)]

    def clean_log(self):
        """Classify every message chunk and write the labeled CSV."""
        # prep log
        lines = self.prep_log()
        self.mem.build_full_memory(lines)
        self.mem.chunk_memory()
        memory = self.mem.chunks
        cmsgs = []    # messages judged clean
        bmsgs = []    # messages judged offensive
        tocheck = []  # messages requiring a manual decision
        ctmp = []     # unmoderated messages, screened further below
        probs = []
        banned_user_count = 0
        clean_user_count = 0
        if self.review_decisions:
            # Review mode: only re-rate messages this bot acted on.
            logger.info("Preparing previous bot decisions for review")
            for user in tqdm(memory):
                for m in memory[user]:
                    if m['mod'] == cfg.NICK:
                        tocheck.append(m['msg'])
        else:
            logger.info("Dividing messages into 'bad' and 'other' for each user")
            for user in tqdm(memory):
                # Trusted roles and explicitly ignored users are skipped.
                if (user not in cinfo.ignore_users and
                        not memory[user][0]['isVip'] and
                        not memory[user][0]['isMod'] and
                        not memory[user][0]['isPartner']):
                    if any(m['banned'] for m in memory[user]):
                        banned_user_count += 1
                    else:
                        clean_user_count += 1
                    for m in memory[user]:
                        # NOTE(review): the isSub test below is always true
                        # (False or True) — presumably a placeholder for a
                        # subscriber filter; confirm intent.
                        if ((m['isSub'] is False or m['isSub'] is True) and
                                not contains_link(m['msg'])):
                            # Remove whitelisted phrases before judging.
                            for w in cinfo.whitelist:
                                m['msg'] = re.sub(w, '', m['msg'], flags=re.I)
                            if m['mod'] == cfg.NICK:
                                tocheck.append(m['msg'])
                            elif ((m['banned'] or m['deleted']) and
                                    m['mod'] in cinfo.ignore_actions):
                                pass
                            elif m['banned'] and m['mod'] in cinfo.ban_checks:
                                tocheck.append(m['msg'])
                            elif m['deleted'] and m['mod'] in cinfo.delete_checks:
                                tocheck.append(m['msg'])
                            elif m['deleted'] and m['mod'] is None:
                                tocheck.append(m['msg'])
                            elif m['deleted']:
                                bmsgs.append(m['msg'])
                            elif m['banned']:
                                bmsgs.append(m['msg'])
                            else:
                                ctmp.append(m['msg'])
        logger.info("Banned/timed-out users: %s" % banned_user_count)
        logger.info("Clean users: %s" % clean_user_count)
        logger.info("Dividing 'other' into clean, to-check, and bad")
        if self.wc is not None:
            logger.info("Calculating probabilities")
            probs = self.wc.predict_prob(ctmp)
        for n, text in tqdm(enumerate(ctmp)):
            # High classifier score or graylist hit -> manual check;
            # blacklist hit -> offensive; otherwise clean.
            if self.wc is not None and probs[n] > cfg.CHECK_PMIN:
                tocheck.append(text)
            elif check_msgs(text.lower(), cinfo.blacklist):
                bmsgs.append(text)
            elif check_msgs(text.lower(), cinfo.graylist):
                tocheck.append(text)
            else:
                cmsgs.append(text)
        texts = []
        y = []
        logger.info("Appending to-check messages: %s" % (len(tocheck)))
        for text in tqdm(tocheck):
            # Auto-rate when the message matches an already-decided one;
            # otherwise prompt the operator (translating non-English first).
            if (check_msgs(text.lower(), cinfo.blacklist) or
                    misc.remove_special_chars(text.lower()) in
                    (misc.remove_special_chars(bmsg.lower()) for bmsg in bmsgs)):
                rating = '1'
            elif (misc.remove_special_chars(text.lower()) in
                    (misc.remove_special_chars(cmsg.lower()) for cmsg in cmsgs)):
                rating = '0'
            else:
                trans_result = translator.translate(text)
                if trans_result.src != 'en':
                    rating = input(f'\nCheck: {text}\nTranslated: {trans_result.text}\n')
                else:
                    rating = input(f'\nCheck: {text}\n')
                # Anything other than 0/1/skip defaults to clean.
                if rating not in ['0', '1', 's']:
                    rating = '0'
            if rating == 's':
                logger.info("Skipped: %s" % text)
            elif rating == '1':
                bmsgs.append(text)
            elif rating == '0':
                cmsgs.append(text)
        logger.info("Appending banned messages: %s" % (len(bmsgs)))
        for text in tqdm(bmsgs):
            texts.append(text)
            y.append('1')
        logger.info("Appending clean messages: %s" % (len(cmsgs)))
        for text in tqdm(cmsgs):
            texts.append(text)
            y.append('0')
        # logger.info("Filtering emotes")
        # texts,y=filterAllEmotes(texts,y)
        write_data(self.cleanfile, texts, y)
def get_info_from_chatty(line):
    """Parse one chatty '<user> message' line into an info dict."""
    username = line.split('>')[0].strip('<')
    info = misc.info()
    info['isSub'] = misc.is_sub_chatty(username)
    info['isMod'] = misc.is_mod_chatty(username)
    info['isVip'] = misc.is_vip_chatty(username)
    info['isPartner'] = misc.is_partner_chatty(username)
    info['isPleb'] = misc.is_pleb_chatty(username)
    # Everything after the first '>' is the message body.
    info['msg'] = line[line.index('>') + 1:].lstrip()
    info['user'] = misc.remove_special_chars(username).lower()
    return info
class MsgMemory:
    """Per-user short-term message memory with ban/delete/mod annotations."""

    def __init__(self):
        self.memory = defaultdict(list)  # user -> list of message entries
        self.chunks = defaultdict(list)  # user -> merged message chunks
        self.msg_limit = 1               # max messages kept/merged per user

    def add_msg(self, info):
        """Store *info* (minus the 'user' key) under its user, demojized."""
        entry = {k: v for k, v in info.items() if k != 'user'}
        entry['msg'] = demojize(info['msg'])
        self.memory[info['user']].append(entry)
        return self

    def del_msg(self, user):
        """Drop the oldest stored message for *user*."""
        self.memory[user].pop(0)
        return self

    def check_msgs(self, user):
        """Trim *user*'s history: clear after a ban, else cap its length."""
        if len(self.memory[user]) > 0:
            if self.memory[user][-1]['banned']:
                self.clear_user(user)
            elif len(self.memory[user]) > self.msg_limit:
                self.del_msg(user)
        return self

    def update_user_ban(self, user, banned=True):
        """Mark the most recent message of *user* as banned (or not)."""
        self.memory[user][-1]['banned'] = banned
        return self

    def clear_user(self, user):
        """Forget everything stored for *user*."""
        self.memory[user] = []
        return self

    def build_full_memory(self, lines):
        """Replay a whole log, attaching ban/delete/mod info to messages."""
        logger.info("Building full memory")
        for line in tqdm(lines):
            if line.startswith('<'):
                info = get_info_from_chatty(line)
                self.add_msg(info)
            elif line.startswith('BAN:'):
                # Mark the user's latest message; ignore bans for users with
                # no stored messages.
                try:
                    user = line.split()[1].lower()
                    self.memory[user][-1]['banned'] = True
                except Exception:
                    pass
            elif line.startswith('DELETED:'):
                try:
                    user = line.split()[1].lower()
                    self.memory[user][-1]['deleted'] = True
                except Exception:
                    pass
            elif line.startswith('MOD_ACTION:'):
                # Token layout assumed: MOD_ACTION: <mod> <action> <user> ...
                # — TODO confirm against the log format.
                try:
                    user = line.split()[3].lower()
                    mod = line.split()[1].lower()
                    self.memory[user][-1]['mod'] = mod
                except Exception:
                    pass
        return self

    def chunk_memory(self):
        """Merge each user's messages into chunks of up to msg_limit msgs."""
        info = None
        for user in self.memory:
            count = 0
            for m in self.memory[user]:
                if count == 0:
                    # Start a fresh chunk accumulator.
                    info = misc.info()
                if info['msg'] is None:
                    info['msg'] = m['msg'] + '. '
                else:
                    info['msg'] += m['msg'] + '. '
                # Merge flags from the message; keep the last non-None mod.
                for k in m:
                    if k != 'msg' and k != 'mod':
                        info[k] = m[k]
                    if k == 'mod' and m['mod'] is not None:
                        info['mod'] = m['mod']
                count += 1
                # Flush on limit, end of history, ban or delete.
                if (count == self.msg_limit or
                        count == len(self.memory[user]) or
                        info['banned'] or
                        info['deleted']):
                    entry = {k: v for k, v in info.items() if k != 'user'}
                    self.chunks[user].append(entry)
                    count = 0
        return self

    def chunk_recent(self, info):
        """Add *info* and return the user's last msg_limit messages joined."""
        self.check_msgs(info['user'])
        self.add_msg(info)
        msgs = [m['msg'].strip('\r') for m in self.memory[info['user']][-self.msg_limit:]]
        return '. '.join(msgs)
def check_probs(infile, outfile, bounds, wc):
    """Interactively re-label messages whose classifier probability looks suspicious.

    Reads (text, label) pairs from `infile`; rows whose index lies within
    `bounds`, whose label compares equal to 0, that contain no link, and whose
    predicted probability from classifier `wc` exceeds cfg.CHECK_PMIN are
    queued for manual review. The operator answers '0'/'1' to relabel,
    's' to skip (drop) the message, or 'e' to end checking; everything is
    then written to `outfile`.

    NOTE(review): labels elsewhere in this file are the strings '0'/'1';
    if read_data also returns string labels, `tmp_y[n] == 0` never matches
    and nothing is ever queued — confirm read_data's label type.
    """
    tmp_texts, tmp_y = read_data(infile)
    texts = []
    y = []
    to_check = []
    y_check = []
    probs = []
    logger.info("Separating to_check from all data")
    if wc is not None:
        logger.info("Calculating probabilities")
        probs = wc.predict_prob(tmp_texts)
    for n, t in tqdm(enumerate(tmp_texts)):
        if (wc is not None and bounds[0] <= n <= bounds[1] and
                tmp_y[n] == 0 and not contains_link(t) and
                probs[n] > cfg.CHECK_PMIN):
            to_check.append(t)
            y_check.append(tmp_y[n])
        else:
            texts.append(t)
            y.append(tmp_y[n])
    logger.info("Reclassifying to_check messages")
    end_check = False
    for n, t in tqdm(enumerate(to_check)):
        if not end_check:
            # Any input other than 0/1/s/e keeps the existing label.
            rating = input('\nCheck: %s, %s' % (y_check[n], t))
            if rating not in ['0', '1', 's', 'e']:
                rating = y_check[n]
            if rating == 's':
                logger.info("Skipped: %s" % t)
            if rating == 'e':
                end_check = True
            if rating in ['0', '1']:
                texts.append(t)
                y.append(rating)
        else:
            # NOTE(review): after 'e' is chosen this branch runs for every
            # remaining index and re-appends the tail each time, producing
            # duplicates — it likely needs a `break` after the inner loop.
            # Confirm intent before changing.
            for i in range(n, len(to_check)):
                texts.append(to_check[i])
                y.append(y_check[i])
    write_data(outfile, texts, y)
def check_phrases(infile, outfile):
tmp_texts, tmp_y = read_data(infile)
phrases = input('\nEnter phrases to check\n')
phrases = phrases.split(',')
texts = []
y = []
to_check = []
y_check = []
logger.info("Separating to_check from all data")
for n, t in tqdm(enumerate(tmp_texts)):
if check_msgs(t.lower(), phrases):
to_check.append(t)
y_check.append(tmp_y[n])
else:
texts.append(t)
y.append(tmp_y[n])
logger.info("Reclassifying to_check messages")
end_check = False
for n, t in tqdm(enumerate(to_check)):
if not end_check:
rating = input('\nCheck: %s, %s' % (y_check[n], t))
if rating not in ['0', '1', 's', 'e']:
rating = y_check[n]
if rating == 's':
logger.info("Skipped: %s" % t)
if rating == 'e':
end_check = True
if rating in ['0', '1']:
texts.append(t)
y.append(rating)
else:
for i | |
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
import os
import re
#------------------------------------------------------
#Define a function that calculates the calibration parameters and returns them
def camera_cal(images, nx, ny):
    """Compute the camera calibration from chessboard images.

    Parameters
    ----------
    images : iterable of str
        Paths to calibration images containing an nx-by-ny inner-corner chessboard.
    nx, ny : int
        Number of inside corners along x and y.

    Returns
    -------
    tuple
        (ret, mtx, dist, rvecs, tvecs) as returned by cv2.calibrateCamera.

    Raises
    ------
    ValueError
        If corners are found in no image (previously this surfaced as a
        confusing NameError on `image_shape`).
    """
    # Template of 3D object points for one board: (0,0,0), (1,0,0), ..., (nx-1,ny-1,0)
    objp = np.zeros((ny * nx, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)

    objpoints = []  # 3d points in real world space
    imgpoints = []  # 2d points in image plane

    image_shape = None
    for fname in images:
        img = cv2.imread(fname)
        # cv2.imread returns BGR, so BGR2GRAY is the correct conversion
        # (the original used COLOR_RGB2GRAY, swapping the R/B channel weights).
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if ret:
            objpoints.append(objp)
            imgpoints.append(corners)
            # (width, height) for calibrateCamera; the unused
            # drawChessboardCorners visualization call was removed.
            image_shape = gray.shape[::-1]

    if image_shape is None:
        raise ValueError("No chessboard corners found in any calibration image")

    return cv2.calibrateCamera(objpoints, imgpoints, image_shape, None, None)
#-------------------------------------------------------
#Define a function that undistorts an image given the calibration parameters
def undistort_image(image_raw, mtx, dist):
    """Return `image_raw` with lens distortion removed, using camera matrix
    `mtx` and distortion coefficients `dist`."""
    return cv2.undistort(image_raw, mtx, dist, None, mtx)
#---------------------------------------------------------
# Define a function that converts the image into thresholded binary channels
def chanel_function(img, s_thresh, l_thresh, sx_thresh, sy_thresh, red_tresh, sobel_kernel):
    """Stack red-channel, x-gradient and saturation thresholds into a 3-channel image.

    Parameters
    ----------
    img : np.ndarray
        3-channel color image (assumed RGB order, e.g. from mpimg.imread — TODO confirm).
    s_thresh, sx_thresh, red_tresh : (low, high)
        Inclusive threshold ranges for saturation, scaled Sobel-x, and red channel.
    l_thresh, sy_thresh :
        Currently unused; kept to preserve the call signature.
    sobel_kernel : int
        Sobel aperture size.

    Returns
    -------
    np.ndarray
        uint8-style stacked image: R = red threshold, G = gradient, B = saturation,
        each scaled by 255.
    """
    # Saturation channel from HLS (the unused lightness channel extraction was removed).
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    s_channel = hls[:, :, 2]
    # Red channel and grayscale for the gradient.
    img_red = img[:, :, 0]
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)

    # Red-channel threshold.
    red_binary = np.zeros_like(img_red)
    red_binary[(img_red >= red_tresh[0]) & (img_red <= red_tresh[1])] = 1

    # x-gradient (Sobel) threshold on grayscale, normalized to 0-255.
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
    abs_sobelx = np.absolute(sobelx)
    scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1

    # Saturation threshold.
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1

    # Stack the three binaries into one displayable image.
    color_binary = np.dstack((red_binary, sxbinary, s_binary)) * 255
    return color_binary
#-------------------------------------------------
#region of interest from project 1
def region_of_interest(img, vertices):
    """Keep only the pixels of `img` inside the polygon(s) given by `vertices`;
    everything else is blacked out."""
    mask = np.zeros_like(img)
    # Fill value must match the image depth: one 255 per channel, or a scalar
    # for single-channel images.
    if len(img.shape) > 2:
        fill_color = (255,) * img.shape[2]
    else:
        fill_color = 255
    cv2.fillPoly(mask, vertices, fill_color)
    return cv2.bitwise_and(img, mask)
#Define Function for perspective transforms
def perspective_transform(img):
    """Warp `img` to a top-down ("bird's-eye") view of the lane region.

    Returns
    -------
    (warped, Minv, img_size)
        The warped image, the inverse perspective matrix for mapping results
        back onto the original image, and (width, height).
    """
    # The original also built a region-of-interest mask here, but the masked
    # image was never used; that dead computation has been removed.
    img_size = (img.shape[1], img.shape[0])
    # Hand-tuned source trapezoid on the road and destination rectangle.
    src = np.float32([[585, 455], [705, 455], [1130, 720], [190, 720]])
    offset = 200
    dst = np.float32([[offset, 0], [img_size[0] - offset, 0],
                      [img_size[0] - offset, img_size[1]], [offset, img_size[1]]])
    M = cv2.getPerspectiveTransform(src, dst)
    Minv = cv2.getPerspectiveTransform(dst, src)
    warped = cv2.warpPerspective(img, M, img_size)
    return warped, Minv, img_size
#-------------------------------------------------
#Define function for lane line detection using sliding windows
def lane_finding(img):
    """Locate left/right lane-line pixels in a warped binary image via sliding windows.

    Parameters
    ----------
    img : np.ndarray
        Binary warped (bird's-eye) lane image.

    Returns
    -------
    (leftx, lefty, rightx, righty, out_img)
        Pixel coordinates of each lane line plus a visualization image with
        the search windows drawn.
    """
    # Tuning knobs for the sliding-window search.
    nwindows = 9   # number of vertical window slices
    margin = 100   # half-width of each search window
    minpix = 50    # minimum pixels found to recenter a window

    # Histogram of the bottom half locates the lane-line base x positions.
    bottom_half = img[img.shape[0] // 2:, :]
    histogram = np.sum(bottom_half, axis=0)
    out_img = np.dstack((img, img, img))

    # np.int was removed in NumPy 1.24; the builtin int is the correct replacement.
    midpoint = int(histogram.shape[0] // 2)
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint

    window_height = int(img.shape[0] // nwindows)

    # Coordinates of all nonzero pixels in the image.
    nonzero = img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])

    # Window centers, updated as we walk up the image.
    leftx_current = leftx_base
    rightx_current = rightx_base

    left_lane_inds = []
    right_lane_inds = []

    for window in range(nwindows):
        # Window bounds in y and in x for both lanes.
        win_y_low = img.shape[0] - (window + 1) * window_height
        win_y_high = img.shape[0] - window * window_height
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin

        # Draw the windows on the visualization image.
        cv2.rectangle(out_img, (win_xleft_low, win_y_low),
                      (win_xleft_high, win_y_high), (0, 255, 0), 2)
        cv2.rectangle(out_img, (win_xright_low, win_y_low),
                      (win_xright_high, win_y_high), (0, 255, 0), 2)

        # Nonzero pixels falling inside each window.
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                          (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
                           (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)

        # Recenter the next window on the mean x when enough pixels were found.
        if len(good_left_inds) > minpix:
            leftx_current = int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:
            rightx_current = int(np.mean(nonzerox[good_right_inds]))

    # Flatten the per-window index lists into flat index arrays.
    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        # Keep going with whatever was gathered if concatenation fails.
        pass

    # Extract left and right line pixel positions.
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds]
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    return leftx, lefty, rightx, righty, out_img
def fit_polynomial(img):
    """Fit a second-order polynomial to each lane line of a warped binary image.

    Returns (out_img, left_fit, right_fit, ploty, left_fitx, right_fitx):
    the visualization image with lane pixels colored, the two fit coefficient
    arrays, the y samples, and the fitted x values for each lane.
    """
    leftx, lefty, rightx, righty, out_img = lane_finding(img)

    # Second-order fits x = f(y) for each lane.
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)

    # One y sample per image row.
    ploty = np.linspace(0, img.shape[0] - 1, img.shape[0])

    def _evaluate(fit):
        # Evaluate the quadratic at every sample row.
        return fit[0] * ploty ** 2 + fit[1] * ploty + fit[2]

    try:
        left_fitx = _evaluate(left_fit)
        right_fitx = _evaluate(right_fit)
    except TypeError:
        # Fall back to dummy curves when a fit is missing/invalid.
        print('The function failed to fit a line!')
        left_fitx = 1 * ploty ** 2 + 1 * ploty
        right_fitx = 1 * ploty ** 2 + 1 * ploty

    # Color the detected lane pixels: left red, right blue.
    out_img[lefty, leftx] = [255, 0, 0]
    out_img[righty, rightx] = [0, 0, 255]

    return out_img, left_fit, right_fit, ploty, left_fitx, right_fitx
def measure_curvature_real(ploty, left_fit_cr, right_fit_cr, image_mid):
'''
Calculates the curvature of polynomial functions in meters.
'''
# Define conversions in x and y from pixels space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# Start by generating our fake example data
# Make sure to feed in your real data instead in your project!
#ploty, left_fit_cr, right_fit_cr = generate_data(ym_per_pix, xm_per_pix)
# Define y-value where we want radius of curvature
# We'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
# Calculation of R_curve (radius of curvature)
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
left_pos = (left_fit_cr[0]*y_eval**2 + left_fit_cr[1]*y_eval + left_fit_cr[2])
right_pos = (right_fit_cr[0]*y_eval**2 + right_fit_cr[1]*y_eval + right_fit_cr[2])
lanes_mid = (left_pos+right_pos)/2.0
distance_from_mid = image_mid - lanes_mid
offset_mid = xm_per_pix*distance_from_mid
return left_curverad, right_curverad, offset_mid
#######################################################
#######################################################
##############################
#NanoDegree Project 2 - Rubric
#############################
#--------------------------------------------------------
#Rubric 1: Compute the camera calibration matrix and distortion coefficients given a set of chessboard images.
nx = 9 #TODO: enter the number of inside corners in x
ny = 6 #TODO: enter the number of inside corners in y
#store all calibration images in images
images = glob.glob("./camera_cal/calibration*.jpg")
#Call Camera Cal Function for matrix and coefficients
ret, mtx, dist, rvecs, tvecs = camera_cal(images, nx, ny)
#--------------------------------------------------------
# Rubric 2: Apply a distortion correction to raw images.
# Pipline from Project 1
filepath = "test_images/"
outputpath = "output_images/"
#pattern = re.compile("^.+processed.jpg$")
fileExtension = | |
use
aidx = [atom_map[i] - 1 for i in unmapped]
if prim_fn is geometric.internal.OutOfPlane:
aidx = ImproperDict.key_transform(aidx)
# this is needed for the code below, where we use
# unmapped as the key, which must be sorted (in the
# same way openff sorts
unmapped = ImproperDict.key_transform(unmapped)
param_name = "i"
else:
unmapped = ValenceDict.key_transform(unmapped)
aidx = ValenceDict.key_transform(aidx)
new_ic = prim_fn(*aidx)
if new_ic not in ic_prims.Internals:
# Is it ok if we add extra ICs to the primitive list?
# if prim_fn is not geometric.internal.OutOfPlane:
# print("Adding an IC:", new_ic, "which may indicate missing coverage by the FF!")
ic_prims.add(new_ic)
# if the tuple has no param (impropers), skip it
try:
if self._to.chembit:
prim = primitives[unmapped]
elif graph_ic is not None:
prim = graph_ic[unmapped]
else:
prim = prim_to_graph[param_name[0]].from_string_list(
primitives[unmapped], sorted=True
)
# prim_key = self._prim_tab.get(prim)
# prim_key = self._get_prim_key(prim)
prim_map = self._prim.get(entry.payload)
if prim_map is None:
self._prim[entry.payload] = {unmapped: prim}
else:
prim_map[unmapped] = prim
except Exception as e:
breakpoint()
print("Issue with assigning primitive! Error message:")
print(e)
    def _get_prim_key(self, prim):
        """Look up `prim` in the primitive table.

        NOTE(review): the body is only a bare attribute reference
        (``self._prim_tab``) and implicitly returns None — this looks like an
        unfinished stub; confirm before relying on it.
        """
        self._prim_tab
# to estimate the FCs, we need the gradients and the hessians
if QCA[mol.parent].payload == "Gradient":
for hessian_node in QCA.node_iter_depth_first(
mol, select="Hessian"
):
xyz = QCA.db[mol.payload]["data"].geometry
hess = QCA.db[hessian_node.payload]["data"].return_result
grad = np.array(
QCA.db[hessian_node.payload]["data"].extras["qcvars"][
"CURRENT GRADIENT"
]
)
ic_hess = ic_prims.calcHess(xyz, grad, hess)
# eigs = np.linalg.eigvalsh(ic_hess)
# s = np.argsort(np.diag(ic_hess))
# force_vals = eigs
force_vals = np.diag(ic_hess)
# ic_vals = [ic_prims.Internals[i] for i in s]
ic_vals = ic_prims.Internals
for aidx, val in zip(ic_vals, force_vals):
key = tuple(
map(
lambda x: map_inv[int(x) - 1],
str(aidx).split()[1].split("-"),
)
)
if type(aidx) is geometric.internal.OutOfPlane:
key = ImproperDict.key_transform(key)
else:
key = ValenceDict.key_transform(key)
# no conversion, this is done elsewhere, e.g. set_parameter
# if type(aidx) is geometric.internal.Distance:
# val = val / (offsb.tools.const.bohr2angstrom ** 2)
# val *= offsb.tools.const.hartree2kcalmol
# if mol.payload == "QCM-1396980":
# breakpoint()
# print("HI")
if mol.payload not in self._fc:
self._fc[mol.payload] = {key: val}
else:
self._fc[mol.payload][key] = val
    def _calculate_ic_force_constants(self):
        """Populate ``self._ic``, ``self._fc`` and ``self._prim`` from the QCA dataset.

        Measures internal coordinates for every entry, maps each labeled
        parameter atom tuple onto a geomeTRIC primitive internal, and — for
        molecules under a "Gradient" parent that carry Hessian nodes —
        estimates per-IC force constants from the diagonal of the
        internal-coordinate Hessian. Runs at most once (no-op when
        ``self._ic`` is already set).

        TODO: Make an operation out of this rather than do it here
        """
        # Already computed; this method is effectively run-once.
        if self._ic is not None:
            return
        self._ic = {}
        self._fc = {}
        self._prim = {}
        import geometric.internal
        import geometric.molecule
        import offsb.ui.qcasb
        QCA = self._po.source.source
        # need this for measuring geometry
        # should only need to do it once
        qcasb = offsb.ui.qcasb.QCArchiveSpellBook(QCA=QCA)
        # Measurement function per handler type. NOTE(review): only referenced
        # by the commented-out loop below; the live path goes through
        # measure_internal_coordinates().
        vtable = {
            "Bonds": qcasb.measure_bonds,
            "Angles": qcasb.measure_angles,
            "ImproperTorsions": qcasb.measure_outofplanes,
            "ProperTorsions": qcasb.measure_dihedrals,
        }
        # geomeTRIC primitive class per handler type.
        prim_table = {
            "Bonds": geometric.internal.Distance,
            "Angles": geometric.internal.Angle,
            "ImproperTorsions": geometric.internal.OutOfPlane,
            "ProperTorsions": geometric.internal.Dihedral,
        }
        # Single-letter parameter prefixes -> handler names, used to translate
        # self._ignore_parameters into handler types to skip.
        ignore_tab = {
            "b": "Bonds",
            "a": "Angles",
            "i": "ImproperTorsions",
            "t": "ProperTorsions",
        }
        ignore = [ignore_tab[x] for x in self._ignore_parameters if x in ignore_tab]
        # should return a dict of the kind
        #
        # well, the measure already collects what is needed
        # need a way to transform indices to primitives st we have prim: measure
        # so we could just keep a indices -> prim, which is what _to is
        ic_op = offsb.op.internal_coordinates.InteralCoordinateGeometricOperation(
            QCA, "ic", verbose=True
        )
        ic_op.processes = None
        ic_op.apply()
        # need this for measuring geometry
        # should only need to do it once
        qcasb = offsb.ui.qcasb.QCArchiveSpellBook(QCA=QCA)
        qcasb.verbose = True
        labeler = self.labels()
        # self.to_smirnoff_xml("tmp.offxml", verbose=False)
        # labeler = qcasb.assign_labels_from_openff("tmp.offxml", "tmp.offxml")
        # self._labeler = labeler
        self._ic = qcasb.measure_internal_coordinates(ignore=ignore)
        # for ic_type, measure_function in vtable.items():
        #     # the geometry measurements
        #     ic = measure_function(ic_type)
        #     ic.source.source = QCA
        #     ic.verbose = True
        #     ic.apply()
        #     self._ic[ic_type] = ic
        n_entries = len(list(QCA.iter_entry()))
        for entry in tqdm.tqdm(
            QCA.iter_entry(), total=n_entries, desc="IC generation", ncols=80
        ):
            self._prim[entry.payload] = {}
            # need to unmap... sigh
            smi = QCA.db[entry.payload]["data"].attributes[
                "canonical_isomeric_explicit_hydrogen_mapped_smiles"
            ]
            rdmol = offsb.rdutil.mol.build_from_smiles(smi)
            atom_map = offsb.rdutil.mol.atom_map(rdmol)
            map_inv = offsb.rdutil.mol.atom_map_invert(atom_map)
            primitives = self._to.db[entry.payload]["data"]
            graph_ic = None
            graph = self._to.db[entry.payload]["data"].get("graph", None)
            for mol in QCA.node_iter_depth_first(entry, select="Molecule"):
                if graph is not None and graph_ic is None:
                    # Flatten all IC kinds into one {atom-tuple: graph object} map.
                    # NOTE(review): ``graph.atom`` (no call) differs from the
                    # ``graph.atoms()`` used in print_label_assignments — confirm
                    # which spelling is correct for this graph type.
                    graph_ic = {
                        "b": graph.bonds(),
                        "a": graph.angles(),
                        "i": graph.outofplanes(),
                        "t": graph.torsions(),
                        "n": graph.atom,
                    }
                    graph_ic = {k: v for x in graph_ic.values() for k, v in x.items()}
                # with tempfile.NamedTemporaryFile(mode="wt") as f:
                #     offsb.qcarchive.qcmol_to_xyz(
                #         QCA.db[mol.payload]["data"], fnm=f.name
                #     )
                #     gmol = geometric.molecule.Molecule(f.name, ftype="xyz")
                # # with open("out.xyz", mode="wt") as f:
                # #     offsb.qcarchive.qcmol_to_xyz(QCA.db[mol.payload]["data"], fd=f)
                # ic_prims = geometric.internal.PrimitiveInternalCoordinates(
                #     gmol,
                #     build=True,
                #     connect=True,
                #     addcart=False,
                #     constraints=None,
                #     cvals=None,
                # )
                ic_prims = ic_op.db[entry.payload]["data"]
                for ic_type, prim_fn in prim_table.items():
                    for unmapped, param_name in labeler.db[entry.payload]["data"][
                        ic_type
                    ].items():
                        # forward map to QCA index, which is what ICs use
                        aidx = [atom_map[i] - 1 for i in unmapped]
                        if prim_fn is geometric.internal.OutOfPlane:
                            aidx = ImproperDict.key_transform(aidx)
                            # this is needed for the code below, where we use
                            # unmapped as the key, which must be sorted (in the
                            # same way openff sorts
                            unmapped = ImproperDict.key_transform(unmapped)
                            param_name = "i"
                        else:
                            unmapped = ValenceDict.key_transform(unmapped)
                            aidx = ValenceDict.key_transform(aidx)
                        new_ic = prim_fn(*aidx)
                        if new_ic not in ic_prims.Internals:
                            # Is it ok if we add extra ICs to the primitive list?
                            # if prim_fn is not geometric.internal.OutOfPlane:
                            #     print("Adding an IC:", new_ic, "which may indicate missing coverage by the FF!")
                            ic_prims.add(new_ic)
                        # if the tuple has no param (impropers), skip it
                        try:
                            if self._to.chembit:
                                prim = primitives[unmapped]
                            elif graph_ic is not None:
                                prim = graph_ic[unmapped]
                            else:
                                prim = prim_to_graph[param_name[0]].from_string_list(
                                    primitives[unmapped], sorted=True
                                )
                            prim_map = self._prim.get(entry.payload)
                            if prim_map is None:
                                self._prim[entry.payload] = {unmapped: prim}
                            else:
                                prim_map[unmapped] = prim
                        except Exception as e:
                            # NOTE(review): leftover interactive debugger hook —
                            # remove once primitive-assignment failures are understood.
                            breakpoint()
                            print("Issue with assigning primitive! Error message:")
                            print(e)
                # to estimate the FCs, we need the gradients and the hessians
                # NOTE(review): an earlier near-duplicate of this code compares
                # ``.payload`` instead of ``.name`` here — confirm which attribute
                # identifies a Gradient procedure node.
                if QCA[mol.parent].name == "Gradient":
                    for hessian_node in QCA.node_iter_depth_first(
                        mol, select="Hessian"
                    ):
                        xyz = QCA.db[mol.payload]["data"].geometry
                        hess = QCA.db[hessian_node.payload]["data"].return_result
                        grad = np.array(
                            QCA.db[hessian_node.payload]["data"].extras["qcvars"][
                                "CURRENT GRADIENT"
                            ]
                        )
                        # Internal-coordinate Hessian; its diagonal approximates
                        # per-IC force constants.
                        ic_hess = ic_prims.calcHess(xyz, grad, hess)
                        # eigs = np.linalg.eigvalsh(ic_hess)
                        # s = np.argsort(np.diag(ic_hess))
                        # force_vals = eigs
                        force_vals = np.diag(ic_hess)
                        # ic_vals = [ic_prims.Internals[i] for i in s]
                        ic_vals = ic_prims.Internals
                        for aidx, val in zip(ic_vals, force_vals):
                            # Map the IC's QCA atom indices back to unmapped keys.
                            key = tuple(
                                map(
                                    lambda x: map_inv[int(x) - 1],
                                    str(aidx).split()[1].split("-"),
                                )
                            )
                            if type(aidx) is geometric.internal.OutOfPlane:
                                key = ImproperDict.key_transform(key)
                            else:
                                key = ValenceDict.key_transform(key)
                            # no conversion, this is done elsewhere, e.g. set_parameter
                            # if type(aidx) is geometric.internal.Distance:
                            #     val = val / (offsb.tools.const.bohr2angstrom ** 2)
                            # val *= offsb.tools.const.hartree2kcalmol
                            # if mol.payload == "QCM-1396980":
                            #     breakpoint()
                            #     print("HI")
                            if mol.payload not in self._fc:
                                self._fc[mol.payload] = {key: val}
                            else:
                                self._fc[mol.payload][key] = val
def print_label_assignments(self, hide_ignored=True):
"""
print the entry, atoms, prim, label, label smarts
"""
handlers = [
self[x]
for x in self.root().children
if self[x].payload in self.parameterize_handlers
]
QCA = self._po.source.source
ciehms = "canonical_isomeric_explicit_hydrogen_mapped_smiles"
print("\nPRINT OUT OF MOLECULE ASSIGNMENTS\n")
self.to_smirnoff_xml("tmp.offxml", verbose=False, hide_ignored=hide_ignored)
labeler = offsb.ui.qcasb.QCArchiveSpellBook(QCA=QCA).assign_labels_from_openff(
"tmp.offxml", "tmp.offxml"
)
params = {
lbl: param
for ph in handlers
for lbl, param in labeler.db["ROOT"]["data"][ph.payload].items()
}
n_entries = 0
for entry in QCA.iter_entry():
n_entries += 1
for i, entry in enumerate(QCA.iter_entry(), 1):
labels = {
key_transformer[ph.payload](aidx): lbl
for ph in handlers
for aidx, lbl in labeler.db[entry.payload]["data"][ph.payload].items()
}
index_str = "{:6d}/{:6d}".format(i, n_entries)
print(" ", index_str, entry.payload)
print(
" ",
index_str,
QCA.db[entry.payload]["data"].attributes[ciehms],
)
# prims = self._prim[entry.payload]
graph = self._to.db[entry.payload]["data"].get("graph")
graph_ic = None
if graph is not None:
graph_ic = {
"b": graph.bonds(),
"a": graph.angles(),
"i": graph.outofplanes(),
"t": graph.torsions(),
"n": graph.atoms(),
}
graph_ic = {k: v for x in graph_ic.values() for k, v in x.items()}
for j, aidx in enumerate(labels, 1):
lbl = labels[aidx]
if lbl is None:
breakpoint()
continue
if hide_ignored and lbl in self._ignore_parameters:
continue
if graph_ic is not None:
smarts = graph_ic[aidx].to_smarts()
elif self._to.chembit:
smarts = self._to.db[entry.payload]["data"][aidx].to_smarts()
else:
smarts = "".join(self._to.db[entry.payload]["data"][aidx])
measurement = self._ic
index_str = "{:6d}/{:6d}".format(j, len(labels))
print(
" ",
index_str,
f"{lbl:5s}",
aidx,
params[lbl]["smirks"],
smarts,
# prims[aidx],
)
print("---------------------------------")
print("#################################")
sys.stdout.flush()
###
def labels(self, force=False):
QCA = self._po.source.source
self._qca = QCA
qcasb = offsb.ui.qcasb.QCArchiveSpellBook(QCA=QCA)
qcasb.verbose = True
if self._labeler is None or force:
self.to_smirnoff_xml("tmp.offxml", verbose=False)
labeler = qcasb.assign_labels_from_openff("tmp.offxml", "tmp.offxml")
self._labeler = labeler
else:
return self._labeler
return labeler
def _create_universe(self):
QCA = self._qca
n_entries = len(list(QCA.iter_entry()))
for entry in tqdm.tqdm(
QCA.iter_entry(),
total=n_entries,
desc="Generating universe",
ncols=80,
disable=True,
):
graph = self._to.db[entry.payload]["data"].get("graph", None)
if graph is None:
continue
self.atom_universe = functools.reduce(
lambda x, y: x + | |
reserved public IP address created with the virtual cloud network.
"""
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> str:
"""
OCID of the reserved public IP address created with the virtual cloud network.
"""
return pulumi.get(self, "id")
@pulumi.output_type
class GetNetworkLoadBalancersFilterResult(dict):
    """Name/values filter result for the network load balancers data source.

    NOTE: Pulumi-generated output type; keep in the generator's shape.
    """
    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        # 'regex' is only recorded when explicitly provided.
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)
        if regex is not None:
            pulumi.set(__self__, "regex", regex)

    @property
    @pulumi.getter
    def name(self) -> str:
        """Return this filter's 'name' value."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        """Return this filter's 'values' list."""
        return pulumi.get(self, "values")

    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        """Return the optional 'regex' flag (None when not set)."""
        return pulumi.get(self, "regex")
@pulumi.output_type
class GetNetworkLoadBalancersNetworkLoadBalancerCollectionResult(dict):
    """Wrapper holding the list of network load balancer items returned by the
    data source.

    NOTE: Pulumi-generated output type; keep in the generator's shape.
    """
    def __init__(__self__, *,
                 items: Sequence['outputs.GetNetworkLoadBalancersNetworkLoadBalancerCollectionItemResult']):
        pulumi.set(__self__, "items", items)

    @property
    @pulumi.getter
    def items(self) -> Sequence['outputs.GetNetworkLoadBalancersNetworkLoadBalancerCollectionItemResult']:
        """Return the collection's 'items' list."""
        return pulumi.get(self, "items")
@pulumi.output_type
class GetNetworkLoadBalancersNetworkLoadBalancerCollectionItemResult(dict):
def __init__(__self__, *,
compartment_id: str,
defined_tags: Mapping[str, Any],
display_name: str,
freeform_tags: Mapping[str, Any],
id: str,
ip_addresses: Sequence['outputs.GetNetworkLoadBalancersNetworkLoadBalancerCollectionItemIpAddressResult'],
is_preserve_source_destination: bool,
is_private: bool,
lifecycle_details: str,
network_security_group_ids: Sequence[str],
reserved_ips: Sequence['outputs.GetNetworkLoadBalancersNetworkLoadBalancerCollectionItemReservedIpResult'],
state: str,
subnet_id: str,
system_tags: Mapping[str, Any],
time_created: str,
time_updated: str):
"""
:param str compartment_id: The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the network load balancers to list.
:param Mapping[str, Any] defined_tags: Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
:param str display_name: A filter to return only resources that match the entire display name given.
:param Mapping[str, Any] freeform_tags: Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
:param str id: OCID of the reserved public IP address created with the virtual cloud network.
:param Sequence['GetNetworkLoadBalancersNetworkLoadBalancerCollectionItemIpAddressArgs'] ip_addresses: An array of IP addresses.
:param bool is_preserve_source_destination: When enabled, the skipSourceDestinationCheck parameter is automatically enabled on the load balancer VNIC. Packets are sent to the backend set without any changes to the source and destination IP.
:param bool is_private: Whether the network load balancer has a virtual cloud network-local (private) IP address.
:param str lifecycle_details: A message describing the current state in more detail. For example, can be used to provide actionable information for a resource in Failed state.
:param Sequence[str] network_security_group_ids: An array of network security groups [OCIDs](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) associated with the network load balancer.
:param str state: A filter to return only resources that match the given lifecycle state.
:param str subnet_id: The subnet in which the network load balancer is spawned [OCIDs](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm)."
:param Mapping[str, Any] system_tags: Key-value pair representing system tags' keys and values scoped to a namespace. Example: `{"bar-key": "value"}`
:param str time_created: The date and time the network load balancer was created, in the format defined by RFC3339. Example: `2020-05-01T21:10:29.600Z`
:param str time_updated: The time the network load balancer was updated. An RFC3339 formatted date-time string. Example: `2020-05-01T22:10:29.600Z`
"""
pulumi.set(__self__, "compartment_id", compartment_id)
pulumi.set(__self__, "defined_tags", defined_tags)
pulumi.set(__self__, "display_name", display_name)
pulumi.set(__self__, "freeform_tags", freeform_tags)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "ip_addresses", ip_addresses)
pulumi.set(__self__, "is_preserve_source_destination", is_preserve_source_destination)
pulumi.set(__self__, "is_private", is_private)
pulumi.set(__self__, "lifecycle_details", lifecycle_details)
pulumi.set(__self__, "network_security_group_ids", network_security_group_ids)
pulumi.set(__self__, "reserved_ips", reserved_ips)
pulumi.set(__self__, "state", state)
pulumi.set(__self__, "subnet_id", subnet_id)
pulumi.set(__self__, "system_tags", system_tags)
pulumi.set(__self__, "time_created", time_created)
pulumi.set(__self__, "time_updated", time_updated)
@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> str:
"""
The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the compartment containing the network load balancers to list.
"""
return pulumi.get(self, "compartment_id")
@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> Mapping[str, Any]:
"""
Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
"""
return pulumi.get(self, "defined_tags")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> str:
"""
A filter to return only resources that match the entire display name given.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> Mapping[str, Any]:
"""
Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
"""
return pulumi.get(self, "freeform_tags")
@property
@pulumi.getter
def id(self) -> str:
"""
OCID of the reserved public IP address created with the virtual cloud network.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="ipAddresses")
def ip_addresses(self) -> Sequence['outputs.GetNetworkLoadBalancersNetworkLoadBalancerCollectionItemIpAddressResult']:
"""
An array of IP addresses.
"""
return pulumi.get(self, "ip_addresses")
@property
@pulumi.getter(name="isPreserveSourceDestination")
def is_preserve_source_destination(self) -> bool:
"""
When enabled, the skipSourceDestinationCheck parameter is automatically enabled on the load balancer VNIC. Packets are sent to the backend set without any changes to the source and destination IP.
"""
return pulumi.get(self, "is_preserve_source_destination")
@property
@pulumi.getter(name="isPrivate")
def is_private(self) -> bool:
"""
Whether the network load balancer has a virtual cloud network-local (private) IP address.
"""
return pulumi.get(self, "is_private")
    @property
    @pulumi.getter(name="lifecycleDetails")
    def lifecycle_details(self) -> str:
        """
        A message describing the current state in more detail. For example, can
        be used to provide actionable information for a resource in Failed
        state.
        """
        return pulumi.get(self, "lifecycle_details")
    @property
    @pulumi.getter(name="networkSecurityGroupIds")
    def network_security_group_ids(self) -> Sequence[str]:
        """
        An array of network security groups
        [OCIDs](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm)
        associated with the network load balancer.
        """
        return pulumi.get(self, "network_security_group_ids")
    @property
    @pulumi.getter(name="reservedIps")
    def reserved_ips(self) -> Sequence['outputs.GetNetworkLoadBalancersNetworkLoadBalancerCollectionItemReservedIpResult']:
        """
        An array of reserved-IP objects associated with the network load
        balancer; each element carries the OCID of a reserved public IP.
        """
        return pulumi.get(self, "reserved_ips")
    @property
    @pulumi.getter
    def state(self) -> str:
        """
        A filter to return only resources that match the given lifecycle state.

        NOTE(review): this text describes the datasource's ``state`` *filter*
        argument; the property itself presumably holds the current lifecycle
        state of the network load balancer — confirm against the OCI API
        reference.
        """
        return pulumi.get(self, "state")
    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> str:
        """
        The subnet in which the network load balancer is spawned
        [OCIDs](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm).
        """
        return pulumi.get(self, "subnet_id")
    @property
    @pulumi.getter(name="systemTags")
    def system_tags(self) -> Mapping[str, Any]:
        """
        Key-value pair representing system tags' keys and values scoped to a
        namespace. Example: `{"bar-key": "value"}`
        """
        return pulumi.get(self, "system_tags")
    @property
    @pulumi.getter(name="timeCreated")
    def time_created(self) -> str:
        """
        The date and time the network load balancer was created, in the format
        defined by RFC3339. Example: `2020-05-01T21:10:29.600Z`
        """
        return pulumi.get(self, "time_created")
    @property
    @pulumi.getter(name="timeUpdated")
    def time_updated(self) -> str:
        """
        The time the network load balancer was updated. An RFC3339 formatted
        date-time string. Example: `2020-05-01T22:10:29.600Z`
        """
        return pulumi.get(self, "time_updated")
@pulumi.output_type
class GetNetworkLoadBalancersNetworkLoadBalancerCollectionItemIpAddressResult(dict):
    """A single IP address entry of a network load balancer datasource item."""
    def __init__(__self__, *,
                 ip_address: str,
                 is_public: bool,
                 reserved_ip: 'outputs.GetNetworkLoadBalancersNetworkLoadBalancerCollectionItemIpAddressReservedIpResult'):
        """
        :param str ip_address: An IP address. Example: `192.168.0.3`
        :param bool is_public: Whether the IP address is public or private.
        :param 'GetNetworkLoadBalancersNetworkLoadBalancerCollectionItemIpAddressReservedIpArgs' reserved_ip: An object representing a reserved IP address to be attached or that is already attached to a network load balancer.
        """
        pulumi.set(__self__, "ip_address", ip_address)
        pulumi.set(__self__, "is_public", is_public)
        pulumi.set(__self__, "reserved_ip", reserved_ip)
    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> str:
        """
        An IP address. Example: `192.168.0.3`
        """
        return pulumi.get(self, "ip_address")
    @property
    @pulumi.getter(name="isPublic")
    def is_public(self) -> bool:
        """
        Whether the IP address is public or private.
        """
        return pulumi.get(self, "is_public")
    @property
    @pulumi.getter(name="reservedIp")
    def reserved_ip(self) -> 'outputs.GetNetworkLoadBalancersNetworkLoadBalancerCollectionItemIpAddressReservedIpResult':
        """
        An object representing a reserved IP address to be attached or that is
        already attached to a network load balancer.
        """
        return pulumi.get(self, "reserved_ip")
@pulumi.output_type
class GetNetworkLoadBalancersNetworkLoadBalancerCollectionItemIpAddressReservedIpResult(dict):
    """The reserved public IP attached to a load balancer IP address entry."""
    def __init__(__self__, *,
                 id: str):
        """
        :param str id: OCID of the reserved public IP address created with the virtual cloud network.
        """
        pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        OCID of the reserved public IP address created with the virtual cloud
        network.
        """
        return pulumi.get(self, "id")
@pulumi.output_type
class GetNetworkLoadBalancersNetworkLoadBalancerCollectionItemReservedIpResult(dict):
    """A reserved public IP associated with a network load balancer item."""
    def __init__(__self__, *,
                 id: str):
        """
        :param str id: OCID of the reserved public IP address created with the virtual cloud network.
        """
        pulumi.set(__self__, "id", id)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        OCID of the reserved public IP address created with the virtual cloud
        network.
        """
        return pulumi.get(self, "id")
@pulumi.output_type
class GetNetworkLoadBalancersPoliciesFilterResult(dict):
    """A name/values filter applied to the policies datasource results."""
    def __init__(__self__, *,
                 name: str,
                 values: Sequence[str],
                 regex: Optional[bool] = None):
        # `regex` is only stored when explicitly provided; `pulumi.get` then
        # returns None for an unset value.
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "values", values)
        if regex is not None:
            pulumi.set(__self__, "regex", regex)
    @property
    @pulumi.getter
    def name(self) -> str:
        """Name of the field being filtered on."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def values(self) -> Sequence[str]:
        """Values the filter matches against."""
        return pulumi.get(self, "values")
    @property
    @pulumi.getter
    def regex(self) -> Optional[bool]:
        """
        Whether ``values`` are interpreted as regular expressions; ``None``
        when not set. (Presumed standard Pulumi datasource filter semantics —
        confirm with provider docs.)
        """
        return pulumi.get(self, "regex")
@pulumi.output_type
class GetNetworkLoadBalancersPoliciesNetworkLoadBalancersPolicyCollectionResult(dict):
    """The collection wrapper returned by the policies datasource."""
    def __init__(__self__, *,
                 items: Sequence[str]):
        """
        :param Sequence[str] items: Array of NetworkLoadBalancersPolicySummary objects.
        """
        pulumi.set(__self__, "items", items)
    @property
    @pulumi.getter
    def items(self) -> Sequence[str]:
        """
        Array of NetworkLoadBalancersPolicySummary objects.
        """
        return pulumi.get(self, "items")
@pulumi.output_type
class GetNetworkLoadBalancersProtocolsFilterResult(dict):
def __init__(__self__, *,
name: str,
values: Sequence[str],
regex: Optional[bool] = None):
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "values", values)
if regex is not None:
pulumi.set(__self__, "regex", regex)
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
return pulumi.get(self, "values")
@property
@pulumi.getter
def regex(self) -> Optional[bool]:
return pulumi.get(self, | |
Each tokenizer may provide different semantics with respect to this list,
and may ignore it altogether.
Args:
types_to_skip: Types (from the constants in the `token` module) or
`unified_tokenizer.TokenKind`. Note that some of those constants are
actually defined in the `tokenize` module.
"""
self.types_to_skip = types_to_skip
  def replace_reserved_keywords(self, reserved):
    """Replaces the reserved keywords with the supplied list of strings.

    Each tokenizer may provide different semantics with respect to the list
    of reserved keywords, or ignore them altogether.

    Args:
      reserved: List of strings, replacing the previous reserved list
        wholesale (no merging with the old list occurs).
    """
    self.reserved = reserved # Replace the old one entirely.
  def update_mappings(self, mappings):
    """Replaces the character mappings with the supplied dictionary.

    The intent for character mappings is to enable tokenizers that support them
    to sanitize dangerous characters, such as newline and carriage return,
    with a nicer symbol.

    Each tokenizer may provide different semantics with respect to the
    mappings, or ignore them altogether.

    Args:
      mappings: Dictionary of original to sanitized strings. Keys are expected
        to have length 1.

    Raises:
      ValueError: if a key has length different from 1.
    """
    # Validate first so that an invalid dict leaves the old mappings intact.
    check_mappings(mappings)
    self.mappings = mappings
  def get_mappings(self):
    """Returns the current character sanitization mappings."""
    return self.mappings
def condition_full_tokens(
self, agnostic
):
"""Applies reserved keywords and character sanitization."""
filtered: Iterable[AbstractToken] = (
a for a in agnostic if a.kind not in self.types_to_skip)
# Now turn all reserved words, regardless of kind, into keywords.
with_reserved: Sequence[AbstractToken] = tuple(
dataclasses.replace(a, kind=TokenKind.KEYWORD)
if a.spelling in self.reserved else a
for a in filtered)
return with_reserved
def subtokenize_full_tokens(
self, agnostic
):
"""Performs heuristic splitting of full tokens."""
subtoken_lists = subtokenize_agnostic_tokens_in_place(
agnostic_tokens=agnostic,
max_output_token_length=self.max_output_token_length,
sanitization_mapping=self.mappings,
sentinel=SENTINEL)
return subtoken_lists
def tokenize(self, source_code):
"""Tokenizes via `tokenize_and_abstract`."""
try:
agnostic = self.tokenize_and_abstract(source_code)
except Exception as e:
raise ValueError('While trying to do language-specific tokenization for '
'the string:\n\n\n%r\n\n\n%s\n\n\n'
'we received error %r.' % (source_code, source_code, e))
conditioned = self.condition_full_tokens(agnostic)
multi_tokens = self.subtokenize_full_tokens(conditioned)
subtokens = flatten_subtoken_lists(multi_tokens)
return subtokens
def untokenize(self, token_list):
"""Untokenizes via `untokenize_abstract`."""
# Untokenize agnostic.
if (not token_list or token_list[-1] != quote_special(
TokenKind.EOS.name)):
raise ValueError('Token list %r should end with the EOS token %r.' %
(token_list,
quote_special(
TokenKind.EOS.name)))
whole_tokens = reconstitute_full_unsanitary_tokens(
token_list,
sanitization_mapping=self.mappings,
sentinel=SENTINEL)
return self.untokenize_abstract(whole_tokens)
def token_from_token_type(token_type):
  """Turns a token type into a reserved token string."""
  # tokenize.tok_name (unlike token.tok_name) also covers NL, COMMENT, etc.,
  # so we look the name up there before quoting it as a special token.
  type_name = tokenize.tok_name[token_type]
  return quote_special(type_name)
# coding=utf-8
#
# ALL CREDIT GOES TO https://github.com/google-research/google-research/tree/master/cubert
#
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A Python tokenizer subclass of CuBertTokenizer."""
import keyword
import re
import tokenize
import typing
from typing import Any
from typing import List
from typing import Sequence
from typing import Tuple
from absl import logging
# from . import cubert_tokenizer
# from . import unified_tokenizer
class PythonTokenizer(CuBertTokenizer):
  """Tokenizer that extracts Python's lexical elements preserving strings."""
  # Maps the `tokenize` module's token types onto language-agnostic kinds.
  _TOKEN_TYPE_MAP = {
      tokenize.COMMENT: TokenKind.COMMENT,
      tokenize.DEDENT: TokenKind.KEYWORD,
      tokenize.ENDMARKER: TokenKind.EOS,
      tokenize.ERRORTOKEN: TokenKind.ERROR,
      tokenize.INDENT: TokenKind.KEYWORD,
      tokenize.NEWLINE: TokenKind.NEWLINE,
      tokenize.NL: TokenKind.PUNCTUATION,
      tokenize.NUMBER: TokenKind.NUMBER,
      tokenize.OP: TokenKind.PUNCTUATION,
      tokenize.STRING: TokenKind.STRING,
  }
  # Inverse lookup used by `untokenize_abstract` to map special quoted
  # spellings back to Python token types.
  _REVERSE_TOKEN_MAP = {
      token_from_token_type(tokenize.INDENT):
          tokenize.INDENT,
      token_from_token_type(tokenize.DEDENT):
          tokenize.DEDENT,
      quote_special(TokenKind.EOS.name):
          tokenize.ENDMARKER,
      quote_special(TokenKind.ERROR.name):
          tokenize.ERRORTOKEN,
      quote_special(TokenKind.NEWLINE.name):
          tokenize.NEWLINE,
      token_from_token_type(tokenize.NL):
          tokenize.NL,
  }
  # Adding the end-of-string anchor \Z below, since re.fullmatch wasn't
  # available in Python2.
  # pytype: disable=module-attr
  _NUMBERS = re.compile('(' + tokenize.Number + r')\Z')
  # pytype: disable=module-attr
  _SINGLE_STRINGS = re.compile('(' + tokenize.String + r')\Z')
  _TRIPLE_STRING_BEGINNINGS = re.compile(
      tokenize.Triple)  # pytype: disable=module-attr
  # pytype: disable=module-attr
  _COMMENTS = re.compile('(' + tokenize.Comment + r')\Z')
  # Exact operator/punctuation spellings recognized by Python's tokenizer.
  _EXACT_TOKEN_TYPES = tokenize.EXACT_TOKEN_TYPES.keys()  # pytype: disable=module-attr
  # Token types that CubertTokenizer will tokenize by their type and not
  # content.
  _TOKEN_TYPES_TO_TOKENIZE_BY_TYPE = [
      tokenize.NEWLINE, tokenize.DEDENT, tokenize.NL
  ]
  def tokenize_and_abstract(
      self,
      source_code):
    """Produces a language-agnostic tokenization of the input code.

    Args:
      source_code: Python source text.

    Returns:
      A list of `AbstractToken`s. If Python's tokenizer rejects the source,
      the list holds just an ERROR token followed by an EOS token.
    """
    agnostic_tokens: List[AbstractToken] = []
    try:
      token_tuples = code_to_tokens(source_code)
    except (tokenize.TokenError, IndentationError) as e:
      logging.warning('The tokenizer raised exception `%s` while parsing %s', e,
                      source_code)
      # We don't try to do recovery from errors quite yet. Emit just an
      # error and end-of-sequence and return.
      agnostic_tokens.append(
          AbstractToken(
              quote_special(
                  TokenKind.ERROR.name),
              TokenKind.ERROR,
              TokenMetadata(
                  start=Position(
                      line=0, column=0),
                  end=Position(
                      line=0, column=0))))
      agnostic_tokens.append(
          AbstractToken(
              quote_special(
                  TokenKind.EOS.name),
              TokenKind.EOS,
              TokenMetadata(
                  start=Position(
                      line=0, column=0),
                  end=Position(
                      line=0, column=0))))
      return agnostic_tokens
    for token_tuple in token_tuples:
      spelling = token_tuple.string
      kind = token_tuple.type
      # We'll adjust the spelling of some tokens, e.g., those that we
      # tokenize by their type rather than their original spelling. Indentation
      # and dedentation tokens are like that.
      adjusted_spelling = spelling
      token_kind = TokenKind.NONE
      if kind == tokenize.NAME:
        # Disambiguate identifiers from keywords.
        if keyword.iskeyword(spelling):
          token_kind = TokenKind.KEYWORD
        else:
          token_kind = TokenKind.IDENTIFIER
      else:
        if kind in PythonTokenizer._TOKEN_TYPES_TO_TOKENIZE_BY_TYPE:
          # Replace spelling with type.
          adjusted_spelling = token_from_token_type(
              kind)
        elif kind is tokenize.INDENT:
          # For INDENT, in particular, we also record the actual spelling too.
          adjusted_spelling = '{indent}{spelling}'.format(
              indent=token_from_token_type(kind),
              spelling=spelling)
        elif kind == tokenize.ENDMARKER:
          adjusted_spelling = quote_special(
              TokenKind.EOS.name)
        # Map everything according to table.
        try:
          token_kind = PythonTokenizer._TOKEN_TYPE_MAP[kind]
        except KeyError as ke:
          # It's possible we're here because of async/await. Those kept being
          # turned into keywords and then removed from keywords, so we can't
          # rely on knowing which they are. We'll check by spelling.
          # See: https://bugs.python.org/issue30406
          # and https://bugs.python.org/issue33260
          # and https://bugs.python.org/issue35975
          if spelling in ('async', 'await'):
            token_kind = TokenKind.KEYWORD
          else:
            raise ValueError('While trying to turn Python token %r into an '
                             'agnostic one, raised %r.' %
                             ((spelling, kind), ke))
      start_line, start_column = token_tuple.start
      end_line, end_column = token_tuple.end
      # Unlike other languages, NEWLINE tokens are reported as ending on the
      # same line as where they started. We adjust that here, to stick to the
      # same convention as other tokenizers.
      if ((token_kind == TokenKind.NEWLINE) or
          (kind == tokenize.NL)):
        end_line = start_line + 1
        end_column = 0
      agnostic_tokens.append(
          AbstractToken(
              spelling=adjusted_spelling, kind=token_kind,
              metadata=TokenMetadata(
                  # Python's tokenizer counts lines starting from 1, so we
                  # have to offset what we read from the `TokenInfo` tuple.
                  start=Position(
                      line=start_line - 1, column=start_column),
                  end=Position(
                      line=end_line - 1, column=end_column))))
    return agnostic_tokens
  def untokenize_abstract(self, whole_tokens):
    """Maps whole tokens back to Python token tuples and rebuilds the source.

    Args:
      whole_tokens: Reconstituted, unsanitized whole-token spellings.

    Returns:
      The reconstructed Python source string.
    """
    # Reconstruct Python tokenizer tuples, so that Python's untokenize can be
    # invoked.
    token_tuples: List[Tuple[int, str]] = []
    for whole_token in whole_tokens:
      if whole_token in PythonTokenizer._EXACT_TOKEN_TYPES:
        token_tuples.append((tokenize.OP, whole_token))
      elif token_from_token_type(
          tokenize.INDENT) in whole_token:
        # We baked the type and spelling into one token. Break them up.
        spelling = whole_token.replace(
            token_from_token_type(tokenize.INDENT), '')
        token_tuples.append((tokenize.INDENT, spelling))
      elif whole_token in PythonTokenizer._REVERSE_TOKEN_MAP:
        python_kind = PythonTokenizer._REVERSE_TOKEN_MAP[whole_token]
        if python_kind in (tokenize.DEDENT, tokenize.ENDMARKER,
                           tokenize.ERRORTOKEN):
          spelling = ''
        else:  # python_kind in (tokenize.NEWLINE, tokenize.NL)
          spelling = '\n'
        token_tuples.append((python_kind, spelling))
      elif keyword.iskeyword(whole_token):
        token_tuples.append((tokenize.NAME, whole_token))
      elif PythonTokenizer._NUMBERS.match(whole_token):
        token_tuples.append((tokenize.NUMBER, whole_token))
      elif PythonTokenizer._SINGLE_STRINGS.match(whole_token):
        token_tuples.append((tokenize.STRING, whole_token))
      elif PythonTokenizer._TRIPLE_STRING_BEGINNINGS.match(whole_token):
        token_tuples.append((tokenize.STRING, whole_token))
      elif PythonTokenizer._COMMENTS.match(whole_token):
        token_tuples.append((tokenize.COMMENT, whole_token))
      else:
        # Everything else we map back to NAME.
        token_tuples.append((tokenize.NAME, whole_token))
    reconstructed = tokenize.untokenize(typing.cast(Any, token_tuples))
    return reconstructed
#
# Thanks to DNGRos for this huggingface/transformers compatible version
# https://github.com/google-research/google-research/issues/582
#
import os
import collections
from typing import *
from transformers import BertTokenizer
# from tensor2tensor.data_generators import text_encoder
from unif import t2t_text_encoder as text_encoder
def combine_tokenizer_with_subword(
    initial_tokenizer: CuBertTokenizer,
    subword_tokenizer: text_encoder.SubwordTextEncoder,
) -> Callable[[str], List[str]]:
    """Builds a ``str -> List[str]`` function chaining both tokenizers.

    Mirrors the behavior of code_to_subtokenized_sentences.py at
    https://github.com/google-research/google-research/blob/50c6cd94b5/cubert/code_to_subtokenized_sentences.py#L111-L118
    """
    def tokenize(string: str) -> List[str]:
        initial_tokens = initial_tokenizer.tokenize(string)
        # Each CuBERT token is subword-encoded on its own, then decoded back
        # into its list of subtoken strings; the per-token lists are flattened.
        per_token_subtokens = (
            subword_tokenizer.decode_list(
                subword_tokenizer.encode_without_tokenizing(tok))
            for tok in initial_tokens
        )
        return flatten_list(per_token_subtokens)
    return tokenize
def flatten_list(t):
    """Flattens one level of nesting: an iterable of iterables into one list."""
    flat = []
    for sublist in t:
        flat.extend(sublist)
    return flat
class CuBertHugTokenizer(BertTokenizer):
    """HuggingFace ``BertTokenizer`` wrapper around CuBERT's two-stage
    (Python tokenizer + subword encoder) tokenization."""
    # A hacky version that seems to work at least for python
    def __init__(
        self,
        vocab_file: str,
    ):
        # The special tokens carry a trailing underscore, presumably matching
        # the entries of the CuBERT subword vocabulary — TODO confirm.
        super().__init__(
            vocab_file=vocab_file,
            do_lower_case=False,
            do_basic_tokenize=True,
            unk_token="[UNK]_",
            sep_token="[SEP]_",
            pad_token="<pad>_",
            cls_token="[CLS]_",
            mask_token="[MASK]_",
        )
        if not os.path.isfile(vocab_file):
            raise ValueError(
                "Can't find a vocabulary file at path '{}'. To load the vocabulary from a Google pretrained "
                "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    vocab_file)
            )
        self.vocab = self.load_vocab(vocab_file)
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
        # First pass: CuBERT's Python tokenizer; second pass: subword encoding
        # over the same vocabulary file.
        self.first_tokenizer = PythonTokenizer()
        self.subword_tokenizer = text_encoder.SubwordTextEncoder(str(vocab_file))
        # NOTE(review): `_combined_func` is not referenced in this excerpt;
        # presumably consumed by a tokenization override elsewhere — confirm.
        self._combined_func = combine_tokenizer_with_subword(
            self.first_tokenizer, self.subword_tokenizer)
    def __call__(self, text):
        # Pads/truncates every encoding to MAX_SEQUENCE_LENGTH (module-level
        # constant defined elsewhere in this file).
        return super().__call__(
            text,
            padding='max_length',
            truncation='longest_first',
            max_length=MAX_SEQUENCE_LENGTH
        )
    @property
    def do_lower_case(self):
        # CuBERT vocabularies are case-sensitive (see do_lower_case=False above).
        return False
| |
infinity, otherwise we could do this:
#assert(temp_reminder.prec() == 1)
temp_reminder = (1 / simple_qexp / q**(-m)).add_bigoh(1)
fab_pol = q.parent()([])
while (len(temp_reminder.coefficients()) > 0):
temp_coeff = temp_reminder.coefficients()[0]
temp_exp = -temp_reminder.exponents()[0]
fab_pol += temp_coeff*q**temp_exp
temp_reminder -= temp_coeff*j_qexp**temp_exp
# The first term is zero only up to numerical errors,
# so we manually have to remove it
if (not d.parent().is_exact()):
temp_reminder=temp_reminder.truncate_neg(-temp_exp+1)
return fab_pol.polynomial()
    def F_basis_pol(self, m, order_1=ZZ(0)):
        r"""
        Returns a polynomial corresponding to the basis element of
        the corresponding space of weakly holomorphic forms of
        the same degree as ``self``. The basis element is determined
        by the property that the Fourier expansion is of the form
        ``q^m + O(q^(order_inf + 1))``, where ``order_inf = self._l1 - order_1``.

        If ``n=infinity`` a non-trivial order of ``-1`` can be specified through
        the parameter ``order_1`` (default: 0). Otherwise it is ignored.

        INPUT:

        - ``m`` -- An integer ``m <= self._l1``.

        - ``order_1`` -- The order at ``-1`` of ``F_simple`` (default: 0).
          This parameter is ignored if ``n != infinity``.

        OUTPUT:

        A polynomial in ``x,y,z,d``, corresponding to ``f_rho, f_i, E2``
        and the (possibly) transcendental parameter ``d``.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.space import WeakModularForms
            sage: MF = WeakModularForms(n=5, k=62/3, ep=-1)
            sage: MF.weight_parameters()
            (2, 3)

            sage: MF.F_basis_pol(2)
            x^13*y*d^2 - 2*x^8*y^3*d^2 + x^3*y^5*d^2
            sage: MF.F_basis_pol(1)
            (-81*x^13*y*d + 62*x^8*y^3*d + 19*x^3*y^5*d)/(-100)
            sage: MF.F_basis_pol(0)
            (141913*x^13*y + 168974*x^8*y^3 + 9113*x^3*y^5)/320000

            sage: MF(MF.F_basis_pol(2)).q_expansion(prec=MF._l1+2)
            q^2 - 41/(200*d)*q^3 + O(q^4)
            sage: MF(MF.F_basis_pol(1)).q_expansion(prec=MF._l1+1)
            q + O(q^3)
            sage: MF(MF.F_basis_pol(0)).q_expansion(prec=MF._l1+1)
            1 + O(q^3)

            sage: MF(MF.F_basis_pol(-2)).q_expansion(prec=MF._l1+1)
            q^-2 + O(q^3)
            sage: MF(MF.F_basis_pol(-2)).parent()
            WeakModularForms(n=5, k=62/3, ep=-1) over Integer Ring

            sage: MF = WeakModularForms(n=4, k=-2, ep=1)
            sage: MF.weight_parameters()
            (-1, 3)

            sage: MF.F_basis_pol(-1)
            x^3/(x^4*d - y^2*d)
            sage: MF.F_basis_pol(-2)
            (9*x^7 + 23*x^3*y^2)/(32*x^8*d^2 - 64*x^4*y^2*d^2 + 32*y^4*d^2)

            sage: MF(MF.F_basis_pol(-1)).q_expansion(prec=MF._l1+2)
            q^-1 + 5/(16*d) + O(q)
            sage: MF(MF.F_basis_pol(-2)).q_expansion(prec=MF._l1+2)
            q^-2 + 25/(4096*d^2) + O(q)

            sage: MF = WeakModularForms(n=infinity, k=14, ep=-1)
            sage: MF.F_basis_pol(3)
            -y^7*d^3 + 3*x*y^5*d^3 - 3*x^2*y^3*d^3 + x^3*y*d^3
            sage: MF.F_basis_pol(2)
            (3*y^7*d^2 - 17*x*y^5*d^2 + 25*x^2*y^3*d^2 - 11*x^3*y*d^2)/(-8)
            sage: MF.F_basis_pol(1)
            (-75*y^7*d + 225*x*y^5*d - 1249*x^2*y^3*d + 1099*x^3*y*d)/1024
            sage: MF.F_basis_pol(0)
            (41*y^7 - 147*x*y^5 - 1365*x^2*y^3 - 2625*x^3*y)/(-4096)
            sage: MF.F_basis_pol(-1)
            (-9075*y^9 + 36300*x*y^7 - 718002*x^2*y^5 - 4928052*x^3*y^3 - 2769779*x^4*y)/(8388608*y^2*d - 8388608*x*d)

            sage: MF.F_basis_pol(3, order_1=-1)
            (-3*y^9*d^3 + 16*x*y^7*d^3 - 30*x^2*y^5*d^3 + 24*x^3*y^3*d^3 - 7*x^4*y*d^3)/(-4*x)
            sage: MF.F_basis_pol(1, order_1=2)
            -x^2*y^3*d + x^3*y*d
            sage: MF.F_basis_pol(0, order_1=2)
            (-3*x^2*y^3 - 5*x^3*y)/(-8)
            sage: MF.F_basis_pol(-1, order_1=2)
            (-81*x^2*y^5 - 606*x^3*y^3 - 337*x^4*y)/(1024*y^2*d - 1024*x*d)
        """
        # x, y correspond to the basic generators, z to E2 and d to the
        # (possibly transcendental) parameter (cf. the OUTPUT section above).
        (x,y,z,d) = self.rat_field().gens()
        n = self._group.n()
        if (n ==infinity):
            # Only for n = infinity a non-trivial order at -1 is supported.
            order_1 = ZZ(order_1)
            order_inf = self._l1 - order_1
            finf_pol = d*(x-y**2)
            jinv_pol = x/(x-y**2)
            rat = finf_pol**order_inf * x**order_1 * y**(ZZ(1-self._ep)/ZZ(2)) * self.Faber_pol(m, order_1)(jinv_pol)
        else:
            # For finite n the parameter ``order_1`` is ignored (see docstring).
            order_inf = self._l1
            order_1 = order_inf
            finf_pol = d*(x**n-y**2)
            jinv_pol = x**n/(x**n-y**2)
            rat = finf_pol**order_inf * x**self._l2 * y**(ZZ(1-self._ep)/ZZ(2)) * self.Faber_pol(m)(jinv_pol)
        return rat
    def F_basis(self, m, order_1=ZZ(0)):
        r"""
        Returns a weakly holomorphic element of ``self``
        (extended if necessarily) determined by the property that
        the Fourier expansion is of the form is of the form
        ``q^m + O(q^(order_inf + 1))``, where ``order_inf = self._l1 - order_1``.

        In particular for all ``m <= order_inf`` these elements form
        a basis of the space of weakly holomorphic modular forms
        of the corresponding degree in case ``n!=infinity``.

        If ``n=infinity`` a non-trivial order of ``-1`` can be specified through
        the parameter ``order_1`` (default: 0). Otherwise it is ignored.

        INPUT:

        - ``m`` -- An integer ``m <= self._l1``.

        - ``order_1`` -- The order at ``-1`` of ``F_simple`` (default: 0).
          This parameter is ignored if ``n != infinity``.

        OUTPUT:

        The corresponding element in (possibly an extension of) ``self``.
        Note that the order at ``-1`` of the resulting element may be
        bigger than ``order_1`` (rare).

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.space import WeakModularForms, CuspForms
            sage: MF = WeakModularForms(n=5, k=62/3, ep=-1)
            sage: MF.disp_prec(MF._l1+2)
            sage: MF.weight_parameters()
            (2, 3)

            sage: MF.F_basis(2)
            q^2 - 41/(200*d)*q^3 + O(q^4)
            sage: MF.F_basis(1)
            q - 13071/(640000*d^2)*q^3 + O(q^4)
            sage: MF.F_basis(0)
            1 - 277043/(192000000*d^3)*q^3 + O(q^4)
            sage: MF.F_basis(-2)
            q^-2 - 162727620113/(40960000000000000*d^5)*q^3 + O(q^4)
            sage: MF.F_basis(-2).parent() == MF
            True

            sage: MF = CuspForms(n=4, k=-2, ep=1)
            sage: MF.weight_parameters()
            (-1, 3)

            sage: MF.F_basis(-1).parent()
            WeakModularForms(n=4, k=-2, ep=1) over Integer Ring
            sage: MF.F_basis(-1).parent().disp_prec(MF._l1+2)
            sage: MF.F_basis(-1)
            q^-1 + 80 + O(q)
            sage: MF.F_basis(-2)
            q^-2 + 400 + O(q)

            sage: MF = WeakModularForms(n=infinity, k=14, ep=-1)
            sage: MF.F_basis(3)
            q^3 - 48*q^4 + O(q^5)
            sage: MF.F_basis(2)
            q^2 - 1152*q^4 + O(q^5)
            sage: MF.F_basis(1)
            q - 18496*q^4 + O(q^5)
            sage: MF.F_basis(0)
            1 - 224280*q^4 + O(q^5)
            sage: MF.F_basis(-1)
            q^-1 - 2198304*q^4 + O(q^5)

            sage: MF.F_basis(3, order_1=-1)
            q^3 + O(q^5)
            sage: MF.F_basis(1, order_1=2)
            q - 300*q^3 - 4096*q^4 + O(q^5)
            sage: MF.F_basis(0, order_1=2)
            1 - 24*q^2 - 2048*q^3 - 98328*q^4 + O(q^5)
            sage: MF.F_basis(-1, order_1=2)
            q^-1 - 18150*q^3 - 1327104*q^4 + O(q^5)
        """
        basis_pol = self.F_basis_pol(m, order_1=order_1)
        if (self.hecke_n() == infinity):
            (x,y,z,d) = self.pol_ring().gens()
            # Divisibility of numerator/denominator by x decides the analytic
            # type ("cusp"/"weak"/"holo") of the space the result lives in.
            if (x.divides(basis_pol.numerator()) and m > 0):
                new_space = self.extend_type("cusp")
            elif (x.divides(basis_pol.denominator()) or m < 0):
                new_space = self.extend_type("weak")
            else:
                new_space = self.extend_type("holo")
        else:
            if (m > 0):
                new_space = self.extend_type("cusp")
            elif (m >= 0):
                # I.e. m == 0, since the previous branch caught m > 0.
                new_space = self.extend_type("holo")
            else:
                new_space = self.extend_type("weak")
        return new_space(basis_pol)
    def _canonical_min_exp(self, min_exp, order_1):
        r"""
        Return an adjusted value of ``min_exp`` and ``order_1`` corresponding
        to the analytic type of ``self``.

        EXAMPLES::

            sage: from sage.modular.modform_hecketriangle.space import CuspForms
            sage: CF = CuspForms(n=5, k=16, ep=1)
            sage: CF._canonical_min_exp(-2, 0)
            (1, 0)

            sage: CF = CuspForms(n=infinity, k=10, ep=-1)
            sage: CF._canonical_min_exp(-2, -2)
            (1, 1)
        """
        min_exp = ZZ(min_exp)
        order_1 = ZZ(order_1)
        if self.is_holomorphic():
            if self.is_cuspidal():
                # Cusp forms have positive order at infinity (and at -1).
                min_exp = max(min_exp, 1)
                order_1 = max(order_1, 1)
            else:
                # Holomorphic (non-cuspidal) forms cannot have poles.
                min_exp = max(min_exp, 0)
                order_1 = max(order_1, 0)
        if (self.hecke_n() != infinity):
            # The order at -1 is only meaningful for n = infinity.
            order_1 = ZZ(0)
        return (min_exp, order_1)
def quasi_part_gens(self, r=None, min_exp=0, max_exp=infinity, order_1=ZZ(0)):
r"""
Return a basis in ``self`` of the subspace of (quasi) weakly
holomorphic forms which satisfy the specified properties on
the quasi parts and the initial Fourier coefficient.
INPUT:
- ``r`` -- An integer or ``None`` (default), indicating
the desired power of ``E2`` If ``r=None``
then all possible powers (``r``) are
choosen.
- ``min_exp`` -- An integer giving a lower bound for the
first non-trivial Fourier coefficient of the
generators (default: 0).
- ``max_exp`` -- An integer or ``infinity`` (default) giving
an upper bound for the first non-trivial
Fourier coefficient of the generators. If
``max_exp==infinity`` then no upper bound is
assumed.
- ``order_1`` -- A lower bound for the order at ``-1`` of all
quasi parts of the basis elements (default:
0). If ``n!=infinity`` this parameter is
ignored.
OUTPUT:
A basis in ``self`` of the subspace of forms which are modular
after dividing by ``E2^r`` and which have a Fourier expansion
of the form ``q^m + O(q^(m+1))`` with ``min_exp <= m <=
max_exp`` for each quasi part (and at least the specified
order at ``-1`` in case ``n=infinity``). Note that linear
combinations of forms/quasi parts maybe have a higher order at
infinity than ``max_exp``.
EXAMPLES::
sage: from sage.modular.modform_hecketriangle.space import QuasiWeakModularForms
sage: QF = QuasiWeakModularForms(n=8, k=10/3, ep=-1)
sage: QF.default_prec(1)
sage: QF.quasi_part_gens(min_exp=-1)
[q^-1 + O(q), 1 + O(q), q^-1 - 9/(128*d) + O(q), 1 + O(q), q^-1 - 19/(64*d) + O(q), q^-1 + 1/(64*d) + O(q)]
sage: QF.quasi_part_gens(min_exp=-1, max_exp=-1)
[q^-1 + O(q), q^-1 - 9/(128*d) + O(q), q^-1 - 19/(64*d) + O(q), q^-1 + 1/(64*d) + O(q)]
sage: QF.quasi_part_gens(min_exp=-2, r=1)
[q^-2 - 9/(128*d)*q^-1 - 261/(131072*d^2) + O(q), q^-1 - 9/(128*d) + O(q), 1 + O(q)]
sage: from sage.modular.modform_hecketriangle.space import ModularForms
sage: MF = ModularForms(k=36)
sage: MF.quasi_part_gens(min_exp=2)
[q^2 + 194184*q^4 + O(q^5), q^3 - 72*q^4 + O(q^5)]
sage: from sage.modular.modform_hecketriangle.space import QuasiModularForms
sage: MF = QuasiModularForms(n=5, k=6, ep=-1)
sage: MF.default_prec(2)
sage: MF.dimension()
3
sage: MF.quasi_part_gens(r=0)
[1 - 37/(200*d)*q + O(q^2)]
sage: MF.quasi_part_gens(r=0)[0] == MF.E6()
True
sage: MF.quasi_part_gens(r=1)
[1 + 33/(200*d)*q + O(q^2)]
sage: MF.quasi_part_gens(r=1)[0] == MF.E2()*MF.E4()
True
sage: MF.quasi_part_gens(r=2)
[]
sage: MF.quasi_part_gens(r=3)
[1 - 27/(200*d)*q + O(q^2)]
sage: MF.quasi_part_gens(r=3)[0] == MF.E2()^3
True
sage: from sage.modular.modform_hecketriangle.space import QuasiCuspForms, CuspForms
sage: MF = QuasiCuspForms(n=5, k=18, ep=-1)
sage: MF.default_prec(4)
sage: MF.dimension()
8
sage: MF.quasi_part_gens(r=0)
[q - 34743/(640000*d^2)*q^3 + O(q^4), q^2 - 69/(200*d)*q^3 + O(q^4)]
sage: MF.quasi_part_gens(r=1)
[q - 9/(200*d)*q^2 + 37633/(640000*d^2)*q^3 + O(q^4),
q^2 + 1/(200*d)*q^3 + O(q^4)]
sage: MF.quasi_part_gens(r=2)
[q | |
# coding: utf-8
"""
TGS API
A production scale tool for BYOND server management # noqa: E501
OpenAPI spec version: 9.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class RepositoryApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        """Initializes the API wrapper.

        :param api_client: An ApiClient to issue requests through; a default
            ApiClient is constructed when None is given.
        """
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
def repository_controller_create(self, body, api, user_agent, instance, **kwargs): # noqa: E501
"""Begin cloning the repository if it doesn't exist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_create(body, api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body52 body: The Tgstation.Server.Api.Models.Request.RepositoryCreateRequest. (required)
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.repository_controller_create_with_http_info(body, api, user_agent, instance, **kwargs) # noqa: E501
else:
(data) = self.repository_controller_create_with_http_info(body, api, user_agent, instance, **kwargs) # noqa: E501
return data
def repository_controller_create_with_http_info(self, body, api, user_agent, instance, **kwargs): # noqa: E501
"""Begin cloning the repository if it doesn't exist. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_create_with_http_info(body, api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Body52 body: The Tgstation.Server.Api.Models.Request.RepositoryCreateRequest. (required)
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'api', 'user_agent', 'instance'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method repository_controller_create" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `repository_controller_create`") # noqa: E501
# verify the required parameter 'api' is set
if ('api' not in params or
params['api'] is None):
raise ValueError("Missing the required parameter `api` when calling `repository_controller_create`") # noqa: E501
# verify the required parameter 'user_agent' is set
if ('user_agent' not in params or
params['user_agent'] is None):
raise ValueError("Missing the required parameter `user_agent` when calling `repository_controller_create`") # noqa: E501
# verify the required parameter 'instance' is set
if ('instance' not in params or
params['instance'] is None):
raise ValueError("Missing the required parameter `instance` when calling `repository_controller_create`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api' in params:
header_params['Api'] = params['api'] # noqa: E501
if 'user_agent' in params:
header_params['User-Agent'] = params['user_agent'] # noqa: E501
if 'instance' in params:
header_params['Instance'] = params['instance'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json-patch+json', 'application/json', 'text/json', 'application/*+json']) # noqa: E501
# Authentication setting
auth_settings = ['Token_Authorization_Scheme'] # noqa: E501
return self.api_client.call_api(
'/Repository', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def repository_controller_delete(self, api, user_agent, instance, **kwargs): # noqa: E501
"""Delete the repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_delete(api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.repository_controller_delete_with_http_info(api, user_agent, instance, **kwargs) # noqa: E501
else:
(data) = self.repository_controller_delete_with_http_info(api, user_agent, instance, **kwargs) # noqa: E501
return data
def repository_controller_delete_with_http_info(self, api, user_agent, instance, **kwargs): # noqa: E501
"""Delete the repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_delete_with_http_info(api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api', 'user_agent', 'instance'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method repository_controller_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api' is set
if ('api' not in params or
params['api'] is None):
raise ValueError("Missing the required parameter `api` when calling `repository_controller_delete`") # noqa: E501
# verify the required parameter 'user_agent' is set
if ('user_agent' not in params or
params['user_agent'] is None):
raise ValueError("Missing the required parameter `user_agent` when calling `repository_controller_delete`") # noqa: E501
# verify the required parameter 'instance' is set
if ('instance' not in params or
params['instance'] is None):
raise ValueError("Missing the required parameter `instance` when calling `repository_controller_delete`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api' in params:
header_params['Api'] = params['api'] # noqa: E501
if 'user_agent' in params:
header_params['User-Agent'] = params['user_agent'] # noqa: E501
if 'instance' in params:
header_params['Instance'] = params['instance'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['Token_Authorization_Scheme'] # noqa: E501
return self.api_client.call_api(
'/Repository', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RepositoryResponse', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def repository_controller_read(self, api, user_agent, instance, **kwargs): # noqa: E501
"""Get the repository's status. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_read(api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.repository_controller_read_with_http_info(api, user_agent, instance, **kwargs) # noqa: E501
else:
(data) = self.repository_controller_read_with_http_info(api, user_agent, instance, **kwargs) # noqa: E501
return data
def repository_controller_read_with_http_info(self, api, user_agent, instance, **kwargs): # noqa: E501
"""Get the repository's status. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.repository_controller_read_with_http_info(api, user_agent, instance, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api: The API version being used in the form \"Tgstation.Server.Api/[API version]\" (required)
:param str user_agent: The user agent of the calling client. (required)
:param int instance: The instance ID being accessed (required)
:return: RepositoryResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api', 'user_agent', 'instance'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument | |
linkTo(self, linkFilePath):
"""
Creates a symlink to self to at the path in the L{FilePath}
C{linkFilePath}.
Only works on posix systems due to its dependence on
L{os.symlink}. Propagates L{OSError}s up from L{os.symlink} if
C{linkFilePath.parent()} does not exist, or C{linkFilePath} already
exists.
@param linkFilePath: a FilePath representing the link to be created.
@type linkFilePath: L{FilePath}
"""
os.symlink(self.path, linkFilePath.path)
def open(self, mode='r'):
"""
Open this file using C{mode} or for writing if C{alwaysCreate} is
C{True}.
In all cases the file is opened in binary mode, so it is not necessary
to include C{"b"} in C{mode}.
@param mode: The mode to open the file in. Default is C{"r"}.
@type mode: L{str}
@raises AssertionError: If C{"a"} is included in the mode and
C{alwaysCreate} is C{True}.
@rtype: L{file}
@return: An open L{file} object.
"""
if self.alwaysCreate:
assert 'a' not in mode, ("Appending not supported when "
"alwaysCreate == True")
return self.create()
# This hack is necessary because of a bug in Python 2.7 on Windows:
# http://bugs.python.org/issue7686
mode = mode.replace('b', '')
return open(self.path, mode + 'b')
# stat methods below
    def restat(self, reraise=True):
        """
        Re-calculate cached effects of 'stat'. To refresh information on this
        path after you know the filesystem may have changed, call this method.

        @param reraise: a boolean. If true, re-raise exceptions from
            L{os.stat}; otherwise, mark this path as not existing, and remove
            any cached stat information.

        @raise Exception: If C{reraise} is C{True} and an exception occurs
            while reloading metadata.
        """
        try:
            self._statinfo = stat(self.path)
        except OSError:
            # 0 is a falsy sentinel for "stat failed"; the accessor methods
            # below only ever truth-test _statinfo before using it.
            self._statinfo = 0
            if reraise:
                raise
    def changed(self):
        """
        Clear any cached information about the state of this path on disk.

        @since: 10.1.0
        """
        # Dropping the cached stat result forces the next stat-based
        # accessor (getsize, isdir, ...) to call restat() and hit the
        # filesystem again.
        self._statinfo = None
def chmod(self, mode):
"""
Changes the permissions on self, if possible. Propagates errors from
L{os.chmod} up.
@param mode: integer representing the new permissions desired (same as
the command line chmod)
@type mode: L{int}
"""
os.chmod(self.path, mode)
def getsize(self):
"""
Retrieve the size of this file in bytes.
@return: The size of the file at this file path in bytes.
@raise Exception: if the size cannot be obtained.
@rtype: L{int}
"""
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_size
def getModificationTime(self):
"""
Retrieve the time of last access from this file.
@return: a number of seconds from the epoch.
@rtype: L{float}
"""
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return float(st.st_mtime)
def getStatusChangeTime(self):
"""
Retrieve the time of the last status change for this file.
@return: a number of seconds from the epoch.
@rtype: L{float}
"""
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return float(st.st_ctime)
def getAccessTime(self):
"""
Retrieve the time that this file was last accessed.
@return: a number of seconds from the epoch.
@rtype: L{float}
"""
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return float(st.st_atime)
def getInodeNumber(self):
"""
Retrieve the file serial number, also called inode number, which
distinguishes this file from all other files on the same device.
@raise NotImplementedError: if the platform is Windows, since the
inode number would be a dummy value for all files in Windows
@return: a number representing the file serial number
@rtype: L{int}
@since: 11.0
"""
if platform.isWindows():
raise NotImplementedError
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_ino
def getDevice(self):
"""
Retrieves the device containing the file. The inode number and device
number together uniquely identify the file, but the device number is
not necessarily consistent across reboots or system crashes.
@raise NotImplementedError: if the platform is Windows, since the
device number would be 0 for all partitions on a Windows platform
@return: a number representing the device
@rtype: L{int}
@since: 11.0
"""
if platform.isWindows():
raise NotImplementedError
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_dev
def getNumberOfHardLinks(self):
"""
Retrieves the number of hard links to the file.
This count keeps track of how many directories have entries for this
file. If the count is ever decremented to zero then the file itself is
discarded as soon as no process still holds it open. Symbolic links
are not counted in the total.
@raise NotImplementedError: if the platform is Windows, since Windows
doesn't maintain a link count for directories, and L{os.stat} does
not set C{st_nlink} on Windows anyway.
@return: the number of hard links to the file
@rtype: L{int}
@since: 11.0
"""
if platform.isWindows():
raise NotImplementedError
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_nlink
def getUserID(self):
"""
Returns the user ID of the file's owner.
@raise NotImplementedError: if the platform is Windows, since the UID
is always 0 on Windows
@return: the user ID of the file's owner
@rtype: L{int}
@since: 11.0
"""
if platform.isWindows():
raise NotImplementedError
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_uid
def getGroupID(self):
"""
Returns the group ID of the file.
@raise NotImplementedError: if the platform is Windows, since the GID
is always 0 on windows
@return: the group ID of the file
@rtype: L{int}
@since: 11.0
"""
if platform.isWindows():
raise NotImplementedError
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return st.st_gid
def getPermissions(self):
"""
Returns the permissions of the file. Should also work on Windows,
however, those permissions may not be what is expected in Windows.
@return: the permissions for the file
@rtype: L{Permissions}
@since: 11.1
"""
st = self._statinfo
if not st:
self.restat()
st = self._statinfo
return Permissions(S_IMODE(st.st_mode))
def exists(self):
"""
Check if this L{FilePath} exists.
@return: C{True} if the stats of C{path} can be retrieved successfully,
C{False} in the other cases.
@rtype: L{bool}
"""
if self._statinfo:
return True
else:
self.restat(False)
if self._statinfo:
return True
else:
return False
def isdir(self):
"""
Check if this L{FilePath} refers to a directory.
@return: C{True} if this L{FilePath} refers to a directory, C{False}
otherwise.
@rtype: L{bool}
"""
st = self._statinfo
if not st:
self.restat(False)
st = self._statinfo
if not st:
return False
return S_ISDIR(st.st_mode)
def isfile(self):
"""
Check if this file path refers to a regular file.
@return: C{True} if this L{FilePath} points to a regular file (not a
directory, socket, named pipe, etc), C{False} otherwise.
@rtype: L{bool}
"""
st = self._statinfo
if not st:
self.restat(False)
st = self._statinfo
if not st:
return False
return S_ISREG(st.st_mode)
def isBlockDevice(self):
"""
Returns whether the underlying path is a block device.
@return: C{True} if it is a block device, C{False} otherwise
@rtype: L{bool}
@since: 11.1
"""
st = self._statinfo
if not st:
self.restat(False)
st = self._statinfo
if not st:
return False
return S_ISBLK(st.st_mode)
def isSocket(self):
"""
Returns whether the underlying path is a socket.
@return: C{True} if it is a socket, C{False} otherwise
@rtype: L{bool}
@since: 11.1
"""
st = self._statinfo
if not st:
self.restat(False)
st = self._statinfo
if not st:
return False
return S_ISSOCK(st.st_mode)
def islink(self):
"""
Check if this L{FilePath} points to a symbolic link.
@return: C{True} if this L{FilePath} points to a symbolic link,
C{False} otherwise.
@rtype: L{bool}
"""
# We can't use cached stat results here, because that is the stat of
# the destination - (see #1773) which in *every case* but this one is
# the right thing to use. We could call lstat here and use that, but
# it seems unlikely we'd actually save any work that way. -glyph
return islink(self.path)
def isabs(self):
"""
Check if this L{FilePath} refers to an absolute path.
This always returns C{True}.
@return: C{True}, always.
@rtype: L{bool}
"""
return isabs(self.path)
def listdir(self):
"""
List the base names of the direct children of this L{FilePath}.
@return: A L{list} of L{bytes}/L{unicode} giving the names of the
contents of the directory this L{FilePath} refers to. These names
are relative to this L{FilePath}.
@rtype: L{list}
@raise: Anything the platform L{os.listdir} implementation might raise
(typically L{OSError}).
"""
return listdir(self.path)
def splitext(self):
"""
Split the file path into a pair C{(root, ext)} such that
C{root + | |
# Breakout/RainbowBreakout.py
"""
This is an attempt to recreate the algorithm that was used by deepmind in the
first major paper they published about beating atari games.
https://www.cs.toronto.edu/~vmnih/docs/dqn.pdf
Uses some changes suggested in
https://becominghuman.ai/lets-build-an-atari-ai-part-1-dqn-df57e8ff3b26
And the gaps were filled based on the implementation in
https://github.com/boyuanf/DeepQLearning/blob/master/deep_q_learning.py
"""
from datetime import datetime
from resource import getrusage, RUSAGE_SELF
from matplotlib import pyplot as plt
import tensorflow as tf
import numpy as np
import gym, os, argparse, sys, time
parent_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if parent_dir not in sys.path:
sys.path.insert(1, parent_dir)
from utils.ExperienceBuffer import WeightedExpBuf
from utils.BaseReplayQnet import BaseReplayQnet
# Command-line configuration for training ('--mode train') or watching
# ('--mode show') the agent. Defaults reproduce the settings described in
# the module docstring's references.
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=str, help='train, show')
# Epsilon-greedy exploration schedule: anneal from e_i to e_f.
parser.add_argument('--e_i', type=float, default=1,
                    help="Initial chance of selecting a random action.")
parser.add_argument('--e_f', type=float, default=.05,
                    help="Final chance of selecting a random action.")
parser.add_argument(
    '--e_anneal', type=int, default=int(10e6),
    help='Number of transition replays over which to linearly anneal from e_i '
         'to e_f.')
# Checkpointing.
parser.add_argument(
    '--ckpt_dir', type=str,
    help='Folder to save checkpoints to.')
parser.add_argument('--restore_ckpt', type=str,
                    help='path to restore a ckpt from')
# Experience replay and optimization hyperparameters.
parser.add_argument(
    '--exp_capacity', type=int, default=int(6e5),
    help='Number of past experiences to hold for replay. (600k ~ 12.5GB)')
parser.add_argument(
    '--begin_updates', type=int, default=int(2e5),
    help='Number of experiences before begin to training begins.')
parser.add_argument(
    '--batch_size', type=int, default=32,
    help='Batch size for each update to the network (multiple of 8)')
parser.add_argument(
    '--output_period', type=int, default=int(2e6),
    help='Number of transition updates between outputs (print, checkpoint)')
parser.add_argument(
    '--learning_rate', type=float, default=1e-4,
    help="learning rate for the network. passed to the optimizer.")
parser.add_argument(
    '--future_discount', type=float, default=0.99,
    help="Rate at which to discount future rewards.")
parser.add_argument('--train_record_fname', type=str,
                    default="training-record-RainbowBreakout.txt",
                    help="Absolute path to file to save progress to (same as what is"
                    " printed to cmd line.")
parser.add_argument('--train_steps', type=int, default=int(100e6),
                    help="Number of transition replays to experience "
                    "(will update train_steps // batch_size times)")
parser.add_argument(
    '--show_random', type=bool, default=False,
    help="Use random actions when mode=show at a rate of e_f")
parser.add_argument(
    '--random_starts', type=int, default=30,
    help='randomly perform stand still at beginning of episode.')
# Prioritized-replay weighting (alpha) and importance-sampling bias
# correction (beta annealed from beta_i to beta_f).
parser.add_argument('--alpha', type=float, default=.6,
                    help="Factor for how much weight prioritization")
parser.add_argument('--beta_i', type=float, default=.4,
                    help="initial weighting for bias correction")
parser.add_argument('--beta_f', type=float, default=1,
                    help="final weighting for bias correction")
# Not sure what value makes sense for the weight offset. Does having an
# absolute offset even make sense at all? The absolute value of the loss
# is fairly meaningless, given I am performing a classification with no
# normalization layer.
parser.add_argument('--priority_weight_offset', type=float, default=.01,
                    help="small value so no transition has 0 weight.")
def preprocess_img(img):
    """
    Convert an RGB Atari frame to a cropped, downsampled grayscale frame.

    The frame is downsampled by a factor of 2 in both spatial dimensions,
    averaged over the color channels, stored as uint8 to save replay-buffer
    memory, and cropped to rows 15:100 (dropping the score area). Deepmind
    instead cropped to a square 84x84 for their GPU convolutions.

    :param img: Atari image of shape (210, 160, 3)
    :return: grayscale uint8 array of shape (85, 80)
    """
    gray = img[::2, ::2].mean(axis=2).astype(np.uint8)
    return gray[15:100, :]
def normalize(states):
    """
    Rescale uint8 pixel states to floats on [-1, 1).

    :param states: numpy array of states (uint8 pixels, 0..255)
    :return: float32 array with values on [-1, 1)
    """
    scaled = states.astype(np.float32) / 128.0
    return scaled - 1.0
class RainbowBreakoutQnet(BaseReplayQnet):
    """
    Q-learning network for Breakout using prioritized (weighted) experience
    replay with importance-sampling loss correction.
    """

    def __init__(self, input_shape, n_actions, batch_size, optimizer,
                 exp_buf_capacity, discount, alpha, beta_i, beta_f, beta_anneal,
                 weight_offset, is_training):
        # Must be set before BaseReplayQnet.__init__ runs, since the parent
        # constructor builds the network and make_nn_impl reads it.
        self.is_training = is_training
        exp_buf = WeightedExpBuf(exp_buf_capacity, alpha, beta_i, beta_f,
                                 beta_anneal, weight_offset)
        BaseReplayQnet.__init__(
            self, input_shape, n_actions, batch_size, optimizer, exp_buf,
            discount)

    def make_nn_impl(self):
        """
        Make a NN to take in a batch of states (3 preprocessed images) with
        an output of size 3 (stay, left, right). No activation function is
        applied to the final output. Dropout is active only while training.

        :return: output layer tensor of shape (batch, n_actions)
        """
        if (self.is_training):
            print('using dropout')
        print('state_input', self.state_input)
        conv1 = tf.layers.conv2d(self.state_input, 32, (8, 8), (4, 4),
                                 activation=tf.nn.relu)
        print('conv1', conv1)
        conv2 = tf.layers.conv2d(conv1, 64, (4, 4), (2, 2),
                                 activation=tf.nn.relu)
        print('conv2', conv2)
        hidden1 = tf.layers.dense(tf.layers.flatten(conv2), 256,
                                  activation=tf.nn.relu)
        print('hidden1', hidden1)
        dropout1 = tf.layers.dropout(hidden1, training=self.is_training)
        print('dropout1', dropout1)
        return tf.layers.dense(dropout1, self.n_actions)

    def loss_fn(self, expected, actual):
        """
        Calculate the (per-sample) loss of the neural network.

        IS_weights_input must be created here since loss_fn is an
        abstractmethod and must be available to the parent, so dependencies
        cannot be pushed off until after parent construction.

        :param expected: a batch of target_vals
        :param actual: a batch of outputs from the network
        :return: a batch of losses (unreduced).
        """
        # Priority replay shows surprising (large-error) transitions more
        # often, which biases training. Importance-sampling weights
        # down-weight the loss of frequently selected transitions to
        # compensate.
        self.IS_weights_input = tf.placeholder(shape=None, dtype=tf.float32)
        return tf.losses.huber_loss(
            labels=expected, predictions=actual,
            weights=self.IS_weights_input,
            reduction=tf.losses.Reduction.NONE)

    def update(self, sess):
        """
        Perform a prioritized-replay Q learning update from a sampled batch
        of past experiences.

        :param sess: tf.Session()
        """
        # Get a batch of past experiences.
        ids, states, actions, rewards, next_states, not_terminals, IS_weights =\
            self.exp_buf.sample(self.batch_size)
        states = normalize(states)
        next_states = normalize(next_states)

        # Q(s, a): the network's current estimate for the replayed
        # state/action pair. BUGFIX: this previously fed next_states, which
        # computed Q(s', a) and therefore produced a wrong TD error for the
        # priority-replay weight update below.
        fullQ = sess.run(self.main_net,
                         feed_dict={self.state_input: states})
        pred_vals = fullQ[range(self.batch_size), actions]

        # To predict the 'true' Q value, use the network to choose the best
        # action in next_state, then take that action's predicted value.
        next_actions = self.predict(sess, next_states)
        fullQ = sess.run(self.main_net,
                         feed_dict={self.state_input: next_states})
        nextQ = fullQ[range(self.batch_size), next_actions]

        # Discounted future value:
        #   trueQ = r + discount * Q(next_state, next_action)
        # If this is a terminal transition, trueQ = r.
        target_vals = rewards + not_terminals * self.discount * nextQ

        # Update the weighting for each experience based on its TD error.
        # TODO: why do we take the error before training, don't these weights
        # become stale as soon as train_op is run?
        self.exp_buf.update_weights(ids, abs(target_vals - pred_vals))

        # Backpropagate the IS-weighted Huber loss between the target values
        # and the network's predictions for (state, action).
        _ = sess.run(self.train_op,
                     feed_dict={
                         self.state_input: states,
                         self.action_input: actions,
                         self.target_vals_input: target_vals,
                         self.IS_weights_input: IS_weights})
def get_qnet(args, scope=''):
    """
    Build the Breakout network from parsed command-line args so the same
    parameters never have to be copy-pasted at each call site.
    """
    assert args.batch_size % 8 == 0, "batch_size must be a multiple of 8"
    optimizer = tf.train.AdamOptimizer(args.learning_rate)
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        return RainbowBreakoutQnet(
            input_shape=(85, 80, 3),
            n_actions=3,
            batch_size=args.batch_size,
            optimizer=optimizer,
            exp_buf_capacity=args.exp_capacity,
            discount=args.future_discount,
            alpha=args.alpha,
            beta_i=args.beta_i,
            beta_f=args.beta_f,
            beta_anneal=args.train_steps // args.batch_size,
            weight_offset=args.priority_weight_offset,
            is_training=(args.mode == 'train'))
def play_episode(args, sess, env, qnet, e):
"""
Actually plays a single game and performs updates once we have enough
experiences.
:param args: parser.parse_args
:param sess: tf.Session()
:param env: gym.make()
:param qnet: class which holds the NN to play and update.
:param e: chance of a random action selection.
:return: reward earned in the game, update value of e, transitions updated
against.
"""
done = False
_ = env.reset()
reward = 0 # total reward for this episode
turn = 0
transitions = 0 # updates * batch_size
lives = 5 # Always start with 5 lives
terminal = True # Anytime we lose a life, and beginning of episode.
while not done:
if terminal:
terminal = False
# To make sure that the agent doesn't just learn to set up well for
# the way the game starts, begin the game by not doing anything and
# letting the ball move.
for _ in range(np.random.randint(1, args.random_starts)):
img, _, done, info = env.step(1) # starts game, but stays still
img = preprocess_img(img)
state = np.stack((img, img, img), axis=2)
action = qnet.predict(sess, normalize(np.array([state])))[0]
if np.random.rand(1) < e:
action = qnet.rand_action()
# Perform an action, prep the data, and store as an experience
img, r, done, info = env.step(action + 1) # {1, 2, 3}
img = np.reshape(preprocess_img(img), (85, 80, 1))
next_state = np.concatenate((state[:, :, 1:], img), axis=2)
if info['ale.lives'] < lives:
terminal = True
lives = info['ale.lives']
qnet.add_experience(state, action, r, next_state, terminal)
if qnet.exp_buf_size() | |
# nuttamas/PycQED_py3
import adaptive
from adaptive.learner import LearnerND
import numpy as np
from functools import partial
from collections.abc import Iterable
import logging
log = logging.getLogger(__name__)
log.error("`learnerND_optimize` is deprecated! Use `learnernND_minimize`.")
# ######################################################################
# Loss function utilities for adaptive.learner.learnerND
# ######################################################################
"""
NB: Only works with ND > 1 domain, and 1D image
Possible things to improve
- try resolution loss with the default losses of the adaptive package
- find how to calculate the extension of a simplex in each dimension
such that it would be possible to specify the resolution boundaries
per dimension
"""
def mk_res_loss_func(default_loss_func, min_volume=0.0, max_volume=1.0):
    """
    Wrap `default_loss_func` with resolution bounds on the simplex volume.

    Simplices smaller than `min_volume` get zero loss (stop refining them);
    simplices larger than `max_volume` get infinite loss (refine first); in
    between, `default_loss_func` decides.
    """
    # *args/**kw are passed through so wrappers such as
    # mk_target_func_val_loss_example keep working.
    def func(simplex, values, value_scale, *args, **kw):
        vol = adaptive.learner.learnerND.volume(simplex)
        if vol < min_volume:
            # Don't keep splitting sufficiently small simplices.
            return 0.0
        if vol > max_volume:
            # Maximally prioritize simplices that are too large.
            return np.inf
        return default_loss_func(simplex, values, value_scale, *args, **kw)

    # Preserve the loss-function attribute in case a loss function from
    # adaptive.learner.learnerND is given.
    if hasattr(default_loss_func, "nth_neighbors"):
        func.nth_neighbors = default_loss_func.nth_neighbors
    return func
def mk_non_uniform_res_loss_func(
    default_loss_func, n_points: int = 249, n_dim: int = 1, res_bounds=(0.5, 3.0)
):
    """
    User-friendly wrapper around `mk_res_loss_func`.

    Instead of exact simplex-volume limits, specify a target number of
    points and relative resolution bounds; the corresponding min/max simplex
    volumes are derived from a uniform grid of `n_points` over the (unity-
    normalized) parameter space. For precise control of the volume limits
    use `mk_res_loss_func` directly.
    """
    # LearnerND normalizes the parameter space to unity.
    normalized_domain_size = 1.0
    assert res_bounds[1] > res_bounds[0]
    # Points per axis for a uniform grid: the n-dim root of n_points.
    pnts_per_dim = np.ceil(np.power(n_points, 1.0 / n_dim))
    uniform_resolution = normalized_domain_size / pnts_per_dim
    lo, hi = res_bounds
    return mk_res_loss_func(
        default_loss_func,
        min_volume=(uniform_resolution * lo) ** n_dim,
        max_volume=(uniform_resolution * hi) ** n_dim,
    )
# ######################################################################
# LearnerND wrappings to be able to access all learner data
# ######################################################################
class LearnerND_Optimize(LearnerND):
    """
    A LearnerND that additionally supports loss functions created with
    `mk_optimize_res_loss_func`, as well as those made by
    `mk_non_uniform_res_loss_func` / `mk_res_loss_func` (including the loss
    functions shipped with adaptive.learner.learnerND).

    Resolution loss functions built here delegate to some other loss
    function whenever the resolution boundaries are respected.
    """

    def __init__(self, func, bounds, loss_per_simplex=None):
        super(LearnerND_Optimize, self).__init__(func, bounds, loss_per_simplex)
        # Keep the original learner behaviour, but if the supplied loss
        # function asks for learner access, track the running extrema and
        # bind this learner instance into the loss via partial().
        if hasattr(self.loss_per_simplex, "needs_learner_access"):
            self.best_min = np.inf
            self.best_max = -np.inf
            self.loss_per_simplex = partial(self.loss_per_simplex, learner=self)
def mk_optimization_loss(minimize=True, use_grad=False):
    """Make a loss function that steers sampling toward low (or high) values.

    The returned function requires learner access (it reads/writes
    ``learner.best_min`` / ``learner.best_max``), so it must be used with
    `LearnerND_Optimize` or wrapped via a helper that passes ``learner``.

    Arguments:
        minimize: (bool) False to search for maxima instead of minima.
        use_grad: (bool) add the std of the simplex values to the loss,
            biasing sampling toward high-gradient regions.
    """
    def func(simplex, values, value_scale, learner):
        # Assumes values is a numpy array.
        # The learner evaluates the domain boundaries first; until they are
        # done, take the extrema over ALL data so the running min/max account
        # for every point seen at the beginning of the sampling.
        if not learner.bounds_are_done:
            local_min = np.min(list(learner.data.values()))
            local_max = np.max(list(learner.data.values()))
        else:
            local_min = np.min(values)
            local_max = np.max(values)
        # Update the best-seen extrema stored on the learner.
        learner.best_min = (
            local_min if learner.best_min > local_min else learner.best_min
        )
        learner.best_max = (
            local_max if learner.best_max < local_max else learner.best_max
        )
        values_domain_len = np.subtract(learner.best_max, learner.best_min, dtype=float)
        if values_domain_len == 0:
            # A better numeric-precision check could be used here; this at
            # least avoids a division by zero when all values seen so far
            # are exactly the same.
            return 0.5
        # Normalize to the values domain so the loss is always positive
        # (the learner expects positive output from the loss function).
        if minimize:
            loss = np.average((learner.best_max - values) / values_domain_len)
        else:
            loss = np.average((values - learner.best_min) / values_domain_len)
        if use_grad:
            # Spread of the simplex values, in domain units.
            loss += np.std(values) / values_domain_len
        return loss
    # Signal to LearnerND_Optimize that this loss needs the learner instance.
    func.needs_learner_access = True
    return func
def mk_optimize_res_loss_func(
    n_points, n_dim, res_bounds=(0.5, 3.0), minimize=True, use_grad=False
):
    """
    Build a loss function that distributes sampling points over the domain
    more optimally than uniform sampling, with the goal of locating minima
    (or maxima), while enforcing a minimum and maximum resolution.
    Arguments:
        n_points: budget of points available to sample
        n_dim: domain dimension of the function to sample
        res_bounds: (lo, hi) resolution bounds in units of the uniform
            resolution. (0., np.inf) allows arbitrarily fine resolution and
            imposes no minimum (i.e. does not force exploration of the full
            domain) — with that choice the learner gets stuck at the first
            optimum it finds.
        minimize: (bool) False to search for maxima
        use_grad: (bool) add the std of the simplex's values to the loss;
            makes the learner linger in regions with high gradients
    Return: loss_per_simplex function to be used with LearnerND
    """
    base_loss = mk_optimization_loss(minimize=minimize, use_grad=use_grad)
    loss_func = mk_non_uniform_res_loss_func(
        base_loss, n_points=n_points, n_dim=n_dim, res_bounds=res_bounds
    )
    # The wrapped optimization loss needs the learner instance passed in.
    loss_func.needs_learner_access = True
    return loss_func
# ######################################################################
# Below is the first attempt, it works but the above one is more general
# ######################################################################
def mk_target_func_val_loss_example(val):
    """
    This is an attempt to force the learner to keep looking for better
    optimal points and not just being pushed away from the local optimum
    (when used as the default_loss_func with mk_res_loss_func).
    It constantly tries to find a point better than the best seen.
    NB: Didn't seem to work for me for the CZ simulations.
    NB2: It is still a good example of how to use the LearnerND wrapper
    above so that the entire learner data is available without modifying
    the original LearnerND in any other way that might become very
    incompatible later.
    """
    def target_func_val_loss(simplex, values, value_scale, learner):
        # Assumes values is a numpy array.
        # Inverse squared distance to the target value `val`.
        loss_value = 1.0 / np.sum((values - val) ** 2)
        # Keep updating the widest loss range seen so far.
        # NOTE(review): best_min/best_max here track LOSS values, unlike
        # mk_optimization_loss where they track sampled function values.
        learner.best_min = (
            loss_value if learner.best_min > loss_value else learner.best_min
        )
        learner.best_max = (
            loss_value if learner.best_max < loss_value else learner.best_max
        )
        # Downscore the simplex to the minimum if it is not better than the
        # best loss seen so far.
        return learner.best_min if loss_value < learner.best_max else loss_value
    return target_func_val_loss
"""
Possible improvement for the use of std
- Try including also the nearst points in the std and see if it works
even better
"""
def mk_target_func_val_loss_times_std(val):
    """Loss: inverse squared distance to ``val``, scaled by the value spread.

    The std factor makes the loss gradient-sensitive: simplices whose values
    vary a lot score higher.
    """
    def target_func_val_loss(simplex, values, value_scale):
        # Assumes values is a numpy array.
        sse = np.sum((values - val) ** 2)
        return 1.0 / sse * np.std(values)
    return target_func_val_loss
def mk_target_func_val_loss_plus_std(val):
    """
    Additively gradient-sensitive variant: loss is the inverse squared
    distance to ``val`` plus the std of the simplex values. Only mildly
    sensitive to the gradient — mk_target_func_val_loss_times_std seemed
    to work better.
    """
    def target_func_val_loss(simplex, values, value_scale):
        # Assumes values is a numpy array.
        closeness = 1.0 / np.sum((values - val) ** 2)
        return closeness + np.std(values)
    return target_func_val_loss
# ######################################################################
def mk_target_func_val_loss(val):
    """Loss is the reciprocal of the summed squared distance to ``val``."""
    def target_func_val_loss(simplex, values, value_scale):
        # Assumes values is a numpy array.
        distance_sq = np.sum((values - val) ** 2)
        return 1.0 / distance_sq
    return target_func_val_loss
def mk_target_val_res_loss_func(
    target_value, n_points, n_dim, res_bounds=(0.5, 3.0), default_loss_func="sum"
):
    """Build a resolution-bounded loss that targets ``target_value``.

    Arguments:
        target_value: function value the learner should home in on.
        n_points: budget of points available to sample.
        n_dim: domain dimension of the function to sample.
        res_bounds: resolution bounds in units of the uniform resolution
            (see ``mk_non_uniform_res_loss_func``).
        default_loss_func: one of the strings "sum", "times_std",
            "plus_std", "needs_learner_example" selecting a built-in target
            loss, or a ready-made loss callable to wrap directly.
    Raises:
        ValueError: if a string selector is not recognized.
    """
    if isinstance(default_loss_func, str):
        if default_loss_func == "times_std":
            default_func = mk_target_func_val_loss_times_std(target_value)
        elif default_loss_func == "plus_std":
            log.warning("times_std is probably better...")
            default_func = mk_target_func_val_loss_plus_std(target_value)
        elif default_loss_func == "needs_learner_example":
            default_func = mk_target_func_val_loss_example(target_value)
        elif default_loss_func == "sum":
            default_func = mk_target_func_val_loss(target_value)
        else:
            raise ValueError("Default loss function type not recognized!")
    else:
        # BUG FIX: a callable default_loss_func previously fell through with
        # `default_func` unassigned, raising NameError below.
        default_func = default_loss_func
    func = mk_non_uniform_res_loss_func(
        default_func, n_points=n_points, n_dim=n_dim, res_bounds=res_bounds
    )
    # Propagate learner access: for the built-in example selector (as
    # before), and also for caller-supplied callables that declare it.
    if default_loss_func == "needs_learner_example":
        func.needs_learner_access = True
    elif getattr(default_func, "needs_learner_access", False):
        func.needs_learner_access = True
    return func
# ######################################################################
# Attempt to limit the resolution in each dimension
# ######################################################################
def mk_res_loss_per_dim_func(
    default_loss_func, min_distances=0.0, max_distances=np.inf
):
    """
    Wrap ``default_loss_func`` so the spacing of adaptive sampling is
    bounded per dimension. ``min_distances`` / ``max_distances`` may be
    scalars (same bound for every dimension) or per-dimension arrays.
    The returned function needs learner access (used with
    LearnerND_Optimize); the ``learner`` kwarg is forwarded to
    ``default_loss_func`` only if it declares ``needs_learner_access``.
    """
    min_distances = np.asarray(min_distances)
    max_distances = np.asarray(max_distances)
    assert np.all(min_distances < max_distances)
    def func(simplex, values, value_scale, *args, **kw):
        learner = kw.pop("learner")
        # Per-dimension extent of the simplex's bounding box.
        per_dim = simplex.T
        extent = np.max(per_dim, axis=1) - np.min(per_dim, axis=1)
        if np.all(extent < min_distances):
            # Small enough in every dimension: stop refining here.
            return 0.0
        if np.any(extent > max_distances):
            # Too coarse in some dimension: maximally prioritize splitting.
            return np.inf
        if hasattr(default_loss_func, "needs_learner_access"):
            kw["learner"] = learner
        return default_loss_func(simplex, values, value_scale, *args, **kw)
    func.needs_learner_access = True
    return func
def mk_optimize_res_loss_per_dim_func(
bounds, min_distances=0.0, max_distances=np.inf, minimize=True, use_grad=False
):
"""
It doesn't work well because I dind't realise soon enough that more
control over how the learner | |
<gh_stars>0
#!/usr/bin/env python
import logging
import sys
sys.path.append("../../")
sys.path.append("pylib")
import time
import datetime
import pymongo
import uuid
import os
import subprocess
import os.path
import settings
from common.utils import getSiteDBCollection
sys.path.insert(0, "../../")
class LoggingManager:
    """Swaps the root logger's console/file handlers per calculation.

    Each calculation gets handlers whose format embeds the calculation id
    (console) and whose file lives under a per-site log directory.
    """
    def __init__(self):
        # Current handlers; None until reconfig() is first called.
        self.h_console = None
        self.h_file = None
        logging.getLogger('').setLevel(logging.INFO)
    def reconfig_h_console(self, site_id, calculation_id):
        """Replace the root console handler with one tagged by calculation_id."""
        if self.h_console is not None:
            self.h_console.flush()
            logging.getLogger('').removeHandler(self.h_console)
        self.h_console = logging.StreamHandler()
        self.h_console.setLevel(logging.INFO)
        formatter = logging.Formatter("%(asctime)s|" + calculation_id +
                                      "|%(levelname)s|%(name)s|%(message)s", datefmt="%Y-%m-%d %H:%M:%S")
        self.h_console.setFormatter(formatter)
        logging.getLogger('').addHandler(self.h_console)
    def getLogFilePath(self, site_id, calculation_id):
        """Return <settings.log_dir>/<site_id>/<timestamp>_<calculation_id>.log,
        creating the site directory if needed."""
        site_log_dir = os.path.join(settings.log_dir, site_id)
        if not os.path.isdir(site_log_dir):
            os.makedirs(site_log_dir)
        formatted_date_time = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
        log_file_name = "%s_%s.log" % (formatted_date_time, calculation_id)
        log_file_path = os.path.join(site_log_dir, log_file_name)
        return log_file_path
    def reconfig_h_file(self, site_id, calculation_id):
        """Replace the root file handler with one writing to a fresh log file."""
        if self.h_file is not None:
            # Close the previous file before detaching its handler.
            self.h_file.flush()
            self.h_file.close()
            logging.getLogger('').removeHandler(self.h_file)
        self.h_file = logging.FileHandler(
            self.getLogFilePath(site_id, calculation_id))
        self.h_file.setLevel(logging.INFO)
        formatter = logging.Formatter(
            "%(asctime)s|%(levelname)s|%(name)s|%(message)s", datefmt="%Y-%m-%d %H:%M:%S")
        self.h_file.setFormatter(formatter)
        logging.getLogger('').addHandler(self.h_file)
    def reconfig(self, site_id, calculation_id):
        """Reconfigure both console and file handlers for a new calculation."""
        self.reconfig_h_console(site_id, calculation_id)
        self.reconfig_h_file(site_id, calculation_id)
logging_manager = LoggingManager()
def getLogger():
    """Return the shared logger used by the batch server."""
    batch_logger = logging.getLogger("Batch Server")
    return batch_logger
def getBaseWorkDir(site_id, calculation_id):
    """Create and return a fresh per-calculation working directory under
    <settings.work_dir>/<site_id>/<timestamp>_<calculation_id>."""
    stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    calculation_work_dir_path = os.path.join(
        settings.work_dir, site_id, "%s_%s" % (stamp, calculation_id))
    os.makedirs(calculation_work_dir_path)
    return calculation_work_dir_path
def getConnection():
    """Open a MongoDB connection per settings.

    Uses a replica-set client when settings.replica_set is set, otherwise a
    plain connection to settings.mongodb_host.
    NOTE(review): MongoReplicaSetClient/Connection are legacy pymongo APIs
    (pre-3.0) — confirm the pinned pymongo version before upgrading.
    """
    if(settings.replica_set):
        return pymongo.MongoReplicaSetClient(settings.mongodb_host, replicaSet=settings.replica_set)
    else:
        return pymongo.Connection(settings.mongodb_host)
connection = getConnection()
class ShellExecutionError(Exception):
    """Raised when a shell command run by a flow exits with a non-zero code."""
    pass
class BaseFlow:
    """A named node in the calculation DAG.

    A flow owns a list of job callables, runs them in order when called, and
    on success triggers its dependent flows. Relies on module-level globals
    set per calculation: BASE_WORK_DIR, SITE_ID, DISABLEDFLOWS, CALC_SUCC.
    """
    def __init__(self, name):
        self.name = name          # used for the per-flow work directory
        self.jobs = []            # callables executed in order by __call__
        self.dependencies = []    # flows to run after this one succeeds
    def dependOn(self, flow):
        """Register this flow as a downstream dependency of `flow`."""
        self.parent = flow
        flow.dependencies.append(self)
    def getWorkDir(self):
        """Return (and lazily create) this flow's working directory."""
        work_dir = os.path.join(BASE_WORK_DIR, self.name)
        if not os.path.exists(work_dir):
            os.makedirs(work_dir)
        return work_dir
    def getWorkFile(self, file_name):
        """Return the path of `file_name` inside this flow's work dir."""
        return os.path.join(self.getWorkDir(), file_name)
    def __call__(self):
        """Run all jobs; on success run dependent flows. Returns True/False."""
        global CALC_SUCC
        writeFlowBegin(SITE_ID, self.__class__.__name__)
        if self.__class__.__name__ in DISABLEDFLOWS:
            getLogger().info("Flow Skipped: %s" % self.__class__.__name__)
            writeFlowEnd(SITE_ID, self.__class__.__name__,
                         is_successful=True, is_skipped=True)
            return True
        else:
            for job_callable in self.jobs:
                if not self._execJob(job_callable):
                    # First failing job aborts the flow and marks the
                    # whole calculation as failed.
                    writeFlowEnd(
                        SITE_ID, self.__class__.__name__, is_successful=False, is_skipped=False,
                        err_msg="SOME_JOBS_FAILED")
                    CALC_SUCC = False
                    return False
            writeFlowEnd(SITE_ID, self.__class__.__name__,
                         is_successful=True, is_skipped=False)
            # execute downlines
            for dependency in self.dependencies:
                dependency()
            return True
    def _exec_shell(self, command):
        """Run a shell command, raising ShellExecutionError on non-zero exit."""
        getLogger().info("Execute %s" % command)
        #ret_code = os.system(command)
        # if ret_code != 0:
        # raise ShellExecutionError("Shell Execution Failed, ret_code=%s" % ret_code)
        ret_code = subprocess.call(command, shell=True)
        if ret_code != 0:
            getLogger().error("Failed %s" % sys.stderr)
            raise ShellExecutionError(
                "Shell Execution Failed, ret_code=%s" % ret_code)
    def _execJob(self, callable):
        """Run one job callable; log and record failure, never propagate."""
        try:
            getLogger().info("Start Job: %s.%s" %
                             (self.__class__.__name__, callable.__name__))
            callable()
            getLogger().info("Job Succ: %s.%s" %
                             (self.__class__.__name__, callable.__name__))
            return True
        # NOTE(review): bare except deliberately swallows everything so one
        # job failure cannot kill the batch process; failure is recorded.
        except:
            getLogger(
            ).critical("An Exception happened while running Job: %s" % callable,
                       exc_info=True)
            # TODO: send message (email, sms)
            # TODO: record exception info.
            writeFailedJob(SITE_ID, self.__class__.__name__, callable.__name__)
            return False
class PreprocessingFlow(BaseFlow):
    """Backfills raw logs and reverses them into chronological order.

    Downstream flows read "backfilled_raw_logs" from this flow's work dir.
    """
    def __init__(self):
        BaseFlow.__init__(self, "preprocessing")
        self.jobs += [self.do_backfill,
                      self.do_reverse_reversed_backfilled_raw_logs]
    def do_backfill(self):
        """Dump backfilled raw logs (newest first) into the work dir."""
        from preprocessing import backfiller
        last_ts = None  # FIXME: load correct last_ts from somewhere
        bf = backfiller.BackFiller(connection, SITE_ID, last_ts,
                                   self.getWorkFile("reversed_backfilled_raw_logs"))
        last_ts = bf.start()  # FIXME: save last_ts somewhere
    def do_reverse_reversed_backfilled_raw_logs(self):
        """Reverse line order (tac) to produce chronological logs."""
        input_path = self.getWorkFile("reversed_backfilled_raw_logs")
        output_path = self.getWorkFile("backfilled_raw_logs")
        self._exec_shell("%s <%s >%s" %
                         (settings.tac_command, input_path, output_path))
class HiveBasedStatisticsFlow(BaseFlow):
    """Runs the Hive-based statistics over the backfilled raw logs."""
    def __init__(self):
        BaseFlow.__init__(self, "hive-based-statistics")
        self.jobs += [self.do_hive_based_calculations]
    # Begin Hive Based Calculations
    def do_hive_based_calculations(self):
        """Feed the parent flow's backfilled logs into the Hive calculations."""
        from statistics.hive_based_calculations import hive_based_calculations
        backfilled_raw_logs_path = self.parent.getWorkFile(
            "backfilled_raw_logs")
        hive_based_calculations(
            connection, SITE_ID, self.getWorkDir(), backfilled_raw_logs_path)
    #
    # End Hive Based Calculations
class BaseSimilarityCalcFlow(BaseFlow):
    """Shared item-similarity pipeline.

    Subclasses supply getExtractUserItemMatrixJobs() to build the
    "user_item_matrix" work file; this base class then sorts it, computes
    co-occurrences, turns them into similarities (log-likelihood when
    SITE["algorithm_type"] == "llh", raw counts otherwise), keeps the top-N
    per item, and uploads the result. Intermediate steps are shell sort/
    uniq/cut pipelines over files in the flow's work dir.
    """
    def __init__(self, type):
        BaseFlow.__init__(self, "similarities-calc:%s" % type)
        self.type = type
        self.jobs += self.getExtractUserItemMatrixJobs(
        ) + [self.do_sort_user_item_matrix,
             self.do_calc_item_prefer_count,
             self.do_calc_user_count,
             self.do_emit_cooccurances,
             self.do_sort_cooccurances,
             self.do_count_cooccurances,
             self.do_format_cooccurances_counts,
             self.do_calc_item_similarities,
             self.do_make_item_similarities_bi_directional,
             self.do_sort_item_similarities_bi_directional,
             self.do_extract_top_n,
             self.do_upload_item_similarities_result]
    def do_sort_user_item_matrix(self):
        """Sort the user/item matrix so one user's rows are contiguous."""
        input_path = self.getWorkFile("user_item_matrix")
        output_path = self.getWorkFile("user_item_matrix_sorted")
        self._exec_shell("sort -T /cube/services/batch/temp %s > %s" %
                         (input_path, output_path))
    def do_calc_item_prefer_count(self):
        """Count preferences per item (only needed for log-likelihood)."""
        if SITE["algorithm_type"] == "llh":
            input_path = self.getWorkFile("user_item_matrix_sorted")
            output_path = self.getWorkFile("item_prefer_count")
            self._exec_shell(
                "cut -d , -f 2 %s | sort -T /cube/services/batch/temp | uniq -c > %s" %
                (input_path, output_path))
    def do_calc_user_count(self):
        """Count distinct users (only needed for log-likelihood)."""
        if SITE["algorithm_type"] == "llh":
            input_path = self.getWorkFile("user_item_matrix_sorted")
            output_path = self.getWorkFile("user_count")
            self._exec_shell("cut -d , -f 1 %s | uniq | wc -l > %s" %
                             (input_path, output_path))
    def do_emit_cooccurances(self):
        """Emit item co-occurrence pairs per user from the sorted matrix."""
        from similarity_calculation.amazon.emit_cooccurances import emit_cooccurances
        input_path = self.getWorkFile("user_item_matrix_sorted")
        output_path = self.getWorkFile("cooccurances_not_sorted")
        emit_cooccurances(input_path, output_path)
    def do_sort_cooccurances(self):
        """Sort pairs so identical pairs are adjacent for uniq -c."""
        input_path = self.getWorkFile("cooccurances_not_sorted")
        output_path = self.getWorkFile("cooccurances_sorted")
        self._exec_shell("sort -T /cube/services/batch/temp %s > %s" %
                         (input_path, output_path))
    def do_count_cooccurances(self):
        """Collapse sorted pairs into (count, pair) lines."""
        input_path = self.getWorkFile("cooccurances_sorted")
        output_path = self.getWorkFile("cooccurances_counts_raw")
        self._exec_shell("uniq -c %s > %s" % (input_path, output_path))
    def do_format_cooccurances_counts(self):
        """Reformat raw uniq -c output into the similarity file format."""
        from similarity_calculation.amazon.format_item_similarities import format_item_similarities
        input_path = self.getWorkFile("cooccurances_counts_raw")
        output_path = self.getWorkFile("cooccurances_counts_formatted")
        format_item_similarities(input_path, output_path)
    def do_calc_item_similarities(self):
        """Score similarities: log-likelihood for "llh", else raw counts."""
        if SITE["algorithm_type"] == "llh":
            from similarity_calculation.loglikelihood.calc_loglikelihood import calc_loglikelihood
            cooccurances_counts_path = self.getWorkFile(
                "cooccurances_counts_formatted")
            user_counts_path = self.getWorkFile("user_count")
            item_prefer_count_path = self.getWorkFile("item_prefer_count")
            output_path = self.getWorkFile("item_similarities_formatted")
            calc_loglikelihood(cooccurances_counts_path,
                               user_counts_path, item_prefer_count_path, output_path)
        else:
            # Counts are already the similarity scores; just rename.
            input_path = self.getWorkFile("cooccurances_counts_formatted")
            output_path = self.getWorkFile("item_similarities_formatted")
            self._exec_shell("mv %s %s" % (input_path, output_path))
    def do_make_item_similarities_bi_directional(self):
        """Emit each similarity in both (a,b) and (b,a) directions."""
        from similarity_calculation.make_similarities_bidirectional import make_similarities_bidirectional
        input_path = self.getWorkFile("item_similarities_formatted")
        output_path = self.getWorkFile("item_similarities_bi_directional")
        make_similarities_bidirectional(input_path, output_path)
    def do_sort_item_similarities_bi_directional(self):
        """Sort so all similarities of one item are adjacent for top-N."""
        input_path = self.getWorkFile("item_similarities_bi_directional")
        output_path = self.getWorkFile(
            "item_similarities_bi_directional_sorted")
        self._exec_shell("sort -T /cube/services/batch/temp %s > %s" %
                         (input_path, output_path))
    def do_extract_top_n(self):
        """Keep the 20 most similar items per item."""
        from similarity_calculation.extract_top_n import extract_top_n
        input_path = self.getWorkFile(
            "item_similarities_bi_directional_sorted")
        output_path = self.getWorkFile("item_similarities_top_n")
        n = 20
        extract_top_n(input_path, output_path, n)
    def do_upload_item_similarities_result(self):
        """Upload the final top-N similarities to the site database."""
        from common.utils import UploadItemSimilarities
        input_path = self.getWorkFile("item_similarities_top_n")
        uis = UploadItemSimilarities(connection, SITE_ID, self.type)
        uis(input_path)
class VSimiliarityCalcFlow(BaseSimilarityCalcFlow):
    """Similarity pipeline over view ("V") events."""
    def __init__(self):
        BaseSimilarityCalcFlow.__init__(self, "V")
    def getExtractUserItemMatrixJobs(self):
        """Jobs that produce this flow's "user_item_matrix" work file."""
        return [self.do_extract_user_item_matrix,
                self.do_de_duplicate_user_item_matrix]
    def do_extract_user_item_matrix(self):
        """Extract (user, item) view pairs from the backfilled raw logs."""
        from preprocessing.extract_user_item_matrix import v_extract_user_item_matrix
        input_path = self.parent.getWorkFile("backfilled_raw_logs")
        output_path = self.getWorkFile("user_item_matrix_maybe_dup")
        v_extract_user_item_matrix(input_path, output_path)
    def do_de_duplicate_user_item_matrix(self):
        """Sort and uniq the pairs to drop duplicates."""
        input_path = self.getWorkFile("user_item_matrix_maybe_dup")
        output_path = self.getWorkFile("user_item_matrix")
        self._exec_shell("sort -T /cube/services/batch/temp < %s | uniq > %s" %
                         (input_path, output_path))
class PLOSimilarityCalcFlow(BaseSimilarityCalcFlow):
    """Similarity pipeline over "PLO" events."""
    def __init__(self):
        BaseSimilarityCalcFlow.__init__(self, "PLO")
    def getExtractUserItemMatrixJobs(self):
        """Jobs that produce this flow's "user_item_matrix" work file."""
        return [self.do_extract_user_item_matrix,
                self.do_de_duplicate_user_item_matrix]
    def do_extract_user_item_matrix(self):
        """Extract (user, item) PLO pairs from the backfilled raw logs."""
        from preprocessing.extract_user_item_matrix import plo_extract_user_item_matrix
        input_path = self.parent.getWorkFile("backfilled_raw_logs")
        output_path = self.getWorkFile("user_item_matrix_maybe_dup")
        plo_extract_user_item_matrix(input_path, output_path)
    def do_de_duplicate_user_item_matrix(self):
        """Sort and uniq the pairs to drop duplicates."""
        input_path = self.getWorkFile("user_item_matrix_maybe_dup")
        output_path = self.getWorkFile("user_item_matrix")
        self._exec_shell("sort -T /cube/services/batch/temp < %s | uniq > %s" %
                         (input_path, output_path))
class BuyTogetherSimilarityFlow(BaseSimilarityCalcFlow):
    """Similarity pipeline over buy-together events."""
    def __init__(self):
        BaseSimilarityCalcFlow.__init__(self, "BuyTogether")
    def getExtractUserItemMatrixJobs(self):
        """Jobs that produce this flow's "user_item_matrix" work file."""
        return [self.do_extract_user_item_matrix,
                self.do_de_duplicate_user_item_matrix]
    def do_extract_user_item_matrix(self):
        """Extract (user, item) purchase pairs from the backfilled raw logs."""
        from preprocessing.extract_user_item_matrix import buytogether_extract_user_item_matrix
        input_path = self.parent.getWorkFile("backfilled_raw_logs")
        output_path = self.getWorkFile("user_item_matrix_maybe_dup")
        buytogether_extract_user_item_matrix(input_path, output_path)
    def do_de_duplicate_user_item_matrix(self):
        """Sort and uniq the pairs to drop duplicates."""
        input_path = self.getWorkFile("user_item_matrix_maybe_dup")
        output_path = self.getWorkFile("user_item_matrix")
        self._exec_shell("sort -T /cube/services/batch/temp < %s | uniq > %s" %
                         (input_path, output_path))
class ViewedUltimatelyBuyFlow(BaseFlow):
    """Computes "viewed, ultimately bought" statistics.

    Pairs each viewed item with the item eventually bought in the same user
    session, counts the pairs and item views, and uploads the result.
    """
    def __init__(self):
        BaseFlow.__init__(self, "ViewedUltimatelyBuy")
        self.jobs += [self.do_extract_user_view_buy_logs,
                      self.do_sort_user_view_buy_logs,
                      self.do_pair_view_buy,
                      self.count_pairs,
                      self.do_extract_user_item_matrix,
                      self.do_de_duplicate_user_item_matrix,
                      self.count_item_view,
                      self.upload_viewed_ultimately_buy]
    def do_extract_user_view_buy_logs(self):
        """Pull view/buy events out of the backfilled raw logs."""
        from viewed_ultimately_buy.extract_user_view_buy_logs import extract_user_view_buy_logs
        input_path = self.parent.getWorkFile("backfilled_raw_logs")
        output_path = self.getWorkFile("user_view_buy_logs")
        extract_user_view_buy_logs(input_path, output_path)
    def do_sort_user_view_buy_logs(self):
        """Sort so each user's view/buy events are contiguous."""
        input_path = self.getWorkFile("user_view_buy_logs")
        output_path = self.getWorkFile("user_view_buy_logs_sorted")
        self._exec_shell("sort -T /cube/services/batch/temp <%s >%s" %
                         (input_path, output_path))
    def do_pair_view_buy(self):
        """Emit (viewed_item, bought_item) pairs from sorted events."""
        from viewed_ultimately_buy.pair_view_buy import pair_view_buy
        input_path = self.getWorkFile("user_view_buy_logs_sorted")
        output_path = self.getWorkFile("view_buy_pairs")
        pair_view_buy(input_path, output_path)
    def count_pairs(self):
        """Count occurrences of each view->buy pair."""
        input_path = self.getWorkFile("view_buy_pairs")
        output_path = self.getWorkFile("view_buy_pairs_counted")
        self._exec_shell("sort -T /cube/services/batch/temp <%s | uniq -c >%s" %
                         (input_path, output_path))
    def do_extract_user_item_matrix(self):
        """Extract (user, item) view pairs (same extractor as the V flow)."""
        from preprocessing.extract_user_item_matrix import v_extract_user_item_matrix
        input_path = self.parent.getWorkFile("backfilled_raw_logs")
        output_path = self.getWorkFile("user_item_matrix_maybe_dup")
        v_extract_user_item_matrix(input_path, output_path)
    def do_de_duplicate_user_item_matrix(self):
        """Sort and uniq the pairs to drop duplicates."""
        input_path = self.getWorkFile("user_item_matrix_maybe_dup")
        output_path = self.getWorkFile("user_item_matrix")
        self._exec_shell("sort -T /cube/services/batch/temp < %s | uniq > %s" %
                         (input_path, output_path))
    def count_item_view(self):
        # FIXME a hack
        """Count distinct-user views per item from the de-duplicated matrix."""
        input_path = self.getWorkFile("user_item_matrix")
        output_path = self.getWorkFile("item_view_times")
        self._exec_shell(
            "cut -d , -f 2 <%s | sort -T /cube/services/batch/temp | uniq -c >%s" %
            (input_path, output_path))
    def upload_viewed_ultimately_buy(self):
        """Upload view counts and view->buy pair counts to the site db."""
        from viewed_ultimately_buy.upload_viewed_ultimately_buy import upload_viewed_ultimately_buy
        item_view_times_path = self.getWorkFile("item_view_times")
        view_buy_pairs_counted_path = self.getWorkFile(
            "view_buy_pairs_counted")
        upload_viewed_ultimately_buy(
            connection, SITE_ID, item_view_times_path, view_buy_pairs_counted_path)
class EDMRelatedPreprocessingFlow(BaseFlow):
    """Updates user-order data and generates the EDM emailing list."""
    def __init__(self):
        # BUG FIX: the flow name was copy-pasted as "ViewedUltimatelyBuy",
        # which made this flow share a work directory with
        # ViewedUltimatelyBuyFlow (BaseFlow.getWorkDir keys on self.name).
        BaseFlow.__init__(self, "EDMRelatedPreprocessing")
        self.jobs += [self.do_update_user_orders_collection,
                      self.do_generate_edm_emailing_list]
    def do_update_user_orders_collection(self):
        """Refresh the per-user orders collection for this site."""
        from edm_calculations import doUpdateUserOrdersCollection
        doUpdateUserOrdersCollection(connection, SITE_ID)
    def do_generate_edm_emailing_list(self):
        """Generate the EDM emailing list for this site."""
        from edm_calculations import generateEdmEmailingList
        generateEdmEmailingList(connection, SITE_ID)
class BeginFlow(BaseFlow):
    """Root of the flow DAG; does nothing itself, only triggers dependents."""
    def __init__(self):
        BaseFlow.__init__(self, "Root")
        self.jobs += [self.begin]
    def begin(self):
        # Intentionally a no-op: running the root just kicks off the DAG.
        pass
# TODO: removed items' similarities should also be removed.
# Module-level wiring of the calculation DAG: every flow hangs off
# preprocessing_flow, which itself hangs off the no-op root flow.
begin_flow = BeginFlow()
preprocessing_flow = PreprocessingFlow()
preprocessing_flow.dependOn(begin_flow)
hive_based_statistics_flow = HiveBasedStatisticsFlow()
hive_based_statistics_flow.dependOn(preprocessing_flow)
v_similarity_calc_flow = VSimiliarityCalcFlow()
v_similarity_calc_flow.dependOn(preprocessing_flow)
plo_similarity_calc_flow = PLOSimilarityCalcFlow()
plo_similarity_calc_flow.dependOn(preprocessing_flow)
buy_together_similarity_flow = BuyTogetherSimilarityFlow()
buy_together_similarity_flow.dependOn(preprocessing_flow)
viewed_ultimately_buy_flow = ViewedUltimatelyBuyFlow()
viewed_ultimately_buy_flow.dependOn(preprocessing_flow)
# EDM preprocessing is currently disabled.
#edm_related_preprocessing_flow = EDMRelatedPreprocessingFlow()
# edm_related_preprocessing_flow.dependOn(preprocessing_flow)
def createCalculationRecord(site_id):
    """Insert a fresh calculation record for the site and return its id."""
    calculation_id = str(uuid.uuid4())
    record = {
        "calculation_id": calculation_id, "begin_datetime": datetime.datetime.now(),
        "flows": {}}
    calculation_records = getSiteDBCollection(
        connection, site_id, "calculation_records")
    calculation_records.save(record)
    return calculation_id
def getCalculationRecord(site_id, calculation_id):
    """Fetch the site's calculation record by id (None if missing)."""
    calculation_records = getSiteDBCollection(
        connection, site_id, "calculation_records")
    return calculation_records.find_one({"calculation_id": calculation_id})
def updateCalculationRecord(site_id, record):
    """Persist a (possibly modified) calculation record back to the site db."""
    calculation_records = getSiteDBCollection(
        connection, site_id, "calculation_records")
    calculation_records.save(record)
def writeFailedJob(site_id, flow_name, failed_job_name):
    """Record the name of a failed job on the current calculation record.

    Fix: honor the site_id parameter instead of silently using the global
    SITE_ID (all callers pass SITE_ID, so behavior is unchanged).
    """
    record = getCalculationRecord(site_id, CALCULATION_ID)
    flow_record = record["flows"][flow_name]
    flow_record["failed_job_name"] = failed_job_name
    updateCalculationRecord(site_id, record)
def writeFlowBegin(site_id, flow_name):
    """Stamp the begin time of a flow on the current calculation record.

    Fix: honor the site_id parameter instead of silently using the global
    SITE_ID (all callers pass SITE_ID, so behavior is unchanged).
    """
    record = getCalculationRecord(site_id, CALCULATION_ID)
    logging.info("FlowBegin: %s" % (flow_name, ))
    record["flows"][flow_name] = {"begin_datetime": datetime.datetime.now()}
    updateCalculationRecord(site_id, record)
def writeFlowEnd(site_id, flow_name, is_successful, is_skipped, err_msg=None):
    """Stamp the end of a flow: end time, success/skip status, error message.

    Fix: honor the site_id parameter instead of silently using the global
    SITE_ID (all callers pass SITE_ID, so behavior is unchanged).
    """
    record = getCalculationRecord(site_id, CALCULATION_ID)
    logging.info("FlowEnd: %s" % (flow_name, ))
    flow_record = record["flows"][flow_name]
    flow_record["end_datetime"] = datetime.datetime.now()
    flow_record["is_successful"] = is_successful
    flow_record["is_skipped"] = is_skipped
    if not is_successful:
        flow_record["err_msg"] = err_msg
    updateCalculationRecord(site_id, record)
def writeCalculationEnd(site_id, is_successful, err_msg=None):
    """Stamp the end of the whole calculation on its record.

    Fix: honor the site_id parameter instead of silently using the global
    SITE_ID (all callers pass SITE_ID, so behavior is unchanged).
    """
    record = getCalculationRecord(site_id, CALCULATION_ID)
    record["end_datetime"] = datetime.datetime.now()
    record["is_successful"] = is_successful
    if not is_successful:
        record["err_msg"] = err_msg
    updateCalculationRecord(site_id, record)
def getManualCalculationSites():
    """Return the available sites queued for a manual calculation.

    Fix: the manual_calculation_list collection handle was re-fetched on
    every loop iteration; it is loop-invariant, so hoist it out.
    """
    manual_calculation_list = connection[
        "tjb-db"]["manual_calculation_list"]
    result = []
    for site in loadSites(connection):
        record_in_db = manual_calculation_list.find_one(
            {"site_id": site["site_id"]})
        if record_in_db is not None:
            result.append(site)
    return result
def updateSiteLastUpdateTs(site_id):
    """Set the site's last_update_ts to the current epoch time."""
    sites = connection["tjb-db"]["sites"]
    sites.update({"site_id": site_id},
                 {"$set": {"last_update_ts": time.time()}})
def is_time_okay_for_automatic_calculation():
    """Automatic calculations may only run between 00:00 and 05:59 local time."""
    current_hour = datetime.datetime.now().hour
    return 0 <= current_hour < 6
def loadSites(connection, site_ids=None):
    """Return all sites flagged available, optionally restricted to site_ids.

    A falsy site_ids (None or empty) returns every available site.
    """
    available = connection["tjb-db"]["sites"].find({'available': 'on'})
    if site_ids:
        return [site for site in available if site["site_id"] in site_ids]
    return [site for site in available]
def workOnSite(site, is_manual_calculation=False):
calculation_result = None
# Pop a job
manual_calculation_list = connection["tjb-db"]["manual_calculation_list"]
record_in_db = manual_calculation_list.find_one(
{"site_id": site["site_id"]})
if record_in_db is not None:
manual_calculation_list.remove(record_in_db)
# Proceed the job
now = time.time()
is_time_interval_okay_for_auto | |
# -*- coding: utf-8 -*-
"""
profiling.__main__
~~~~~~~~~~~~~~~~~~
The command-line interface to profile a script or view profiling results.
.. sourcecode:: console
$ profiling --help
:copyright: (c) 2014-2017, What! Studio
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from datetime import datetime
from functools import partial, wraps
import importlib
import os
try:
import cPickle as pickle
except ImportError:
import pickle
import runpy
import signal
import socket
from stat import S_ISREG, S_ISSOCK
import sys
import threading
import time
import traceback
import click
from click_default_group import DefaultGroup
from six import exec_
from six.moves import builtins
from six.moves.configparser import ConfigParser, NoOptionError, NoSectionError
from profiling import remote, sampling, tracing
from profiling.__about__ import __version__
from profiling.profiler import Profiler
from profiling.remote.background import BackgroundProfiler
from profiling.remote.client import FailoverProfilingClient, ProfilingClient
from profiling.remote.select import SelectProfilingServer
from profiling.sampling import samplers, SamplingProfiler
from profiling.tracing import timers, TracingProfiler
from profiling.viewer import bind_game_keys, bind_vim_keys, StatisticsViewer
__all__ = ['cli', 'profile', 'view']
DEFAULT_ENDPOINT = '127.0.0.1:8912'
class ProfilingCLI(DefaultGroup):
    """Click group with a default subcommand plus alias support."""
    def __init__(self, *args, **kwargs):
        super(ProfilingCLI, self).__init__(*args, **kwargs)
        # Maps alias -> canonical command name.
        self.command_name_aliases = {}
    def command(self, *args, **kwargs):
        """Usage::
           @cli.command(aliases=['ci'])
           def commit():
               ...
        """
        aliases = kwargs.pop('aliases', None)
        decorator = super(ProfilingCLI, self).command(*args, **kwargs)
        if aliases is None:
            return decorator
        def _decorator(f):
            # Register the command, then record every alias for it.
            cmd = decorator(f)
            for alias in aliases:
                self.command_name_aliases[alias] = cmd.name
            return cmd
        return _decorator
    def get_command(self, ctx, cmd_name):
        # Resolve alias; unknown names fall through to Click's own lookup.
        try:
            cmd_name = self.command_name_aliases[cmd_name]
        except KeyError:
            pass
        return super(ProfilingCLI, self).get_command(ctx, cmd_name)
@click.command('profiling', cls=ProfilingCLI, default='profile')
@click.version_option(__version__)
def cli():
    """Group entry point: set up sys.path and viewer key bindings."""
    # Allow importing modules from the current working directory.
    sys.path.insert(0, os.curdir)
    bind_vim_keys()
    bind_game_keys()
class read_config(object):
    """Reads a config once in a Click context.

    Calling ``read_config()`` returns a ConfigParser; ``__new__`` caches the
    parsed config per Click context, so repeated calls within one context
    re-read nothing.
    """
    # Config files searched, in order.
    filenames = ['setup.cfg', '.profiling']
    # (click context, parsed config) cache shared by all calls.
    ctx_and_config = (None, None)
    def __new__(cls):
        ctx, config = cls.ctx_and_config
        current_ctx = click.get_current_context(silent=True)
        if current_ctx != ctx:
            # New context: (re-)parse the config files and refresh the cache.
            config = ConfigParser()
            config.read(cls.filenames)
            cls.ctx_and_config = (current_ctx, config)
        return config
def option_getter(type):
    """Gets an unbound method to get a configuration option as the given type.
    """
    typed_getters = {
        int: ConfigParser.getint,
        float: ConfigParser.getfloat,
        bool: ConfigParser.getboolean,
    }
    # Unknown (or None) types fall back to the plain string getter.
    return typed_getters.get(type, ConfigParser.get)
def config_default(option, default=None, type=None, section=cli.name):
    """Guesses a default value of a CLI option from the configuration.
    ::
       @click.option('--locale', default=config_default('locale'))
    """
    # Returned lazily so the config is only read when Click asks for the
    # default, inside an active Click context.
    def f(option=option, default=default, type=type, section=section):
        config = read_config()
        if type is None and default is not None:
            # detect type from default.
            type = builtins.type(default)
        get_option = option_getter(type)
        try:
            return get_option(config, section, option)
        except (NoOptionError, NoSectionError):
            # Option absent from config: fall back to the hard-coded default.
            return default
    return f
def config_flag(option, value, default=False, section=cli.name):
    """Guesses whether a CLI flag should be turned on or off from the
    configuration. If the configuration option value is same with the given
    value, it returns ``True``.
    ::
       @click.option('--ko-kr', 'locale', is_flag=True,
                     default=config_flag('locale', 'ko_KR'))
    """
    # Returns a lazily-evaluated boolean proxy: the config is only read when
    # Click coerces the default to bool, inside an active Click context.
    class x(object):
        def __bool__(self, option=option, value=value,
                     default=default, section=section):
            config = read_config()
            type = builtins.type(value)
            get_option = option_getter(type)
            try:
                return get_option(config, section, option) == value
            except (NoOptionError, NoSectionError):
                return default
        # Python 2 spelling of __bool__.
        __nonzero__ = __bool__
    return x()
def get_title(src_name, src_type=None):
    """Normalizes a source name as a string to be used for viewer's title."""
    if src_type == 'tcp':
        # src_name is an (address, port) pair.
        return '%s:%s' % (src_name[0], src_name[1])
    return os.path.basename(src_name)
def make_viewer(mono=False, watch=None, *loop_args, **loop_kwargs):
    """Makes a :class:`profiling.viewer.StatisticsViewer` with common options.
    """
    viewer = StatisticsViewer(watch=watch)
    event_loop = viewer.loop(*loop_args, **loop_kwargs)
    if mono:
        # A 1-color terminal palette effectively disables coloring.
        event_loop.screen.set_terminal_properties(1)
    return (viewer, event_loop)
def spawn_thread(func, *args, **kwargs):
    """Spawns a daemon thread running ``func`` and returns it."""
    thread = threading.Thread(target=func, args=args, kwargs=kwargs)
    # Daemonize so a lingering worker thread never blocks interpreter exit.
    thread.daemon = True
    thread.start()
    return thread
def spawn(mode, func, *args, **kwargs):
    """Spawns a thread-like object which runs the given function concurrently.
    Available modes:
    - `threading`
    - `gevent`
    - `eventlet`
    :raises ValueError: if ``mode`` is not one of :attr:`spawn.modes`.
    """
    if mode is None:
        # 'threading' is the default mode.
        mode = 'threading'
    elif mode not in spawn.modes:
        # validate the given mode.
        raise ValueError('Invalid spawn mode: %s' % mode)
    if mode == 'threading':
        return spawn_thread(func, *args, **kwargs)
    elif mode == 'gevent':
        import gevent
        import gevent.monkey
        # Patch blocking primitives so select/socket calls cooperate.
        gevent.monkey.patch_select()
        gevent.monkey.patch_socket()
        return gevent.spawn(func, *args, **kwargs)
    elif mode == 'eventlet':
        import eventlet
        eventlet.patcher.monkey_patch(select=True, socket=True)
        return eventlet.spawn(func, *args, **kwargs)
    assert False
spawn.modes = ['threading', 'gevent', 'eventlet']
#: Identity function: hands back its single argument untouched.
noop = lambda x: x
def import_(module_name, name):
    """Imports an object by a relative module path::
       Profiler = import_('profiling.profiler', 'Profiler')
    """
    mod = importlib.import_module(module_name, __package__)
    return getattr(mod, name)
#: Builds a zero-argument callable that performs :func:`import_` lazily.
importer = lambda module_name, name: partial(import_, module_name, name)
# custom parameter types
class Class(click.ParamType):
    """Resolves a subclass of ``base`` by CLI name from the given modules."""
    def __init__(self, modules, base, base_name=None, postfix=True):
        self.modules = modules
        self.base = base
        self.base_name = base_name
        self.postfix = postfix
    def convert(self, value, param, ctx):
        """Map a CLI string to the matching class object, or fail."""
        if value == self.base_name:
            return self.base
        name = value.title()
        if self.postfix:
            # e.g. 'thread' -> 'ThreadTimer' when base is Timer.
            name += self.base.__name__.title()
        for mod in self.modules:
            candidate = getattr(mod, name, None)
            if isinstance(candidate, type) and issubclass(candidate, self.base):
                return candidate
        self.fail('%s not found' % name)
    def get_metavar(self, param):
        return self.base.__name__.upper()
class Script(click.File):
    """A parameter type for Python script."""
    def __init__(self):
        # Scripts are opened in binary mode so compile() sees raw bytes.
        super(Script, self).__init__('rb')
    def convert(self, value, param, ctx):
        """Compile the script and return ``(filename, code, globals)``."""
        with super(Script, self).convert(value, param, ctx) as script_file:
            filename = script_file.name
            code = compile(script_file.read(), filename, 'exec')
        # Mimic how the interpreter seeds __main__ for a script run.
        globals_ = {'__file__': filename, '__name__': '__main__',
                    '__package__': None, '__doc__': None}
        return (filename, code, globals_)
    def get_metavar(self, param):
        return 'PYTHON'
class Module(click.ParamType):
    """A parameter type that resolves a ``-m``-style module name to runnable
    code, mirroring how ``python -m`` seeds ``__main__``.
    """
    def convert(self, value, param, ctx):
        # inspired by @htch's fork.
        # https://github.com/htch/profiling/commit/4a4eb6e
        try:
            # NOTE(review): runpy._get_module_details is a private API whose
            # return shape differs across Python versions — handled below.
            detail = runpy._get_module_details(value)
        except ImportError as exc:
            ctx.fail(str(exc))
        try:
            # since Python 3.4.
            mod_name, mod_spec, code = detail
        except ValueError:
            # Older interpreters return a 4-tuple instead of (name, spec, code).
            mod_name, loader, code, filename = detail
        else:
            loader = mod_spec.loader
            filename = mod_spec.origin
        # follow runpy's behavior.
        pkg_name = mod_name.rpartition('.')[0]
        globals_ = sys.modules['__main__'].__dict__.copy()
        globals_.update(__name__='__main__', __file__=filename,
                        __loader__=loader, __package__=pkg_name)
        return (filename, code, globals_)
    def get_metavar(self, param):
        return 'PYTHON-MODULE'
class Command(click.ParamType):
    """A parameter type for a Python source string passed on the CLI."""
    def convert(self, value, param, ctx):
        """Compile the command string and return ``(filename, code, globals)``."""
        filename = '<string>'
        globals_ = {'__name__': '__main__',
                    '__package__': None, '__doc__': None}
        return (filename, compile(value, filename, 'exec'), globals_)
    def get_metavar(self, param):
        return 'PYTHON-COMMAND'
class Endpoint(click.ParamType):
    """A parameter type for IP endpoint."""
    def convert(self, value, param, ctx):
        """Parse ``'HOST:PORT'`` into a ``(host, int(port))`` tuple."""
        host, port = value.split(':')
        return (host, int(port))
    def get_metavar(self, param):
        return 'HOST:PORT'
class ViewerSource(click.ParamType):
    """A parameter type for :class:`profiling.viewer.StatisticsViewer` source.
    """
    def convert(self, value, param, ctx):
        # Classifies ``value`` as one of 'dump' (regular file), 'sock'
        # (UNIX socket path) or 'tcp' (HOST:PORT endpoint) and returns a
        # (src_type, src_name) tuple; raises ValueError otherwise.
        src_type = False
        try:
            mode = os.stat(value).st_mode
        except OSError:
            # Not a local path; try to parse it as HOST:PORT instead.
            try:
                src_name = Endpoint().convert(value, param, ctx)
            except ValueError:
                # Neither a path nor an endpoint; src_type stays False.
                pass
            else:
                src_type = 'tcp'
        else:
            # An existing path: distinguish a UNIX socket from a dump file.
            src_name = value
            if S_ISSOCK(mode):
                src_type = 'sock'
            elif S_ISREG(mode):
                src_type = 'dump'
        if not src_type:
            raise ValueError('Dump file or socket address required.')
        return (src_type, src_name)
    def get_metavar(self, param):
        return 'SOURCE'
class SignalNumber(click.ParamType):
    """A parameter type for signal number."""
    @staticmethod
    def name_of(signum):
        """Best-effort reverse lookup of a ``SIG*`` name for a number."""
        for name, value in signal.__dict__.items():
            if signum == value and name.startswith('SIG') \
                    and not name.startswith('SIG_'):
                return name
        # No symbolic name known: fall back to the numeric string.
        return str(signum)
    def convert(self, value, param, ctx):
        """Accept an int, a digit string, or a name like INT/SIGINT."""
        if isinstance(value, int):
            return value
        if value.isdigit():
            return int(value)
        signame = value.upper()
        if not signame.startswith('SIG'):
            signame = 'SIG' + signame
        if signame.startswith('SIG_'):
            # SIG_* entries (SIG_DFL, SIG_IGN, ...) are handlers, not signals.
            self.fail('Invalid signal %s' % signame)
        try:
            return getattr(signal, signame)
        except AttributeError:
            self.fail('Unknown signal %s' % signame)
    def get_metavar(self, param):
        return 'SIGNUM'
# common parameters
class Params(object):
    """A reusable bundle of decorators applied as a single decorator."""
    def __init__(self, params):
        self.params = params
    def __call__(self, f):
        """Apply every stored decorator to ``f``, last one innermost."""
        for decorate in reversed(self.params):
            f = decorate(f)
        return f
    def __add__(self, params):
        """Return a new bundle with the extra decorators appended."""
        return self.__class__(self.params + params)
def profiler_options(f):
    # Bundles every profiler-selection CLI option (tracing vs. sampling plus
    # their tuning knobs) and collapses them into a single
    # ``profiler_factory`` keyword argument before invoking ``f``.
    # tracing profiler options
    @click.option(
        '-T', '--tracing', 'import_profiler_class',
        flag_value=importer('profiling.tracing', 'TracingProfiler'),
        default=config_flag('profiler', 'tracing', True),
        help='Use tracing profiler. (default)')
    @click.option(
        '--timer', 'timer_class',
        type=Class([timers], timers.Timer, 'basic'),
        default=config_default('timer', 'basic'),
        help='Choose CPU timer for tracing profiler. (basic|thread|greenlet)')
    # sampling profiler options
    @click.option(
        '-S', '--sampling', 'import_profiler_class',
        flag_value=importer('profiling.sampling', 'SamplingProfiler'),
        default=config_flag('profiler', 'sampling', False),
        help='Use sampling profiler.')
    @click.option(
        '--sampler', 'sampler_class',
        type=Class([samplers], samplers.Sampler),
        default=config_default('sampler', 'itimer'),
        help='Choose frames sampler for sampling profiler. (itimer|tracing)')
    @click.option(
        '--sampling-interval', type=float,
        default=config_default('sampling-interval', samplers.INTERVAL),
        help='How often sample. (default: %.3f cpu sec)' % samplers.INTERVAL)
    # etc
    @click.option(
        '--pickle-protocol', type=int,
        default=config_default('pickle-protocol', remote.PICKLE_PROTOCOL),
        help='Pickle protocol to dump result.')
    @wraps(f)
    def wrapped(import_profiler_class, timer_class, sampler_class,
                sampling_interval, **kwargs):
        # The -T/-S flags store a lazy importer; resolve it to the class now.
        profiler_class = import_profiler_class()
        assert issubclass(profiler_class, Profiler)
        if issubclass(profiler_class, TracingProfiler):
            # profiler requires timer.
            timer_class = timer_class or tracing.TIMER_CLASS
            timer = timer_class()
            profiler_kwargs = {'timer': timer}
        elif issubclass(profiler_class, SamplingProfiler):
            sampler_class = sampler_class or sampling.SAMPLER_CLASS
            sampler = sampler_class(sampling_interval)
            profiler_kwargs = {'sampler': sampler}
        else:
            profiler_kwargs = {}
        # Hand the command a zero-argument factory instead of raw options.
        profiler_factory = partial(profiler_class, **profiler_kwargs)
        return f(profiler_factory=profiler_factory, **kwargs)
    return wrapped
def profiler_arguments(f):
    """Attach the script/module/command arguments shared by profiling
    commands and normalize them into ``script``/``argv`` before calling
    ``f``.
    """
    @click.argument('argv', nargs=-1)
    @click.option('-m', 'module', type=Module(),
                  help='Run library module as a script.')
    @click.option('-c', 'command', type=Command(),
                  help='Program passed in as string.')
    @wraps(f)
    def wrapped(argv, module, command, **kwargs):
        if module is not None and command is not None:
            raise click.UsageError('Option -m and -c are exclusive')
        script = module or command
        if script is None:
            # Neither -m nor -c: the first positional argument is the script.
            if not argv:
                raise click.UsageError('Script not specified')
            script_filename, argv = argv[0], argv[1:]
            script = Script().convert(script_filename, None, None)
        kwargs.update(script=script, argv=argv)
        return f(**kwargs)
    return wrapped
# Decorator bundle shared by every command that opens the statistics viewer.
viewer_options = Params([
    click.option('--mono', is_flag=True, help='Disable coloring.'),
    click.option('--watch', help='Filter traces with a specific statement.'),
])
onetime_profiler_options = Params([
click.option(
'-d', '--dump', 'dump_filename', | |
<filename>compare_file_lists.py
#!/usr/bin/python3
# This script has been created to compare two large areas of data storage and find which files are missing in the main data area, but are present in the backup and therefore need to be copied over still.
# This script will compare lists of files (which have a directory path and a check sum) and produce the following output lists:
# - files that occur in both lists
# - files that are missing in directory a but are in directory b
# - files that are missing in directory b but are in directory a
# We will think of directory a as being the principal copy of the data, i.e. ALL the files should exist here. Directory b is the backup and generally has various copies of the data that should be in directory a.
# Import packages
import csv
import os
import datetime
import pprint
def get_current_date():
    """Get the current date and write it in the format YYYYMMDD."""
    # strftime yields the zero-padded fields in one call instead of
    # formatting year/month/day separately and concatenating.
    return datetime.datetime.now().strftime("%Y%m%d")
def get_current_time():
    """Get the current time and write it in the format HHMMSS (24-hour).
    (Docstring fixed: the produced format is hour-minute-second, the
    original said "HHMMDD".)
    """
    return datetime.datetime.now().strftime("%H%M%S")
def how_to_do_file_comparison():
    """Ask the user whether to compare selected file lists or whole storage
    locations.
    :returns: the chosen method string, "file lists" or "storage location".
    :raises SystemExit: on any other answer.
    """
    method = str(input("Would you like to compare selected file lists or files by storage location? Please enter: file lists OR storage location "))
    if method == "file lists" or method == "storage location":
        print("OK. This script will compare by ", method)
    else:
        print(
            "Your input was invalid. It should be file lists or storage location. This script will now exit. Please retry.")
        # BUG FIX: the original referenced ``exit`` without calling it, so
        # the script carried on with an invalid method. Exit for real.
        raise SystemExit(1)
    return method
def get_storage_locations_to_compare(possible_storage_locations):
    """Ask the user for the two storage locations to compare.
    :param possible_storage_locations: list of valid location names.
    :returns: tuple of the two validated location names.
    """
    print("The possible storage locations are: ")
    pprint.pprint(possible_storage_locations)
    # The two identical prompt loops of the original are factored into one
    # helper so the validation logic exists in a single place.
    storage_location1 = _prompt_storage_location("first", possible_storage_locations)
    storage_location2 = _prompt_storage_location("second", possible_storage_locations)
    return (storage_location1, storage_location2)
def _prompt_storage_location(ordinal, possible_storage_locations):
    """Keep asking until the user names a known storage location."""
    choice = None
    while choice not in possible_storage_locations:
        choice = str(input("Type the name of the %s storage location that you would like to compare. " % ordinal))
        if choice in possible_storage_locations:
            print("Going to compare ", choice)
        else:
            print("That storage location does not exist. Please type another storage location. ")
    return choice
def create_list_of_file_lists(possible_storage_locations, dir_path_to_files, dir_name_appendix):
    """List the files inside every "<location>_<dir_name_appendix>" folder so
    the user can inspect the available comparison options.
    """
    files_to_compare = []
    for location in possible_storage_locations:
        location_filepath = dir_path_to_files + location + "_" + dir_name_appendix
        print("Filepath: ", location_filepath)
        # NOTE: changes the process working directory, as the original did.
        os.chdir(location_filepath)
        files_to_compare.extend(os.listdir())
    return files_to_compare
def get_directories_to_compare(possible_directory_locations):
    """Ask the user for two directory names (e.g. ace_data) to compare,
    each validated against ``possible_directory_locations``.
    BUG FIX: the original validated against the module-level global
    ``possible_directories`` and silently ignored its own parameter.
    :returns: tuple of the two validated directory names.
    """
    directory1 = _prompt_directory("first", possible_directory_locations)
    directory2 = _prompt_directory("second", possible_directory_locations)
    return (directory1, directory2)
def _prompt_directory(ordinal, possible_directory_locations):
    """Keep asking until the user types a known directory name."""
    choice = None
    while choice not in possible_directory_locations:
        choice = str(input("Type the name of the %s directory that you would like to compare. " % ordinal))
        if choice in possible_directory_locations:
            print("Going to compare ", choice)
        else:
            print("That directory does not exist. Please type another directory. ")
    return choice
def dict_files_in_storage_location(storage_location, dir_path_to_files,
                                   name_appendix=None, file_appendix=None):
    """Create a dictionary of the files in a storage location, keyed by the
    directory parsed from each filename, with (storage_location, filename)
    tuples as values (a later file for the same directory replaces an
    earlier one).
    ``name_appendix`` / ``file_appendix`` default to the module-level
    ``dir_name_appendix`` / ``filename_appendix`` globals so existing
    two-argument calls keep working, while tests/callers may now override
    them explicitly instead of relying on hidden globals.
    """
    if name_appendix is None:
        name_appendix = dir_name_appendix
    if file_appendix is None:
        file_appendix = filename_appendix
    # NOTE: changes the process working directory, as the original did.
    os.chdir(dir_path_to_files + "/" + storage_location + "_" + name_appendix)
    dict_files = {}
    for file in os.listdir():
        directory = get_directory_from_filename(file, file_appendix)
        dict_files[directory] = (storage_location, file)
    return dict_files
def compare_dictionaries_on_key(dictionary1, dictionary2):
    """Compare two dictionaries on their keys and output matching results.
    Each entry is ``[key, filename1, filename2]`` for every key present in
    both dictionaries, where values are (location, filename) tuples.
    """
    # Membership testing directly on the dict replaces the needless
    # .keys() calls, and the dead per-iteration list initialisation of the
    # original is gone.
    return [[key, dictionary1[key][1], dictionary2[key][1]]
            for key in dictionary1 if key in dictionary2]
def get_filename(filepath):
    """Get the filename from a full filepath."""
    return os.path.basename(filepath)
def split_filename(filename):
    """Split a filename into its underscore-separated parts."""
    return filename.split("_")
def get_storage_location_from_filename(filename):
    """Get the storage location (the text before the first underscore)."""
    # Inlines the trivial split helper: the location is the first '_' field.
    return filename.split("_")[0]
def get_directory_from_filename(filename, filename_appendix):
    """Get the directory part of a list filename as a string.
    Filenames look like ``<location>_<directory>_<appendix>``; everything
    between the location prefix and the appendix, minus the joining
    underscores, is the directory.
    """
    location_prefix = filename.split("_")[0]
    remainder = filename.split(location_prefix)[1]
    return remainder.split(filename_appendix)[0].strip("_")
def create_list_from_file(input_file):
    """Read a file into a list of [checksum, rest-of-line] pairs.
    Each stripped line is split once on the first single space; anything
    after it (including the extra leading space from sha1sum's
    double-space separator) stays in the second element.
    """
    with open(input_file) as handle:
        return [line.strip().split(' ', 1) for line in handle]
def check_length_list(input_file, file_list):
    """Check that the number of lines in ``input_file`` matches the length
    of ``file_list`` built from it, printing a discrepancy report (or a
    confirmation) either way.
    :returns: the length of ``file_list``.
    """
    # BUG FIX: the original leaked the handle from
    # ``open(input_file).readlines()``; the with-block closes it reliably.
    with open(input_file) as infile:
        count_file_length = len(infile.readlines())
    count_file_list = len(file_list)
    if count_file_length != count_file_list:
        print(input_file, "file length: ", count_file_length)
        print(input_file, "file list: ", count_file_list)
    else:
        print(input_file, " file length is the same as the list of files: all of the files have been read in. Number of files: ", count_file_list)
    return count_file_list
def nested_lists_to_sets(nested_lists):
    """Convert a list of lists into a set of tuples (hashable rows)."""
    return {tuple(row) for row in nested_lists}
def difference_between_sets(set_a, set_b):
    """Find elements that are in set b but not in set a."""
    missing_elements = set_b.difference(set_a)
    print("There are ", len(missing_elements), " missing elements.")
    return missing_elements
def write_set_to_file(set_name, output_file):
    """Dump every element of ``set_name`` as one CSV row of ``output_file``,
    echoing each element as it is written.
    """
    try:
        out_handle = open(output_file, 'w')
    except IOError:
        print("Not able to open outfile: ", output_file)
        exit(1)
    else:
        with out_handle:
            csv_writer = csv.writer(out_handle, lineterminator='\n')
            for element in set_name:
                csv_writer.writerows([element])
                print(element, " written to file")
def create_log(information, output_file):
    """Append the given rows to the comparison log file as CSV, recording
    what was compared, when, and what it produced.
    """
    try:
        log_handle = open(output_file, 'a')
    except IOError:
        print("Not able to open outfile: ", output_file)
        exit(1)
    else:
        with log_handle:
            csv.writer(log_handle, lineterminator='\n').writerows(information)
            print(information, " written to file")
###########################
# The following details are specific to comparing the ace data.
# Known storage areas (primary and its migration copy) offered for comparison.
possible_storage_locations = ['testspinas1', 'testspinas1-migr']
# Top-level data directories whose file lists may be compared.
possible_directories = ['ace_data', 'data_admin', 'work_leg1', 'work_leg4']
# Folder holding the "<location>_<dir_name_appendix>" directories of lists.
dir_path_to_files = '/home/jen/projects/ace_data_management/wip/checking_nas/'
logfile = dir_path_to_files + "checking_log.txt"
# Suffixes used to build/parse list filenames, e.g.
# "<location>_<directory>_sha1sum_output.txt".
filename_appendix = "sha1sum_output.txt"
dir_name_appendix = "compiled_output"
###########################
# Ask the user how to deal with the file comparison.
# NOTE(review): this prompts interactively at import time — confirm that is
# intended before importing this module from elsewhere.
method_of_file_comparison = how_to_do_file_comparison()
def compare_storage_locations(possible_storage_locations):
"""This function compares the files of file lists by storage location."""
# Get a tuple of the storage locations that the user wants to compare:
storage_locations = get_storage_locations_to_compare(possible_storage_locations)
print("storage locations: ", storage_locations)
storage_location1 = storage_locations[0]
print("SL1: ", storage_location1)
storage_location2 = storage_locations[1]
print("SL2: ", storage_location2)
# Create a dictionary of the files to compare.
files_storage_location1 = dict_files_in_storage_location(storage_location1, dir_path_to_files)
print("files SL1: ", files_storage_location1)
files_storage_location2 = dict_files_in_storage_location(storage_location2, dir_path_to_files)
print("files SL2: ", files_storage_location2)
# Compare the dictionaries of files on the key (directory) and output a list of pairs of files (in lists) to compare.
files_to_compare | |
curRow + (N - 1) * dimx
pointCon = []
for constr in self.pointConstr:
pointCon.append(y[curN: curN + constr.nf])
curN += constr.nf
multiPointCon = []
for constr in self.multiPointConstr:
multiPointCon.append(y[curN: curN + constr.nf])
curN += constr.nf
pathCon = []
for constr in self.pathConstr:
pathCon.append(np.reshape(y[curN: curN + N * constr.nf], (N, constr.nf)))
curN += N * constr.nf
nonLinCon = []
for constr in self.nonLinConstr:
nonLinCon.append(y[curN: curN + constr.nf])
curN += constr.nf
# check bounds, return a -1, 1 value for non-equality bounds, and 0 for equality bounds
useX, useU, useP = self.__parseX__(guess)
Xbound = checkInBounds(useX, self.xbd)
x0bound = checkInBounds(useX[0], self.x0bd)
xfbound = checkInBounds(useX[-1], self.xfbd)
ubound = checkInBounds(useU, self.ubd)
if self.dimp > 0:
pbound = checkInBounds(useP, self.pbd)
else:
pbound = None
if self.t0ind > 0:
t0bound = checkInBounds(guess[self.t0ind], self.t0)
else:
t0bound = None
if self.tfind > 0:
tfbound = checkInBounds(guess[self.tfind], self.tf)
else:
tfbound = None
if self.lenAddX > 0:
addx = self.__parseAddX__(guess)
addXbound = [checkInBounds(addx_, [addx__.lb, addx__.ub]) for addx_, addx__ in zip(addx, self.addX)]
else:
addXbound = None
result = {'obj': obj, 'dyn': dynCon, 'Xbd': Xbound, 'Ubd': ubound, 'x0bd': x0bound, 'xfbd': xfbound, 'Pbd': pbound, 't0bd': t0bound, 'tfbd': tfbound, 'addXbd': addXbound}
if pointCon:
result['point'] = pointCon
if multiPointCon:
result['mpoint'] = multiPointCon
if pathCon:
result['path'] = pathCon
if nonLinCon:
result['nonlin'] = nonLinCon
return result
def parse_sol(self, sol):
"""Call parseX function from utility and return a dict of solution.
:param sol: ndarray, the solution.
"""
X, U, P = self.__parseX__(sol)
if self.dimp == 0:
P = None
h, tgrid = self.__get_time_grid__(sol)
obj = self.__parseObj__(sol)
if self.lenAddX == 0:
return {'t': tgrid, 'x': X, 'u': U, 'p': P, 'obj': obj}
else:
return {'t': tgrid, 'x': X, 'u': U, 'p': P, 'addx': self.__parseAddX__(sol), 'obj': obj}
    def parseSol(self, sol):
        """Alias for :func:`~trajOptLib.TrajOptProblem.parse_sol`"""
        # Kept for backward compatibility with the older camelCase API.
        return self.parse_sol(sol)
def __get_time_grid__(self, x):
"""Based on initial guess x, get the time grid for discretization.
:param x: ndarray, the guess/sol.
:returns: h: float, grid size
:returns: useT: the grid being used
"""
if self.fixt0:
uset0 = self.t0
else:
uset0 = x[self.t0ind]
if self.fixtf:
usetf = self.tf
else:
usetf = x[self.tfind]
h = (usetf - uset0) / (self.N - 1)
useT = np.linspace(uset0, usetf, self.N)
return h, useT
def __parseObj__(self, x):
if self.snopt_mode:
return x[self.numSol - self.objaddn:]
else:
return 0
def __parseAddX__(self, x):
numTraj = self.numTraj
addX = []
for addx in self.addX:
addX.append(x[numTraj: numTraj + addx.n])
numTraj += addx.n
return addX
    def __callf__(self, x, y): # TODO: remove callf case
        """Evaluate those constraints and objective functions.
        :param x: ndarray, the solution/guess vector.
        :param y: ndarray, written in place; ``y[0]`` receives the objective
            and the following rows the constraint values.
        :returns: the row index one past the last constraint written.
        """
        # NOTE(review): ``self.__getTimeGrid`` is a name-mangled attribute and
        # no method of that exact name is visible in this chunk (the defined
        # helper is ``__get_time_grid__``) — confirm the helper exists on the
        # class or a base, otherwise this path raises AttributeError.
        h, useT = self.__getTimeGrid(x)
        useX, useU, useP = self.__parseX__(x)
        # evaluate objective function
        self.__objModeF__(0, h, useT, useX, useU, useP, x, y)
        # evaluate the system dynamics constraint
        curRow = 1
        curRow = self.__dynconstrModeF__(curRow, h, useT, useX, useU, useP, y)
        # evaluate other constraints
        curRow = self.__constrModeF__(curRow, h, useT, useX, useU, useP, x, y)
        return curRow
    def __objModeF__(self, curRow, h, useT, useX, useU, useP, x, y):
        """Calculate objective function. F mode
        :param curRow: int, index from which we write on
        :param h, useT, useX, useU, useP: parsed solution
        :param x: ndarray, the sol, it is used for linear constraints
        :param y: ndarray, the F to be written. The first row stores the objective function
        """
        y[0] = 0.0
        tmpout = np.zeros(1)
        # Linear objectives evaluated at a single knot point: A @ [t, x, u, p].
        for obj in self.linPointObj:
            tmpx = np.concatenate(([useT[obj.index]], useX[obj.index], useU[obj.index], useP[obj.index]))
            y[0] += obj.A.dot(tmpx)
        # Linear path objectives, integrated with a left-endpoint rectangle
        # rule: each of the first N-1 knots is weighted by the grid step h.
        for obj in self.linPathObj:
            for i in range(self.N - 1):
                tmpx = np.concatenate(([useT[i]], useX[i], useU[i], useP[i]))
                obj.__callf__(tmpx, tmpout)
                y[0] += tmpout[0] * h
        # Linear objectives over the full solution vector.
        for obj in self.linearObj:
            y[0] += obj.A.dot(x)
        # Nonlinear single-point objectives.
        for obj in self.nonPointObj:
            tmpx = np.concatenate(([useT[obj.index]], useX[obj.index], useU[obj.index], useP[obj.index]))
            obj.__callf__(tmpx, tmpout)
            y[0] += tmpout[0]
        # Nonlinear path objectives, same rectangle-rule integration.
        for obj in self.nonPathObj:
            for i in range(self.N - 1):
                tmpx = np.concatenate(([useT[i]], useX[i], useU[i], useP[i]))
                obj.__callf__(tmpx, tmpout)
                y[0] += tmpout[0] * h
        # Nonlinear objectives over the whole vector or several knot points.
        for obj in self.nonLinObj:
            if isinstance(obj, NonLinearObj):
                obj.__callf__(x, tmpout)
            else: # NonLinearMultiPointObj
                xins = [np.concatenate(([useT[idx]], useX[idx], useU[idx], useP[idx])) for idx in obj.indexes]
                obj.__callf__(xins, tmpout)
            y[0] += tmpout[0]
        # add lqr cost, if applicable
        if self.lqrObj is not None:
            y[0] += self.lqrObjf(h, useX, useU, useP)
def __constrModeF__(self, curRow, h, useT, useX, useU, useP, x, y):
"""Calculate constraint function. F mode
:param curRow: int, index from which we write on
:param h, useT, useX, useU, useP: parsed solution
:param y: ndarray, the F to be written
:returns: curRow: current row after we write on y
"""
for constr in self.pointConstr:
tmpx = np.concatenate(([useT[constr.index]], useX[constr.index], useU[constr.index], useP[constr.index]))
constr.__evalf__(tmpx, y[curRow: curRow + constr.nf])
curRow += constr.nf
for constr in self.multiPointConstr:
xin = [np.concatenate(([useT[idx]], useX[idx], useU[idx], useP[idx])) for idx in constr.indexes]
constr.__evalf__(xin, y[curRow: curRow + constr.nf])
curRow += constr.nf
for constr in self.pathConstr:
for i in range(self.N):
tmpx = np.concatenate(([useT[i]], useX[i], useU[i], useP[i]))
constr.__evalf__(tmpx, y[curRow: curRow + constr.nf])
self.numF
curRow += constr.nf
for constr in self.nonLinConstr:
constr.__evalf__(x, y[curRow: curRow + constr.nf])
curRow += constr.nf
return curRow
def __dynconstrModeF__(self, curRow, h, useT, useX, useU, useP, y):
"""Calculate constraint from dynamics. F mode.
:param curRow: int, index from which we write on
:param h, useT, useX, useU, useP: the parsed sol
:param y: ndarray, the F
:returns: curRow: current row after we write on y
"""
# loop over all system dynamics constraint
cDyn = np.reshape(y[curRow:curRow + (self.N - 1) * self.dimx], (self.N - 1, self.dimx))
for i in range(self.N - 1):
# evaluate gradient of system dynamics
ydot = self.sys.dyn(useT[i], useX[i], useU[i], useP[i])
cDyn[i] = useX[i] + h * ydot - useX[i + 1]
return curRow
    def __callg__(self, x, y, G, row, col, rec, needg):
        """Evaluate those constraints, objective functions, and constraints. It simultaneously allocates sparsity matrix.
        :param x: ndarray, the solution to the problem
        :param y: ndarray, return F
        :param G, row, col: ndarray, information of gradient
        :param rec, needg: if we record/ if we need gradient
        :returns: (curRow, curNg): the row and gradient-entry counters after
            all writes.
        """
        # NOTE(review): ``self.__getTimeGrid`` is name-mangled and not defined
        # in this chunk (cf. ``__get_time_grid__`` above) — confirm it exists.
        h, useT = self.__getTimeGrid(x)
        useX, useU, useP = self.__parseX__(x)
        # loop over all system dynamics constraint
        curRow = 1  # row 0 is reserved for the objective value
        curNg = 0  # running count of nonzero gradient entries
        curRow, curNg = self.__dynconstrModeG(curRow, curNg, h, useT, useX, useU, useP, y, G, row, col, rec, needg)
        curRow, curNg = self.__constrModeG(curRow, curNg, h, useT, useX, useU, useP, x, y, G, row, col, rec, needg)
        curRow, curNg = self.__objModeG(curRow, curNg, h, useT, useX, useU, useP, x, y, G, row, col, rec, needg)
        return curRow, curNg
def __dynconstrModeG(self, curRow, curNg, h, useT, useX, useU, useP, y, G, row, col, rec, needg):
"""Evaluate the constraints imposed by system dynamics"""
dimx, dimu, dimp = self.dimx, self.dimu, self.dimp
cDyn = np.reshape(y[curRow: curRow + (self.N - 1) * self.dimx], (self.N - 1, self.dimx))
for i in range(self.N - 1):
# evaluate gradient of system dynamics TODO: support other types of integration scheme
ydot, Jac = self.sys.jac_dyn(useT[i], useX[i], useU[i], useP[i]) # TODO: support in-place Jacobian
cDyn[i] = useX[i] + h * ydot - useX[i + 1]
if needg:
if not self.dynSparse:
Jac *= h # always useful
baseCol = i * self.dimpoint
# assign a block for x
G[curNg: curNg + dimx * dimx] = (Jac[:, 1:1 + dimx] + np.eye(dimx)).flatten()
if rec:
tmpMat = np.tile(np.arange(dimx), (dimx, 1))
row[curNg: curNg + dimx * dimx] = curRow + tmpMat.T.flatten()
col[curNg: curNg + dimx * dimx] = baseCol + tmpMat.flatten()
curNg += dimx * dimx
# assign a block for u
G[curNg: curNg + dimx * dimu] = Jac[:, 1 + dimx:1 + dimx + dimu].flatten()
if rec:
row[curNg: curNg + dimx * dimu] = curRow + np.tile(np.arange(dimx), (dimu, 1)).T.flatten()
col[curNg: curNg + dimx * dimu] = baseCol + dimx + np.tile(np.arange(dimu), dimx).flatten()
curNg += dimx * dimu
# assign a block for p, if necessary
if dimp > 0:
G[curNg: curNg + dimx * dimp] = Jac[:, 1 + dimx + dimu:1 + dimx + dimu + dimp].flatten()
if rec:
row[curNg: curNg + dimx * dimp] = curRow + np.tile(np.arange(dimx), (dimp, 1)).T.flatten()
col[curNg: curNg + dimx * dimp] = baseCol + dimx + dimu + np.tile(np.arange(dimp), dimx).flatten()
curNg += dimx * dimp
# assign the diagonal block for x_{k+1}
G[curNg: curNg + dimx] = | |
as an integer. By default
it's set to 100 which is a solid colour.
highlight_color - Set the highlighted text colour, by default it's 'gold'
button_color_focused - Using the same format as background you can set the
colour to use for a button when it's focused.
button_trans_focused - Using the same format as transparency you can set the
transparency amount to use on the button when in focus.
button_color_nonfocused - Using the same format as background you can set the
colour to use for buttons when they are not in focus.
button_trans_nonfocused - Using the same format as transparency you can set the
transparency amount to use on the buttons when not in focus.
EXAMPLE CODE:
main_text = 'This is my main text.\n\nYou can add anything you want in here and the slider will allow you to see all the contents.\n\nThis example shows using a blue background colour and a transparency of 90%.\n\nWe have also changed the highlighted_color to yellow.'
my_buttons = ['button 1', 'button 2', 'button 3']
my_choice = koding.Custom_Dialog(main_content=main_text,pos='center',buttons=my_buttons,background='213749',transparency=90,highlight_color='yellow')
dialog.ok('CUSTOM DIALOG 1','You selected option %s'%my_choice,'The value of this is: [COLOR=dodgerblue]%s[/COLOR]'%my_buttons[my_choice])
main_text = 'This is example 2 with no fancy colours, just a fullscreen and a working scrollbar.\n\nYou\'ll notice there are also a few more buttons on this one.\n\nline 1\nline 2\nline 3\nline 4\nline 5\nline 6\nline 7\nline 8\nline 9\nline 10\nline 11\nline 12\nline 13\nline 14\nline 15\nline 16\nline 17\nline 18\nline 19\nline 20\n\nYou get the idea we\'ll stop there!'
my_buttons = ['button 1', 'button 2', 'button 3','button 4', 'button 5', 'button 6','button 7', 'button 8', 'button 9','button 10', 'button 11', 'button 12', 'button 13','button 14', 'button 15', 'button 16','button 17', 'button 18', 'button 19','button 20']
my_choice = koding.Custom_Dialog(main_content=main_text,pos='center',size='fullscreen',buttons=my_buttons)
dialog.ok('CUSTOM DIALOG 2','You selected option %s'%my_choice,'The value of this is: [COLOR=dodgerblue]%s[/COLOR]'%my_buttons[my_choice])
~"""
skin_path = os.path.join(koding_path,"resources","skins","Default","720p")
ACTION = -1
# Convert the transparency percentage to hex
transparency = float(transparency) / 100 * 255
transparency = hex(int(transparency)).split('x')[1]
button_trans_focused = float(button_trans_focused) / 100 * 255
button_trans_focused = hex(int(button_trans_focused)).split('x')[1]
button_trans_nonfocused = float(button_trans_nonfocused) / 100 * 255
button_trans_nonfocused = hex(int(button_trans_nonfocused)).split('x')[1]
# Work out the dialog dimensions
if size == 'fullscreen':
dialog_width = '1280'
dialog_height = '720'
else:
dialog_width, dialog_height = size.split('x')
button_count = len(buttons)
buttons_per_row = (int(dialog_width)-25) / (button_width+25)
if buttons_per_row > button_count:
buttons_per_row = button_count
# work out the number of rows, round up if a float
button_rows = int(button_count/buttons_per_row) + (button_count % buttons_per_row > 0)
# Work out the positioning of the dialog
if pos == 'center':
posx = str( (1280 - int(dialog_width)) / 2)
posy = str( (720 - int(dialog_height)) / 2)
else:
posx, posy = pos.split(',')
# Work out the text area size
text_width = str( int(dialog_width)-80 )
text_height = str( (int(dialog_height)-(50*(button_rows+1)))-70 )
scroll_pos = str( int(text_width)+32 )
button_max = int(dialog_height)-30
# Work out the button positions
if dialog == 'Text':
button_spacing = ( int(dialog_width)-(buttons_per_row*button_width) ) / (buttons_per_row+1)
buttons_dict = {}
counter = 1
row = 1
# Create a dictionary of button positioning
for button in buttons:
if counter > buttons_per_row:
counter = 1
row += 1
# If starting a new line reset the values
if counter > buttons_per_row or counter == 1:
current_pos = button_spacing
counter += 1
else:
current_pos = current_pos+button_width+button_spacing
counter += 1
buttons_dict[button] = [str(current_pos),row]
# Set the dialog template name and new temporary "live" XML
dialog_type = dialog.capitalize()+'.xml'
dialog_new = 'temp.xml'
dialog_path = os.path.join(skin_path,dialog_type)
temp_path = os.path.join(skin_path,dialog_new)
button_num = 100
counter = 1
buttons_code = ''
for button in buttons:
if buttons_dict[button][1] == 1:
onup = 99
else:
onup = button_num-buttons_per_row
# If button is on the last row we set down to scrollbar
if buttons_dict[button][1] == button_rows:
ondown = 99
# Otherwise set down to the item on row below
elif buttons_dict[button][1] != button_rows:
ondown = button_num+buttons_per_row
# Set the vertical position (y) of the buttons
button_y = str( int(text_height)+(buttons_dict[button][1]*50)+40 )
if ( int(text_height) < 200 ) or ( int(button_y) > button_max ):
if size != 'fullscreen':
xbmcgui.Dialog().ok('WE NEED A BIGGER WINDOW!','The amount of buttons sent through do not fit in this window. Either make the button width smaller or make a bigger window')
else:
xbmcgui.Dialog().ok('SMALLER BUTTONS NEEDED!','The amount of buttons sent through do not fit in this window. Either send through less buttons or decrease their width using the button_width param.')
return
button_x = str( buttons_dict[button][0] )
buttons_code += '\
<control type="button" id="%s">\n\
<posx>%s</posx>\n\
<posy>%s</posy>\n\
<width>%s</width>\n\
<height>40</height>\n\
<label>%s</label>\n\
<texturefocus colordiffuse="%s%s">DialogBack.png</texturefocus>\n\
<texturenofocus colordiffuse="%s%s">DialogBack.png</texturenofocus>\n\
<font>font12_title</font>\n\
<textcolor>%s</textcolor>\n\
<focusedcolor>%s</focusedcolor>\n\
<align>center</align>\n\
<onleft>%s</onleft>\n\
<onright>%s</onright>\n\
<onup>%s</onup>\n\
<ondown>%s</ondown>\n\
</control>\n' % (button_num, button_x, button_y, button_width, buttons[counter-1],\
button_trans_focused, button_color_focused, button_trans_nonfocused,\
button_color_nonfocused, text_color, highlight_color, button_num-1,\
button_num+1, onup, ondown)
button_num += 1
counter += 1
# Grab contents of the template and replace with our new values
with open(dialog_path, 'r') as content_file:
content = content_file.read()
content = content.replace('dialog_width',dialog_width)\
.replace('dialog_height',dialog_height)\
.replace('text_width',text_width)\
.replace('text_height',text_height)\
.replace('pos_x',posx)\
.replace('pos_y',posy)\
.replace('PK_Transparency',transparency)\
.replace('PK_Color',background)\
.replace('PK_Text_Color',text_color)\
.replace('PK_Header_Color',header_color)\
.replace('<!-- buttons -->',buttons_code)
# Create the new temp "live" XML
myfile = open(temp_path,'w')
myfile.write(content)
myfile.close()
d=MyDisclaimer(dialog_new,koding_path,header=header,main_content=main_content)
d.doModal()
ACTION = d.ACTION
del d
return ACTION
class MyDisclaimer(xbmcgui.WindowXMLDialog):
    """Modal skinned dialog used for disclaimer-style prompts.

    The header and body text are passed in as keyword arguments and exposed
    to the skin XML through properties on the home window (id 10000).
    After doModal() returns, self.ACTION holds the zero-based index of the
    button that was clicked, or -1 if the dialog was dismissed.
    """

    def __init__(self, *args, **kwargs):
        self.header = kwargs['header']
        self.main_content = kwargs['main_content']
        # Home window (10000) properties are what the skin XML reads.
        self.WINDOW = xbmcgui.Window(10000)
        self.WINDOW.setProperty('PK_Header', self.header)
        self.WINDOW.setProperty('PK_Main_Text', self.main_content)
        # -1 means "closed without choosing a button".
        self.ACTION = -1

    def onClick(self, controlID):
        if controlID >= 100:
            # Button control ids start at 100; store the zero-based index.
            self.ACTION = controlID - 100
            self.close()
        elif controlID == 12:
            # Dedicated close control.
            self.close()

    def onAction(self, action):
        # Close on the usual back/exit action ids or remote button codes.
        exit_actions = (5, 6, 7, 9, 10, 92, 117)
        exit_buttons = (275, 257, 261)
        if action in exit_actions or action.getButtonCode() in exit_buttons:
            self.close()
#----------------------------------------------------------------
# TUTORIAL #
def Keyboard(heading='',default='',hidden=False,return_false=False,autoclose=False,kb_type='alphanum'):
    """
    Show an on-screen keyboard and return the string
    CODE: koding.Keyboard([heading, default, hidden, return_false, autoclose, kb_type])
    AVAILABLE PARAMS:
        heading - Optionally enter a heading for the text box.
        default - This is optional, if set this will act as the default text shown in the text box
        hidden - Boolean, if set to True the text will appear as hidden (starred out)
        return_false - By default this is set to False and when escaping out of the keyboard
        the default text is returned (or an empty string if not set). If set to True then
        you'll receive a return of False.
        autoclose - By default this is set to False but if you want the keyboard to auto-close
        after a period of time you can send through an integer. The value sent through needs to
        be milliseconds, so for example if you want it to close after 3 seconds you'd send through
        3000. The autoclose function only works with standard alphanumeric keyboard types.
        kb_type - This is the type of keyboard you want to show, by default it's set to alphanum.
        A list of available values are listed below:
            'alphanum' - A standard on-screen keyboard containing alphanumeric characters.
            'numeric' - An on-screen numerical pad.
            'date' - An on-screen numerical pad formatted only for a date.
            'time' - An on-screen numerical pad formatted only for a time.
            'ipaddress' - An on-screen numerical pad formatted only for an IP Address.
            'password' - A standard keyboard but returns value as md5 hash. When typing
            the text is starred out, once you've entered the password you'll get another
            keyboard pop up asking you to verify. If the 2 match then your md5 has is returned.
    EXAMPLE CODE:
        mytext = koding.Keyboard(heading='Type in the text you want returned',default='test text')
        dialog.ok('TEXT RETURNED','You typed in:', '', '[COLOR=dodgerblue]%s[/COLOR]'%mytext)
        dialog.ok('AUTOCLOSE ENABLED','This following example we\'ve set the autoclose to 3000. That\'s milliseconds which converts to 3 seconds.')
        mytext = koding.Keyboard(heading='Type in the text you want returned',default='this will close in 3s',autoclose=3000)
        dialog.ok('TEXT RETURNED','You typed in:', '', '[COLOR=dodgerblue]%s[/COLOR]'%mytext)
        mytext = koding.Keyboard(heading='Enter a number',kb_type='numeric')
        dialog.ok('NUMBER RETURNED','You typed in:', '', '[COLOR=dodgerblue]%s[/COLOR]'%mytext)
        dialog.ok('RETURN FALSE ENABLED','All of the following examples have "return_false" enabled. This means if you escape out of the keyboard the return will be False.')
        mytext = koding.Keyboard(heading='Enter a date',return_false=True,kb_type='date')
        dialog.ok('DATE RETURNED','You typed in:', '', '[COLOR=dodgerblue]%s[/COLOR]'%mytext)
        mytext = koding.Keyboard(heading='Enter a time',return_false=True,kb_type='time')
        dialog.ok('TIME RETURNED','You typed in:', '', '[COLOR=dodgerblue]%s[/COLOR]'%mytext)
        mytext = koding.Keyboard(heading='IP Address',return_false=True,kb_type='ipaddress',autoclose=5)
        dialog.ok('IP RETURNED','You typed in:', '', '[COLOR=dodgerblue]%s[/COLOR]'%mytext)
        mytext = koding.Keyboard(heading='Password',kb_type='password')
        dialog.ok('MD5 RETURN','The md5 for this password is:', '', '[COLOR=dodgerblue]%s[/COLOR]'%mytext)
    """
    from vartools import Decode_String
    # BUG FIX: resolve the keyboard-type name BEFORE rebinding kb_type.
    # The original code rebound kb_type to the integer xbmcgui constant and
    # then called kb_type.upper() again for the hidden flag, which raised
    # AttributeError whenever hidden=True was used. getattr() also replaces
    # the previous eval() calls (same lookup, no code execution).
    kb_name = kb_type.upper()
    kb_type = getattr( xbmcgui, 'INPUT_%s' % kb_name )
    if hidden:
        # e.g. xbmcgui.ALPHANUM_HIDE_INPUT for kb_type='alphanum'
        hidden = getattr( xbmcgui, '%s_HIDE_INPUT' % kb_name )
    keyboard = dialog.input(heading,default,kb_type,hidden,autoclose)
    if keyboard != '':
        return keyboard
    elif not return_false:
        # Escaped out: hand back the (decoded) default text.
        return Decode_String(default)
    else:
        return False
#----------------------------------------------------------------
# TUTORIAL #
def Notify(title, message, duration=2000, icon='special://home/addons/script.module.python.koding.aio/resources/update.png'):
"""
Show a short | |
<gh_stars>1-10
"""
To use, make sure that pyJsonAttrPatternFactory.py is in your MAYA_PLUG_IN_PATH
then do the following:
import maya
maya.cmds.loadPlugin("pyJsonAttrPatternFactory.py")
maya.cmds.listAttrPatterns(patternType=True)
// Return: ["json"]
"""
import os
import sys
import json
import traceback
import maya.api.OpenMaya as omAPI
JsonKeys = None
jsonDebug = None
#======================================================================
def import_helpers():
    """
    Equivalent to these import statements but presuming installation into
    a utils/ subdirectory of the current directory that's not in the current
    PYTHONPATH.

        from pyJsonAttrPatternInfo import PyJsonAttrPatternInfo as JsonKeys
        from pyJsonAttrPatternInfo import jsonDebug as jsonDebug

    The method is set up to still work even if the module path was already
    visible in the PYTHONPATH so there's no harm in setting it.

    Calling this twice will force re-import of the modules; that's a good
    thing for testing since you can unload the plug-in, load it back in and
    it will pick up the modified modules.
    """
    global JsonKeys
    global jsonDebug
    # Find the subdirectory of the current directory, that's where modules
    # live. Had to do it this way to prevent modules from being misinterpreted
    # to be plug-ins themselves.
    #     The executable is in ..../runTime/bin
    #     The modules are in ..../runTime/devkit/plug-ins/scripted/modules
    location = os.environ['MAYA_LOCATION']
    moduleDir = os.path.join( location, 'devkit', 'plug-ins', 'scripted', 'modules' )
    sys.path.append(moduleDir)
    try:
        # Load the module information, now visible using the updated path.
        module = __import__('pyJsonAttrPatternInfo', globals(), locals(), ["PyJsonAttrPatternInfo", "jsonDebug"], -1)
        reload(module)  # Might be out of date
        # Name the interesting module elements
        JsonKeys = module.PyJsonAttrPatternInfo
        jsonDebug = module.jsonDebug
    finally:
        # FIX: the original did "del sys.path[-1]" outside any try/finally,
        # so a failed import leaked the path entry, and if the import itself
        # appended to sys.path the wrong entry would have been removed.
        try:
            sys.path.remove(moduleDir)
        except ValueError:
            pass
#======================================================================
def maya_useNewAPI():
    """
    Maya checks for the mere presence of this function: it marks the plug-in
    as producing, and expecting to receive, Maya Python API 2.0 objects.
    """
    return None
#======================================================================
class PyJsonAttrPatternFactory(omAPI.MPxAttributePatternFactory):
#----------------------------------------------------------------------
    def __init__(self):
        """Initialize the API base class and clear the parse-state members."""
        omAPI.MPxAttributePatternFactory.__init__(self)
        # Names of the pattern/attribute currently being parsed; used only to
        # build messages in reportWarning/reportError.
        self.currentPattern = None
        self.currentAttribute = None
#----------------------------------------------------------------------
@staticmethod
def patternFactoryCreator():
return PyJsonAttrPatternFactory()
#----------------------------------------------------------------------
def reportWarning(self, warningStr):
"""
Report a pattern parsing problem but do not raise a failure exception.
This is for harmless errors that may or may not indicate a problem
(e.g. a misspelled flag name)
"""
print 'WARN: Pattern %s, attribute %s: %s' % (self.currentPattern, self.currentAttribute, warningStr)
#----------------------------------------------------------------------
def reportError(self, errorStr):
"""
Report a pattern parsing error and raise a failure exception.
Unfortunately there doesn't seem to be any way to report the actual
line of the definition that failed (when it could be known) so
the pattern name and attribute name will have to suffice to narrow
down the source of the error.
"""
traceback.print_stack()
raise ValueError( 'ERR: Pattern %s, attribute %s: %s' % (self.currentPattern, self.currentAttribute, errorStr) )
#----------------------------------------------------------------------
def parseStandardFlags(self, attr, flags):
"""
Apply the JSON-described flags to the given attributes. Only check the
flags understood by all attribute types; let specific types handle
their own particular flags.
attr = Created attribute object
flags = Array of flags to set
"""
jsonDebug( 'Parsing %d standard flags for "%s"' % (len(flags), str(attr)) )
for flag in flags:
valueToSet = True
strippedFlag = flag.strip().rstrip()
# If the flag starts with "!" then unset it rather than setting it
if strippedFlag.startswith('!'):
strippedFlag = strippedFlag[1:]
valueToSet = False
# Unrecognized flags will be silently ignored. They might be
# relevant to derived attribute types.
if strippedFlag.lower() in [x.lower() for x in JsonKeys.kFlagFunctions]:
try:
jsonDebug( '--- Set flag %s' % strippedFlag )
jsonDebug( '--- Flag function = %s, value = %s' % ( JsonKeys.kFlagFunctions[strippedFlag.lower()], valueToSet ) )
setattr( attr, JsonKeys.kFlagFunctions[strippedFlag.lower()], valueToSet )
except Exception, e:
self.reportError( 'Failed setting flag %s on attribute %s : %s"' % (strippedFlag, attr.name, str(e)) )
else:
self.reportWarning( 'Unrecognized attribute flag "%s" is ignored' % strippedFlag )
#----------------------------------------------------------------------
def parseCompoundAttribute(self, name, shortName, attrInfo):
"""
Given a JSON subsection describing a compound attribute create the
attribute and all children, and set all of the provided flags/members
for it.
name = Attribute long name
shortName = Attribute short name
attrInfo = JSON object containing the main attribute information
"""
jsonDebug( 'parseCompoundAttribute(%s : %s)' % (name, attrInfo) )
attr = None
try:
cAttr = omAPI.MFnCompoundAttribute()
attr = cAttr.create( name, shortName )
# Recursively create all children, and children of children, etc.
if JsonKeys.kKeyChildren in attrInfo:
childInfo = attrInfo[JsonKeys.kKeyChildren]
tmpAttr = self.currentAttribute
for child in childInfo:
jsonDebug( 'Add compound child %s' % child )
childAttr = self.parseAttribute( child )
cAttr.addChild( childAttr )
self.currentAttribute = tmpAttr
except Exception, e:
self.reportError( 'Error creating compound: %s' % str(e) )
return attr
#----------------------------------------------------------------------
def parseEnumAttribute(self, name, shortName, attrInfo):
"""
Given a JSON subsection describing an enum attribute create the
attribute and set all of the provided flags/members for it.
name = Attribute long name
shortName = Attribute short name
attrInfo = JSON object containing the main attribute information
"""
jsonDebug( 'parseEnumAttribute(%s)' % name )
eAttr = omAPI.MFnEnumAttribute()
attr = eAttr.create( name, shortName )
# Look for any fields being specified. (If no field names then the
# attribute can only accept integer values.)
if JsonKeys.kKeyEnumNames in attrInfo:
enumIndex = 0
try:
for enumName in attrInfo[JsonKeys.kKeyEnumNames]:
equalSign = enumName.find('=')
if equalSign >= 0:
enumIndex = int(enumName[equalSign+1:])
enumName = enumName[:equalSign]
eAttr.addField( enumName, enumIndex )
enumIndex += 1
except Exception, e:
self.reportError( 'Bad enum specification: "%s"' % str(e) )
# Set default after creation so that we can handle both enum names and
# index values as a default
if JsonKeys.kKeyDefault in attrInfo:
defaultValue = attrInfo[JsonKeys.kKeyDefault]
jsonDebug( 'Setting the enum default to "%s" of type %s' % (defaultValue, type(defaultValue)) )
if type(defaultValue) == int or str(defaultValue).isdigit():
eAttr.default = defaultValue
else:
eAttr.setDefaultByName( defaultValue )
return attr
#----------------------------------------------------------------------
    def parseTypedAttribute(self, name, shortName, attrInfo):
        """
        Given a JSON subsection describing a typed attribute create the
        attribute and set all of the provided flags/members for it.
            name = Attribute long name
            shortName = Attribute short name
            attrInfo = JSON object containing the main attribute information
        Returns the created attribute: an MFnTypedAttribute when exactly one
        non-numeric data or plugin type is accepted, otherwise an
        MFnGenericAttribute holding the full set of accepted types.
        """
        jsonDebug( 'parseTypedAttribute(%s)' % name )
        # First convert the list of accepted types into the equivalent Maya
        # enum type values
        acceptedTypeEnums = []
        acceptedNumericEnums = []
        acceptedPluginTypes = []
        # Tracks whether the generic 'numeric' type name appeared; it forces
        # the generic-attribute path below even when it is the only entry.
        hasNumeric = False
        # Plugin data types are identified by name at runtime, they take
        # precedence.
        if JsonKeys.kKeyAcceptedPluginTypes in attrInfo:
            jsonDebug( '...getting accepted plugin types %s' % attrInfo[JsonKeys.kKeyAcceptedPluginTypes] )
            for pluginId in attrInfo[JsonKeys.kKeyAcceptedPluginTypes]:
                # Plugin types are specified by integer MTypeId values.
                pId = omAPI.MTypeId()
                acceptedPluginTypes.append( pId.create( int(pluginId) ) )
        if JsonKeys.kKeyAcceptedTypes in attrInfo:
            # 'any' short-circuits the list: a single kAny entry replaces it.
            if 'any' in attrInfo[JsonKeys.kKeyAcceptedTypes]:
                acceptedTypeEnums.append( omAPI.MFnData.kAny )
            else:
                for typeName in attrInfo[JsonKeys.kKeyAcceptedTypes]:
                    if typeName == 'numeric':
                        hasNumeric = True
                        acceptedTypeEnums.append( JsonKeys.kGenericTypes[typeName] )
                    elif typeName in JsonKeys.kGenericTypes:
                        jsonDebug( '...getting accepted generic %s' % JsonKeys.kGenericTypes[typeName] )
                        acceptedTypeEnums.append( JsonKeys.kGenericTypes[typeName] )
                    else:
                        # Unknown type names are fatal (raises via reportError).
                        self.reportError( 'Bad type name specification: "%s"' % str(typeName) )
        if JsonKeys.kKeyAcceptedNumericTypes in attrInfo:
            for typeName in attrInfo[JsonKeys.kKeyAcceptedNumericTypes]:
                if typeName in JsonKeys.kNumericTypes:
                    jsonDebug( '...getting accepted numeric %s' % JsonKeys.kNumericTypes[typeName] )
                    acceptedNumericEnums.append( JsonKeys.kNumericTypes[typeName] )
                else:
                    self.reportError( 'Bad numeric type name specification: "%s"' % str(typeName) )
        # Numeric types have to be generic, it's just how the attributes are
        if len(acceptedTypeEnums) == 0 and len(acceptedNumericEnums) == 0 and len(acceptedPluginTypes) == 0:
            self.reportError( 'Need at least one accepted type' )
        # Only one data type means it can be an MFnTypedAttribute, except for
        # numeric type which for some reason is not supported in the API
        elif len(acceptedTypeEnums) == 1 and len(acceptedNumericEnums) == 0 and len(acceptedPluginTypes) == 0 and not hasNumeric:
            jsonDebug( '--- Accepts only one type : %s' % acceptedTypeEnums[0] )
            tAttr = omAPI.MFnTypedAttribute()
            attr = tAttr.create( name, shortName, acceptedTypeEnums[0] )
            jsonDebug( '--- created' )
        # One plugin type has a special MFnTypedAttribute constructor
        elif len(acceptedTypeEnums) == 0 and len(acceptedNumericEnums) == 0 and len(acceptedPluginTypes) == 1:
            jsonDebug( '--- Accepts only one plugin : %s' % acceptedPluginTypes[0] )
            tAttr = omAPI.MFnTypedAttribute()
            attr = tAttr.create( name, shortName, acceptedPluginTypes[0] )
            jsonDebug( '--- created' )
        # Every other combination forces a generic attribute
        else:
            jsonDebug( '--- Accepts multiple or base numeric types' )
            tAttr = omAPI.MFnGenericAttribute()
            attr = tAttr.create( name, shortName )
            # Generic attributes accumulate all accepted kinds one by one.
            for typeEnum in acceptedTypeEnums:
                jsonDebug( '--> add data type %s' % typeEnum )
                tAttr.addDataType( typeEnum )
            for numericEnum in acceptedNumericEnums:
                jsonDebug( '--> add numeric type %s' % numericEnum )
                tAttr.addNumericType( numericEnum )
            for pluginId in acceptedPluginTypes:
                jsonDebug( '--> add plugin type %s' % pluginId )
                tAttr.addTypeId( pluginId )
            jsonDebug( '--- created' )
        return attr
#----------------------------------------------------------------------
def parseLightDataAttribute(self, name, shortName, attrInfo):
"""
Given a JSON subsection describing a light data attribute create the
attribute and set all of the provided flags/members for it.
name = Attribute long name
shortName = Attribute short name
attrInfo = JSON object containing the main attribute information
"""
# List of all child attributes with their numeric type and default values
cNames = [ 'direction', 'intensity', 'ambient', 'diffuse', 'specular',
'shadowFraction', 'preShadowIntensity', 'blindData' ]
lightChildren = { cNames[0] : (omAPI.MFnNumericData.k3Float, [0.0,0.0,0.0]),
cNames[1] : (omAPI.MFnNumericData.k3Float, [0.0,0.0,0.0]),
cNames[2] : (omAPI.MFnNumericData.kBoolean, 0),
cNames[3] : (omAPI.MFnNumericData.kBoolean, 0),
cNames[4] : (omAPI.MFnNumericData.kBoolean, 0),
cNames[5] : (omAPI.MFnNumericData.kFloat, 0.0),
cNames[6] : (omAPI.MFnNumericData.kFloat, 0.0),
cNames[7] : (omAPI.MFnNumericData.kAddr, 0) }
jsonDebug( 'parseLightDataAttribute(%s)' % name )
ldAttr = omAPI.MFnLightDataAttribute()
missingNames = []
ldChildren = []
defaultValues = []
for child in cNames:
try:
jsonDebug( 'Creating light data child %s' % child )
childInfo = attrInfo[child]
cName = childInfo[JsonKeys.kKeyName]
jsonDebug( '--- child name %s' % cName )
if JsonKeys.kKeyShortName in childInfo:
cShortName = childInfo[JsonKeys.kKeyShortName]
else:
cShortName = cName
jsonDebug( '--- child short name %s' % cShortName )
if JsonKeys.kKeyDefault in childInfo and child != 'blindData':
jsonDebug( '--- Defining a default' )
defaultValues.append( childInfo[JsonKeys.kKeyDefault] )
else:
jsonDebug( '--- Accepting default 0' )
defaultValues.append( lightChildren[child][1] )
jsonDebug( '--- child default %s' % defaultValues[-1] )
nAttr = omAPI.MFnNumericAttribute()
jsonDebug( '--- created numeric type %s' % lightChildren[child][0] )
ldChildren.append( nAttr.create( cName, cShortName, lightChildren[child][0] ) )
except Exception, e:
jsonDebug( 'Missing data for sub-attribute %s : %s' % (child, str(e)) )
missingNames.append( child )
if len(missingNames) > 0:
self.reportError( 'Not all required subattribute names are present. Add %s' % str(missingNames) | |
from ._constants import (
COORD_X_CENTER,
COORD_Y_CENTER,
COORD_X_OUTER,
COORD_Y_OUTER,
VAR_LON_CENTER,
VAR_LAT_CENTER,
VAR_LON_OUTER,
VAR_LAT_OUTER,
)
from ._plot_helpers import (
infer_cmap_params,
_get_var_label,
_align_grid_var_dims,
_align_plot_var_dims,
)
from ._masking import _mask_antimeridian_quads
from vcm.cubedsphere import GridMetadata
import xarray as xr
import numpy as np
from matplotlib import pyplot as plt
import warnings
from functools import partial
import os
try:
    from cartopy import crs as ccrs
    import cartopy
except ImportError:
    pass
else:
    # FIX: the config tweak below must only run when cartopy imported;
    # the original referenced `cartopy` unconditionally after the
    # "except ImportError: pass" and raised NameError when the optional
    # dependency was missing.
    if os.getenv("CARTOPY_EXTERNAL_DOWNLOADER") != "natural_earth":
        # workaround to host our own global-scale coastline shapefile instead
        # of unreliable cartopy source
        cartopy.config["downloaders"][("shapefiles", "natural_earth")].url_template = (
            "https://raw.githubusercontent.com/ai2cm/"
            "vcm-ml-example-data/main/fv3net/fv3viz/coastline_shapefiles/"
            "{resolution}_{category}/ne_{resolution}_{name}.zip"
        )
# Grid metadata matching the dimension and grid-variable names used by the
# fv3gfs Python wrapper: cell-center dims, cell-interface (outer) dims, the
# 'tile' dimension, and the lon/lat center/corner variable names (values come
# from ._constants).
WRAPPER_GRID_METADATA = GridMetadata(
    COORD_X_CENTER,
    COORD_Y_CENTER,
    COORD_X_OUTER,
    COORD_Y_OUTER,
    "tile",
    VAR_LON_CENTER,
    VAR_LON_OUTER,
    VAR_LAT_CENTER,
    VAR_LAT_OUTER,
)
def plot_cube(
    ds: xr.Dataset,
    var_name: str,
    grid_metadata: GridMetadata = WRAPPER_GRID_METADATA,
    plotting_function: str = "pcolormesh",
    ax: plt.axes = None,
    row: str = None,
    col: str = None,
    col_wrap: int = None,
    projection: "ccrs.Projection" = None,
    colorbar: bool = True,
    cmap_percentiles_lim: bool = True,
    cbar_label: str = None,
    coastlines: bool = True,
    coastlines_kwargs: dict = None,
    **kwargs,
):
    """ Plots an xr.DataArray containing tiled cubed sphere gridded data
    onto a global map projection, with optional faceting of additional dims
    Args:
        ds:
            Dataset containing variable to plotted, along with the grid
            variables defining cell center latitudes and longitudes and the
            cell bounds latitudes and longitudes, which must share common
            dimension names
        var_name:
            name of the data variable in `ds` to be plotted
        grid_metadata:
            a vcm.cubedsphere.GridMetadata data structure that
            defines the names of plot and grid variable dimensions and the names
            of the grid variables themselves; defaults to those used by the
            fv3gfs Python wrapper (i.e., 'x', 'y', 'x_interface', 'y_interface' and
            'lat', 'lon', 'latb', 'lonb')
        plotting_function:
            Name of matplotlib 2-d plotting function. Available
            options are "pcolormesh", "contour", and "contourf". Defaults to
            "pcolormesh".
        ax:
            Axes onto which the map should be plotted; must be created with
            a cartopy projection argument. If not supplied, axes are generated
            with a projection. If ax is supplied, faceting is disabled.
        row:
            Name of dimension to be faceted along subplot rows. Must not be a
            tile, lat, or lon dimension. Defaults to no row facets.
        col:
            Name of dimension to be faceted along subplot columns. Must not be
            a tile, lat, or lon dimension. Defaults to no column facets.
        col_wrap:
            If only one of `col`, `row` is specified, number of columns to plot
            before wrapping onto next row. Defaults to None, i.e. no limit.
        projection:
            Cartopy projection object to be used in creating axes. Ignored
            if cartopy geo-axes are supplied. Defaults to Robinson projection.
        colorbar:
            Flag for whether to plot a colorbar. Defaults to True.
        cmap_percentiles_lim:
            If False, use the absolute min/max to set color limits.
            If True, use 2/98 percentile values.
        cbar_label:
            If provided, use this as the color bar label.
        coastlines:
            Whether to plot coastlines on map. Default True.
        coastlines_kwargs:
            Dict of arguments to be passed to cartopy axes's
            `coastline` function if `coastlines` flag is set to True.
        **kwargs: Additional keyword arguments to be passed to the plotting function.
    Returns:
        figure (plt.Figure):
            matplotlib figure object onto which axes grid is created
        axes (np.ndarray):
            Array of `plt.axes` objects associated with map subplots if faceting;
            otherwise array containing single axes object.
        handles (list):
            List or nested list of matplotlib object handles associated with
            map subplots if faceting; otherwise list of single object handle.
        cbar (plt.colorbar):
            object handle associated with figure, if `colorbar`
            arg is True, else None.
        facet_grid (xarray.plot.facetgrid):
            xarray plotting facetgrid for multi-axes case. In single-axes case,
            returns None.
    Example:
        # plot diag winds at two times
        fig, axes, hs, cbar, facet_grid = plot_cube(
            diag_ds.isel(time = slice(2, 4)),
            'VGRD850',
            plotting_function = "contourf",
            col = "time",
            coastlines = True,
            colorbar = True,
            vmin = -20,
            vmax = 20
        )
    """
    # Reorder grid/plot variable dims into the layout the axes-level
    # plotting helper expects.
    mappable_ds = _mappable_var(ds, var_name, grid_metadata)
    array = mappable_ds[var_name].values
    # Resolve color limits/colormap once so all facets share them.
    kwargs["vmin"], kwargs["vmax"], kwargs["cmap"] = infer_cmap_params(
        array,
        vmin=kwargs.get("vmin"),
        vmax=kwargs.get("vmax"),
        cmap=kwargs.get("cmap"),
        robust=cmap_percentiles_lim,
    )
    # Bind the grid coordinates so the same callable can be mapped over facets.
    _plot_func_short = partial(
        _plot_cube_axes,
        lat=mappable_ds.lat.values,
        lon=mappable_ds.lon.values,
        latb=mappable_ds.latb.values,
        lonb=mappable_ds.lonb.values,
        plotting_function=plotting_function,
        **kwargs,
    )
    projection = ccrs.Robinson() if not projection else projection
    if ax is None and (row or col):
        # facets
        facet_grid = xr.plot.FacetGrid(
            data=mappable_ds,
            row=row,
            col=col,
            col_wrap=col_wrap,
            subplot_kws={"projection": projection},
        )
        facet_grid = facet_grid.map(_plot_func_short, var_name)
        fig = facet_grid.fig
        axes = facet_grid.axes
        # NOTE(review): relies on xarray FacetGrid's private `_mappables`
        # attribute -- may break across xarray versions.
        handles = facet_grid._mappables
    else:
        # single axes
        if ax is None:
            fig, ax = plt.subplots(1, 1, subplot_kw={"projection": projection})
        else:
            fig = ax.figure
        handle = _plot_func_short(array, ax=ax)
        # Wrap in an array so the coastline loop below works for both paths.
        axes = np.array(ax)
        handles = [handle]
        facet_grid = None
    if coastlines:
        coastlines_kwargs = dict() if not coastlines_kwargs else coastlines_kwargs
        [ax.coastlines(**coastlines_kwargs) for ax in axes.flatten()]
    if colorbar:
        if row or col:
            # Shared colorbar in a dedicated axes to the right of the facets.
            fig.subplots_adjust(
                bottom=0.1, top=0.9, left=0.1, right=0.8, wspace=0.02, hspace=0.02
            )
            cb_ax = fig.add_axes([0.83, 0.1, 0.02, 0.8])
        else:
            fig.subplots_adjust(wspace=0.25)
            cb_ax = ax.inset_axes([1.05, 0, 0.02, 1])
        cbar = plt.colorbar(handles[0], cax=cb_ax, extend="both")
        cbar.set_label(cbar_label or _get_var_label(ds[var_name].attrs, var_name))
    else:
        cbar = None
    return fig, axes, handles, cbar, facet_grid
def _mappable_var(
    ds: xr.Dataset, var_name: str, grid_metadata: GridMetadata = WRAPPER_GRID_METADATA,
):
    """Prepare a variable and its grid for cubed-sphere plotting.

    Aligns the dimension order of every grid variable named in
    `grid_metadata` and of the plot variable itself, then combines them
    into a single dataset.

    Args:
        ds: Dataset holding the plot variable plus the grid variables.
        var_name: Name of the variable to be plotted.
        grid_metadata: vcm.cubedsphere.GridMetadata describing dim names
            and grid variable names.

    Returns:
        xr.Dataset with the plot variable and grid variables, all with
        dimensions ordered for plotting.
    """
    aligned_grid = {
        var: _align_grid_var_dims(ds[var], required_dims=dims)
        for var, dims in grid_metadata.coord_vars.items()
    }
    grid_ds = xr.Dataset(aligned_grid)
    plot_da = _align_plot_var_dims(ds[var_name], grid_metadata.y, grid_metadata.x)
    return grid_ds.merge(plot_da)
def pcolormesh_cube(
    lat: np.ndarray, lon: np.ndarray, array: np.ndarray, ax: plt.axes = None, **kwargs
):
    """Plot all six tiles of a cubed-sphere field with pcolormesh.

    Cells straddling the antimeridian are NaN-masked, and each tile is then
    drawn as one or more rectangular NaN-free segments so that gridlines
    drawn via the `edgecolor` argument show no artifacts. Unlike
    :py:func:`plot_cube`, this takes raw np.ndarrays (cell-corner lat/lon,
    cell-center values) and draws a single map on the given axes.

    Args:
        lat: Cell-corner latitudes, shape (tile, ny + 1, nx + 1).
        lon: Cell-corner longitudes, shape (tile, ny + 1, nx + 1).
        array: Cell-center values, shape (tile, ny, nx).
        ax: Matplotlib geoaxes to draw on; defaults to the current axes.
        **kwargs: Forwarded to `pcolormesh`.

    Returns:
        Matplotlib handle for the last segment drawn.
    """
    if lat.shape != lon.shape:
        raise ValueError("lat and lon should have the same shape")
    if ax is None:
        ax = plt.gca()
    central_longitude = ax.projection.proj4_params["lon_0"]
    masked = np.where(
        _mask_antimeridian_quads(lon.T, central_longitude), array.T, np.nan
    ).T
    # oddly a PlateCarree transform seems to be needed here even for
    # non-PlateCarree projections?? very puzzling, but it seems to be the case.
    kwargs.setdefault("transform", ccrs.PlateCarree())
    kwargs.setdefault("vmin", np.nanmin(masked))
    kwargs.setdefault("vmax", np.nanmax(masked))
    for tile_index in range(masked.shape[0]):
        tile_x = center_longitudes(lon[tile_index, :, :], central_longitude)
        tile_y = lat[tile_index, :, :]
        segments = _segment_plot_inputs(tile_x, tile_y, masked[tile_index, :, :])
        for seg_x, seg_y, seg_values in segments:
            p_handle = ax.pcolormesh(seg_x, seg_y, seg_values, **kwargs)
    return p_handle
def _segment_plot_inputs(x, y, masked_array):
"""Takes in two arrays at corners of grid cells and an array at grid cell centers
which may contain NaNs. Yields 3-tuples of rectangular segments of
these arrays which cover all non-nan points without duplicates, and don't contain
NaNs.
"""
is_nan = np.isnan(masked_array)
if np.sum(is_nan) == 0: # contiguous section, just plot it
if np.product(masked_array.shape) > 0:
yield (x, y, masked_array)
else:
x_nans = np.sum(is_nan, axis=1) / is_nan.shape[1]
y_nans = np.sum(is_nan, axis=0) / is_nan.shape[0]
if x_nans.max() >= y_nans.max(): # most nan-y line is in first dimension
i_split = x_nans.argmax()
if x_nans[i_split] == 1.0: # split cleanly along line
yield from _segment_plot_inputs(
x[: i_split + 1, :], y[: i_split + 1, :], masked_array[:i_split, :],
)
yield from _segment_plot_inputs(
x[i_split + 1 :, :],
y[i_split + 1 :, :],
masked_array[i_split + 1 :, :],
)
else:
# split to create segments of complete nans
# which subsequent | |
WITH SHORT RIGHT LEG'
'\x02YA'
'\x03YAA'
'\x03YAH'
'\x03YAI'
'\x03YAN'
'\x0cYAN NUMERAL '
'\x07YANMAR '
'\x07YANSAYA'
'\x03YAT'
'\x03YAU'
'\x04YAWN'
'\x03YAY'
'\x02YE'
'\x03YEE'
'\x03YEH'
'\x1aYEH WITH HAMZA ABOVE WITH '
'\x1cYEH WITH TWO DOTS BELOW AND '
'\x08YEN SIGN'
'\tYESIEUNG-'
'\x04YEUX'
'\x05YGIEA'
'\x02YI'
'\x03YI '
'\x0eYI WITH STROKE'
'\x03YIN'
'\x05YING '
'\nYING CARD '
'\tYING FACE'
'\x10YIR MKPARAQ MEUN'
'\x03YIT'
'\x0cYLEFT SYMBOL'
'\x05YMBOL'
'\x06YMBOL '
'\nYMBOL AIVA'
'\x0cYMBOL BINDU '
'\rYMBOL KIEVAN '
'\rYMBOL TAU RHO'
'\x08YMBOL VI'
'\x02YN'
'\x07YNAMIC '
'\x02YO'
'\x04YODH'
'\x06YOMBO '
'\x06YRENE '
'\x08YRILLIC '
'\x02YU'
'\x01Z'
'\x02ZA'
'\x03ZAH'
'\x1aZAIN WITH INVERTED V ABOVE'
'\x0eZAKAYA LANTERN'
'\x03ZAL'
'2ZANTINE MUSICAL SYMBOL FTHORA SKLIRON CHROMA VASIS'
'\nZAR AMULET'
'\x04ZARD'
'\x04ZATA'
'\x05ZAYIN'
'\x04ZAYN'
'\x04ZEN '
'\x04ZERO'
'\x03ZHE'
'\x07ZIGZAG '
'\x0cZIGZAG ARROW'
'\x04ZIZ2'
'\x02ZO'
'\nZU OVER ZU'
'\x08ZWJ THAJ'
'\x02ZY'
'\x03ZZA'
'\x04ZZHE'
'\x04ZZY '
)
_charnodes =[67466,
-34782,
-1,
132262,
16477,
983080,
197597,
80378,
-1,
263132,
169417,
-1,
328058,
211655,
-1,
393581,
266001,
-1,
459116,
351332,
-1,
524501,
406310,
-1,
590001,
477265,
121091,
720895,
524288,
-1,
721000,
607315,
120832,
851967,
655360,
-1,
852026,
743696,
120846,
983039,
786432,
-1,
983073,
859447,
-1,
1048605,
943048,
120853,
1179647,
983040,
-1,
1179671,
1082395,
-1,
1245206,
1145883,
120877,
1376255,
1179648,
-1,
-65515,
1254047,
120878,
-1,
1317239,
120879,
-1,
1193497,
120884,
1572889,
1130046,
120857,
-1,
1508623,
120882,
1703963,
1516783,
120856,
-1,
1639661,
120885,
-65508,
1661215,
120855,
-1,
1786976,
120854,
2031615,
1013522,
-1,
2031648,
1911975,
120858,
-1,
1967375,
120883,
-1,
1966718,
121001,
2228269,
951317,
120862,
2359295,
2162688,
-1,
2359335,
2235703,
-1,
-65499,
2329660,
120872,
-65498,
2384849,
120893,
-1,
2443121,
120873,
2621482,
2308807,
-1,
-65495,
2581598,
120874,
-1,
2639325,
120875,
2818092,
2561041,
120895,
-1,
2753391,
120894,
-1,
2759031,
120866,
3014707,
2191180,
-1,
3080242,
2966778,
120966,
3211263,
3014656,
-1,
-65487,
3089055,
120972,
-1,
3152247,
120971,
-1,
3019211,
120849,
3407925,
2965054,
120850,
-1,
3343506,
120867,
3538999,
3348855,
120848,
-1,
3474715,
120865,
-65480,
3509229,
120871,
-65479,
3637274,
120864,
-1,
3691442,
120996,
3866713,
820251,
-1,
3932229,
3832859,
121052,
4063231,
3866624,
-1,
4063297,
3965979,
-1,
-65473,
4007859,
121054,
-65472,
4072095,
121055,
-1,
4135287,
121056,
4325444,
4015193,
-1,
-65469,
4275761,
121059,
-1,
4331895,
121057,
-1,
4266999,
121058,
4587599,
3874103,
-1,
4653133,
4556951,
-1,
4718668,
4616012,
-1,
4784203,
4689547,
-1,
-65462,
4754236,
121066,
-1,
4802131,
121064,
-1,
4739787,
121063,
-1,
4680939,
120887,
5177343,
4605786,
121067,
-1,
5055882,
120892,
5242963,
4538051,
121062,
5373951,
5188811,
-1,
-65454,
5278602,
120881,
-1,
5324350,
120899,
5505110,
5191201,
-1,
-65451,
5472228,
121060,
-1,
5511543,
121061,
5767167,
5444632,
-1,
-65448,
5662645,
120890,
-1,
5719012,
120891,
5898334,
3817022,
120843,
6029311,
5832704,
-1,
6029405,
5921162,
120852,
-1,
5965071,
120870,
-1,
5985509,
120844,
6226018,
5868433,
120851,
6356991,
6161637,
-1,
-65439,
6257691,
120868,
-1,
6300319,
120869,
6488165,
6188876,
-1,
-65436,
6440281,
121018,
-1,
6492605,
120841,
6684775,
6429047,
120838,
-1,
6620020,
120861,
-1,
6628591,
120842,
6881407,
689173,
121077,
7012351,
6815744,
-1,
7012465,
6916930,
-1,
7077998,
6979918,
-1,
-65427,
7050474,
121086,
-1,
7094131,
121088,
-65425,
7033793,
120979,
-65424,
7226464,
120840,
-1,
7288465,
121089,
7471222,
6953397,
-1,
7536756,
7423083,
121083,
-1,
7472299,
120876,
-65419,
7501137,
121085,
-1,
7625003,
121084,
7798906,
7437344,
-1,
-65416,
7743411,
121079,
-65415,
7807647,
121080,
-1,
7870839,
121081,
8061053,
7759843,
-1,
-65412,
8030308,
121087,
-1,
8074961,
121090,
-65410,
8011080,
121078,
-1,
8205849,
121082,
8388757,
6838551,
-1,
8454285,
8356891,
-1,
8519814,
8396087,
-1,
8585349,
8472440,
-1,
-65404,
8555402,
120888,
-1,
8601150,
120889,
-1,
8555586,
120886,
8847499,
8459288,
-1,
8913034,
8808362,
-1,
-65399,
8883082,
120896,
-1,
8922206,
120897,
-1,
8864976,
120898,
-65396,
8803250,
121035,
-1,
9125582,
120880,
9306255,
8424330,
121030,
-1,
9241871,
121033,
9437330,
9269068,
-1,
-65391,
9389401,
121037,
-1,
9441725,
121032,
-65389,
9393074,
121036,
-65388,
9578966,
121028,
-1,
9640316,
120860,
9830564,
8344505,
-1,
9896092,
9775574,
120974,
10027007,
9830400,
-1,
10027163,
9926112,
-1,
-65382,
9994212,
120975,
-1,
10036530,
120976,
-1,
9987619,
120977,
10289311,
9863502,
-1,
-65378,
10249920,
120985,
-1,
10305541,
120986,
10485921,
10241107,
120992,
-1,
10421470,
120988,
-65374,
10455946,
120978,
-65373,
10579961,
120983,
-1,
10623351,
120984,
10813615,
9794880,
-1,
10879145,
10770704,
121012,
11010047,
10813440,
-1,
-65368,
10907656,
121014,
-1,
10953375,
121013,
-65366,
10849162,
121006,
-65365,
11109397,
121016,
-65364,
11169785,
121007,
-65363,
11228082,
121008,
-65362,
11289683,
121015,
-1,
11348438,
121003,
-65360,
10763592,
121092,
-1,
11482805,
120906,
11730943,
544395,
120922,
11796479,
11599872,
-1,
11796675,
11678502,
-1,
11862205,
11749608,
120908,
11993087,
11796480,
-1,
11993274,
11877950,
120919,
12124159,
11927552,
-1,
-65351,
12026928,
120920,
-1,
12082832,
120921,
-65349,
11961378,
120914,
-65348,
12203602,
120910,
-1,
12261751,
120912,
12517375,
11822862,
120900,
12582911,
12386304,
-1,
12583105,
12460703,
120903,
-1,
12518539,
120904,
-65342,
12533310,
120902,
-1,
12654967,
120901,
12845257,
11763335,
-1,
12910791,
12787444,
120930,
13041663,
12846351,
120931,
-1,
12910837,
120932,
-65336,
12866482,
120933,
-1,
13059263,
120929,
13238480,
12795208,
120924,
13369343,
13172736,
-1,
13369551,
13251850,
120909,
13500415,
13303808,
-1,
-65330,
13383250,
120911,
-1,
13441399,
120913,
-1,
13337648,
120926,
13697236,
13206555,
-1,
-65326,
13663259,
120925,
-65325,
13710873,
120928,
-1,
13769079,
120927,
-1,
13638111,
120923,
14024977,
466231,
-1,
14090480,
13994135,
-1,
14156011,
14051537,
120941,
14286847,
14090240,
-1,
14287071,
14181574,
120940,
14417919,
14221312,
-1,
14418142,
14320667,
-1,
-65315,
14384155,
120942,
-1,
14431769,
120946,
-1,
14376592,
120944,
14680292,
14238803,
120834,
14811135,
14614528,
-1,
14811363,
14713877,
121068,
-1,
14746460,
121069,
-1,
14775633,
121024,
15007975,
14648347,
-1,
-65306,
14973979,
120943,
-1,
15021593,
120947,
15204585,
14955786,
120915,
-1,
15139676,
120916,
-65302,
15163024,
120945,
-1,
15292727,
121040,
15532031,
14120609,
120949,
15597567,
15400960,
-1,
-65298,
15492294,
120948,
-65297,
15554919,
121039,
-1,
15615115,
121020,
15794439,
14042970,
120950,
15925247,
15728640,
-1,
15925503,
15811667,
120833,
16056319,
15859712,
-1,
16056572,
15948048,
120847,
16187391,
15990784,
-1,
16187641,
16065584,
-1,
-65288,
16133287,
120859,
-1,
16188030,
121002,
-65286,
16155669,
120863,
-65285,
16348475,
120967,
-1,
16405426,
120997,
-65283,
16020817,
121019,
-65282,
16530993,
120845,
-1,
16587127,
120839,
16777475,
15882519,
-1,
16843010,
16741713,
121038,
-1,
16777461,
121045,
-1,
16812938,
121031,
17039622,
16733113,
-1,
-65275,
17009546,
120980,
-1,
17056851,
120993,
-1,
17003857,
121009,
17367039,
15748802,
120934,
17432575,
17235968,
-1,
17432844,
17335323,
-1,
-65269,
17398811,
120935,
-1,
17446425,
120937,
17629454,
17389898,
121046,
-1,
17564778,
121047,
-65265,
17587856,
120936,
-65264,
17712288,
121065,
-1,
17774238,
120905,
17957193,
13974215,
-1,
18022715,
17909208,
120957,
18153471,
17956864,
-1,
18153763,
18039891,
120836,
18284543,
18087936,
-1,
18284829,
18187285,
121072,
18415615,
18219008,
-1,
18415899,
18315298,
-1,
-65254,
18372313,
121073,
-1,
18432424,
121053,
-65252,
18375878,
121070,
-1,
18566674,
121071,
18743585,
18241815,
-1,
18809120,
18707771,
120968,
-1,
18743656,
120973,
-1,
18764722,
120998,
-65246,
18707793,
121025,
-1,
18955838,
121051,
19136809,
18113734,
120955,
19267583,
19070976,
-1,
19267880,
19170331,
-1,
-65241,
19233819,
120959,
-1,
19281433,
120956,
-1,
19226256,
120961,
19530030,
19092402,
120970,
19660799,
19464192,
-1,
19661101,
19547219,
120994,
-1,
19596510,
120989,
-1,
19629077,
120987,
19857714,
19498011,
-1,
19923249,
19823643,
120960,
-1,
19858785,
120963,
-1,
19864011,
120964,
20119860,
19821883,
120999,
-1,
20054438,
121004,
20250934,
20076816,
121029,
-1,
20186162,
121041,
20382009,
20198182,
-1,
-65224,
20342554,
120907,
-1,
20400379,
120917,
-65222,
20348173,
120958,
-1,
20536976,
120962,
20774911,
17982554,
120939,
20840447,
20643840,
-1,
20840771,
20732183,
-1,
20906306,
20804957,
121050,
21037055,
20840448,
-1,
-65215,
20932507,
121048,
-1,
20988893,
121049,
-1,
20874261,
121034,
21233991,
20792440,
-1,
-65211,
21203772,
121023,
-65210,
21260187,
121021,
-1,
21316573,
121022,
-65208,
21198210,
121017,
-1,
21439734,
120938,
21627234,
17896459,
120965,
21757951,
21561344,
-1,
21758294,
21644371,
120837,
21889023,
21692416,
-1,
21889361,
21791765,
121076,
22020095,
21823488,
-1,
-65200,
21915547,
121074,
-1,
21971933,
121075,
22151507,
21853521,
121027,
-1,
22086511,
121026,
22347775,
22108439,
-1,
-65195,
22246715,
120969,
-1,
22303666,
121000,
22479195,
21713849,
-1,
22544730,
22430803,
120995,
22675455,
22480124,
120991,
-1,
22545263,
120990,
-1,
22514570,
120982,
22806879,
22436151,
121044,
22937599,
22740992,
-1,
-65186,
22833051,
121042,
-1,
22889437,
121043,
23134207,
22771008,
-1,
-65183,
23024562,
121011,
-1,
23079393,
121005,
23330815,
21587934,
120951,
23396351,
23199744,
-1,
23396711,
23299099,
-1,
-65178,
23362587,
120952,
-1,
23410201,
120954,
-65176,
23360849,
121010,
-65175,
23551632,
120953,
-65174,
23614401,
120981,
-65173,
23675987,
120835,
-1,
23737610,
120918,
-1,
411463,
121451,
24051711,
338935,
121343,
24117247,
23920640,
-1,
24117625,
24009369,
-1,
24183160,
24053789,
-1,
24248693,
24154434,
-1,
-65165,
24216753,
121346,
-65164,
24281060,
121345,
-1,
24323365,
121348,
24575999,
24196465,
-1,
-65161,
24477668,
121347,
-1,
24519973,
121349,
-1,
24117594,
121350,
-1,
24081712,
121344,
24838836,
285206,
-1,
24904334,
24809165,
-1,
24969717,
24875330,
-1,
25035168,
24911159,
-1,
25100694,
25005417,
-1,
25166227,
25034752,
-1,
25231753,
25115335,
-1,
25297286,
25170502,
-1,
25362820,
25251346,
121230,
-1,
25329458,
121231,
-65147,
25328909,
121228,
-1,
25450478,
121229,
25690111,
25250007,
-1,
-65144,
25573181,
121254,
-1,
25632732,
121261,
25821582,
25194173,
-1,
25887116,
25775634,
121226,
-1,
25853746,
121227,
-65139,
25853197,
121224,
-1,
25974766,
121225,
26214399,
25789213,
-1,
26214802,
26112489,
-1,
-65135,
26180877,
121232,
-1,
26236910,
121233,
-1,
26160901,
121247,
26542079,
25109889,
-1,
-65131,
26443021,
121248,
-1,
26499054,
121249,
26673563,
| |
<gh_stars>0
from .rpc.request import (
rpc_request
)
from .exceptions import (
InvalidRPCReplyError
)
_default_endpoint = 'http://localhost:9500'  # default RPC endpoint: local Harmony node
_default_timeout = 30  # default request timeout, in seconds
################
# Network RPCs #
################
def get_shard(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
    """
    Get shard ID of the node

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    int
        Shard ID of node

    Raises
    ------
    InvalidRPCReplyError
        If received unknown result from endpoint
    """
    # NOTE(review): this is the only function here still on the v1 'hmy_'
    # prefix while siblings use 'hmyv2_' — confirm intended API version.
    method = 'hmy_getNodeMetadata'
    try:
        return rpc_request(method, endpoint=endpoint, timeout=timeout)['result']['shard-id']
    except (KeyError, TypeError) as e:
        # KeyError: missing 'result'/'shard-id'; TypeError: null or
        # non-dict 'result'.  Both mean the reply shape is unexpected.
        raise InvalidRPCReplyError(method, endpoint) from e
def get_staking_epoch(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
    """
    Get epoch number when blockchain switches to EPoS election

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    int
        Epoch at which blockchain switches to EPoS election

    Raises
    ------
    InvalidRPCReplyError
        If received unknown result from endpoint
    """
    method = 'hmy_getNodeMetadata'
    try:
        # The ['result'] lookup must be inside the try: previously a reply
        # without 'result' escaped as a raw KeyError instead of being
        # reported as InvalidRPCReplyError.
        data = rpc_request(method, endpoint=endpoint, timeout=timeout)['result']
        return int(data['chain-config']['staking-epoch'])
    except (KeyError, TypeError) as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def get_prestaking_epoch(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
    """
    Get epoch number when blockchain switches to allow staking features without election

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    int
        Epoch at which blockchain switches to allow staking features without election

    Raises
    ------
    InvalidRPCReplyError
        If received unknown result from endpoint
    """
    method = 'hmy_getNodeMetadata'
    try:
        # Keep the ['result'] lookup inside the try so a reply without
        # 'result' surfaces as InvalidRPCReplyError, not a bare KeyError.
        data = rpc_request(method, endpoint=endpoint, timeout=timeout)['result']
        return int(data['chain-config']['prestaking-epoch'])
    except (KeyError, TypeError) as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def get_sharding_structure(endpoint=_default_endpoint, timeout=_default_timeout) -> list:
    """
    Fetch the network sharding structure.

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    list
        https://api.hmny.io/#9669d49e-43c1-47d9-a3fd-e7786e5879df
    """
    reply = rpc_request('hmyv2_getShardingStructure', endpoint=endpoint, timeout=timeout)
    return reply['result']
def get_leader_address(endpoint=_default_endpoint, timeout=_default_timeout) -> str:
    """
    Fetch the one-address of the current leader.

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    str
        One address of current leader
    """
    reply = rpc_request('hmyv2_getLeader', endpoint=endpoint, timeout=timeout)
    return reply['result']
def get_block_number(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
    """
    Fetch the current block number.

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    int
        Current block number

    Raises
    ------
    InvalidRPCReplyError
        If received unknown result from endpoint
    """
    method = 'hmyv2_blockNumber'  # v2 endpoint
    try:
        reply = rpc_request(method, endpoint=endpoint, timeout=timeout)
        return reply['result']
    except TypeError as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def get_current_epoch(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
    """
    Fetch the current epoch number.

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    int
        Current epoch number

    Raises
    ------
    InvalidRPCReplyError
        If received unknown result from endpoint
    """
    method = 'hmyv2_getEpoch'  # v2 endpoint
    try:
        reply = rpc_request(method, endpoint=endpoint, timeout=timeout)
        return reply['result']
    except TypeError as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def get_gas_price(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
    """
    Get network gas price

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    int
        Network gas price

    Raises
    ------
    InvalidRPCReplyError
        If received unknown result from endpoint
    """
    method = 'hmyv2_gasPrice'
    try:
        # v2 endpoints return plain JSON numbers, not v1-style hex strings,
        # so parse with int(x) — int(x, 16) raises TypeError on an int and
        # could never succeed against this method.
        return int(rpc_request(method, endpoint=endpoint, timeout=timeout)['result'])
    except (KeyError, TypeError) as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def get_num_peers(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
    """
    Count the peers connected to this node.

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    int
        Number of connected peers

    Raises
    ------
    InvalidRPCReplyError
        If received unknown result from endpoint
    """
    method = 'net_peerCount'
    try:
        # net_peerCount reports a hex string, hence base-16 parsing.
        reply = rpc_request(method, endpoint=endpoint, timeout=timeout)
        return int(reply['result'], 16)
    except TypeError as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def get_circulate_supply(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
    """
    Fetch the circulating token supply.

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    int
        Circulation supply of tokens in ONE

    Raises
    ------
    InvalidRPCReplyError
        If received unknown result from endpoint
    """
    method = 'hmyv2_getCirculatingSupply'  # v2 endpoint
    try:
        reply = rpc_request(method, endpoint=endpoint, timeout=timeout)
        return reply['result']
    except TypeError as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def get_last_cross_links(endpoint=_default_endpoint, timeout=_default_timeout) -> dict:
    """
    Fetch the most recent cross-shard links.

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    dict
        https://api.hmny.io/#4994cdf9-38c4-4b1d-90a8-290ddaa3040e

    Raises
    ------
    InvalidRPCReplyError
        If received unknown result from endpoint
    """
    method = 'hmyv2_getLastCrossLinks'  # v2 endpoint
    try:
        reply = rpc_request(method, endpoint=endpoint, timeout=timeout)
        return reply['result']
    except TypeError as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def get_total_supply(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
    """
    Get total number of pre-mined tokens

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    int
        number of pre-mined tokens

    Raises
    ------
    InvalidRPCReplyError
        If received unknown result from endpoint
    """
    method = 'hmyv2_getTotalSupply'
    try:
        # v2 endpoints return decimal values, not v1-style hex strings;
        # int(x, 16) would fail (TypeError on int, wrong base on decimal
        # strings), so parse with plain int().
        return int(rpc_request(method, endpoint=endpoint, timeout=timeout)['result'])
    except (KeyError, TypeError) as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def get_validators(epoch, endpoint=_default_endpoint, timeout=_default_timeout) -> dict:
    """
    Fetch the validator committee for a given epoch.

    Parameters
    ----------
    epoch: int
        Epoch to get list of validators for
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    dict
        https://api.hmny.io/#4dfe91ad-71fa-4c7d-83f3-d1c86a804da5
    """
    method = 'hmyv2_getValidators'  # v2 endpoint (network subcategory)
    try:
        reply = rpc_request(method, params=[epoch], endpoint=endpoint, timeout=timeout)
        return reply['result']
    except TypeError as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def get_validator_keys(epoch, endpoint=_default_endpoint, timeout=_default_timeout) -> list:
    """
    Fetch the validator BLS public keys for a given epoch.

    Parameters
    ----------
    epoch: int
        Epoch to get list of validator keys for
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    list
        List of bls public keys in the validator committee
    """
    method = 'hmyv2_getValidatorKeys'
    try:
        reply = rpc_request(method, params=[epoch], endpoint=endpoint, timeout=timeout)
        return reply['result']
    except TypeError as e:
        raise InvalidRPCReplyError(method, endpoint) from e
##############
# Node RPCs #
##############
def get_current_bad_blocks(endpoint=_default_endpoint, timeout=_default_timeout) -> list:
    """
    Fetch the bad blocks held in node memory.

    Known issues with RPC not returning correctly.

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    list
        List of bad blocks in node memory
    """
    method = "hmyv2_getCurrentBadBlocks"
    try:
        reply = rpc_request(method, endpoint=endpoint, timeout=timeout)
        return reply['result']
    except TypeError as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def get_node_metadata(endpoint=_default_endpoint, timeout=_default_timeout) -> dict:
    """
    Fetch the node's configuration metadata.

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    dict
        https://api.hmny.io/#03c39b56-8dfc-48ce-bdad-f85776dd8aec
    """
    method = "hmyv2_getNodeMetadata"
    try:
        reply = rpc_request(method, endpoint=endpoint, timeout=timeout)
        return reply['result']
    except TypeError as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def get_protocol_version(endpoint=_default_endpoint, timeout=_default_timeout) -> int:
    """
    Fetch the protocol version the node speaks.

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    int
        Protocol version
    """
    method = "hmyv2_protocolVersion"
    try:
        reply = rpc_request(method, endpoint=endpoint, timeout=timeout)
        return reply['result']
    except TypeError as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def get_net_peer_count(endpoint=_default_endpoint, timeout=_default_timeout) -> str:
    """
    Fetch the raw peer count from the net namespace.

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    str
        Number of peers represented as a Hex string
    """
    method = "net_peerCount"
    try:
        reply = rpc_request(method, endpoint=endpoint, timeout=timeout)
        return reply['result']
    except TypeError as e:
        raise InvalidRPCReplyError(method, endpoint) from e
##############
# Block RPCs #
##############
def get_latest_header(endpoint=_default_endpoint, timeout=_default_timeout) -> dict:
    """
    Fetch the header of the latest block.

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    dict
        Latest block header (see api.hmny.io for the field layout)
    """
    method = "hmyv2_latestHeader"
    try:
        reply = rpc_request(method, endpoint=endpoint, timeout=timeout)
        return reply['result']
    except TypeError as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def get_latest_headers(endpoint=_default_endpoint, timeout=_default_timeout) -> dict:
    """
    Fetch the latest headers for both the beacon chain and the shard chain.

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds

    Returns
    -------
    dict
        https://api.hmny.io/#7625493d-16bf-4611-8009-9635d063b4c0
    """
    method = "hmyv2_getLatestChainHeaders"
    try:
        reply = rpc_request(method, endpoint=endpoint, timeout=timeout)
        return reply['result']
    except TypeError as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def get_header_by_number(endpoint=_default_endpoint, timeout=_default_timeout, block_num=None) -> dict:
    """
    Get block header by block number

    Parameters
    ----------
    endpoint: :obj:`str`, optional
        Endpoint to send request to
    timeout: :obj:`int`, optional
        Timeout in seconds
    block_num: :obj:`int`, optional
        Block number to fetch the header for.  When None (the default) the
        request is sent without params, preserving the previous behavior.

    Returns
    -------
    dict
        https://api.hmny.io/#01148e4f-72bb-426d-a123-718a161eaec0

    Raises
    ------
    InvalidRPCReplyError
        If received unknown result from endpoint
    """
    # Generalization: the docstring always promised a "by number" lookup but
    # the original offered no way to pass the number; block_num adds it as a
    # backward-compatible trailing optional parameter.
    method = "hmyv2_getHeaderByNumber"
    try:
        if block_num is None:
            return rpc_request(method, endpoint=endpoint, timeout=timeout)['result']
        return rpc_request(method, params=[block_num], endpoint=endpoint, timeout=timeout)['result']
    except TypeError as e:
        raise InvalidRPCReplyError(method, endpoint) from e
def get_block_by_number(block_num, include_full_tx= True, inclTx = True, inclStaking= True, | |
<filename>pynos/versions/ver_7/ver_7_1_0/yang/brocade_port_profile_ext.py
#!/usr/bin/env python
import xml.etree.ElementTree as ET
class brocade_port_profile_ext(object):
"""Auto generated class.
"""
    def __init__(self, **kwargs):
        # 'callback' (required) is the callable used to dispatch every
        # generated config tree; all builder methods default to it.
        self._callback = kwargs.pop('callback')
def get_port_profile_for_intf_input_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
config = get_port_profile_for_intf
input = ET.SubElement(get_port_profile_for_intf, "input")
rbridge_id = ET.SubElement(input, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_for_intf_input_request_type_get_request_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
config = get_port_profile_for_intf
input = ET.SubElement(get_port_profile_for_intf, "input")
request_type = ET.SubElement(input, "request-type")
get_request = ET.SubElement(request_type, "get-request")
interface_type = ET.SubElement(get_request, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_for_intf_input_request_type_get_request_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
config = get_port_profile_for_intf
input = ET.SubElement(get_port_profile_for_intf, "input")
request_type = ET.SubElement(input, "request-type")
get_request = ET.SubElement(request_type, "get-request")
interface_name = ET.SubElement(get_request, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_for_intf_input_request_type_getnext_request_last_received_interface_info_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
config = get_port_profile_for_intf
input = ET.SubElement(get_port_profile_for_intf, "input")
request_type = ET.SubElement(input, "request-type")
getnext_request = ET.SubElement(request_type, "getnext-request")
last_received_interface_info = ET.SubElement(getnext_request, "last-received-interface-info")
interface_type = ET.SubElement(last_received_interface_info, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_for_intf_input_request_type_getnext_request_last_received_interface_info_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
config = get_port_profile_for_intf
input = ET.SubElement(get_port_profile_for_intf, "input")
request_type = ET.SubElement(input, "request-type")
getnext_request = ET.SubElement(request_type, "getnext-request")
last_received_interface_info = ET.SubElement(getnext_request, "last-received-interface-info")
interface_name = ET.SubElement(last_received_interface_info, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_for_intf_output_interface_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
config = get_port_profile_for_intf
output = ET.SubElement(get_port_profile_for_intf, "output")
interface = ET.SubElement(output, "interface")
interface_type = ET.SubElement(interface, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_for_intf_output_interface_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
config = get_port_profile_for_intf
output = ET.SubElement(get_port_profile_for_intf, "output")
interface = ET.SubElement(output, "interface")
interface_name = ET.SubElement(interface, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_for_intf_output_interface_port_profile_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
config = get_port_profile_for_intf
output = ET.SubElement(get_port_profile_for_intf, "output")
interface = ET.SubElement(output, "interface")
port_profile = ET.SubElement(interface, "port-profile")
name = ET.SubElement(port_profile, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_for_intf_output_has_more(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
config = get_port_profile_for_intf
output = ET.SubElement(get_port_profile_for_intf, "output")
has_more = ET.SubElement(output, "has-more")
has_more.text = kwargs.pop('has_more')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_status_input_rbridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
input = ET.SubElement(get_port_profile_status, "input")
rbridge_id = ET.SubElement(input, "rbridge-id")
rbridge_id.text = kwargs.pop('rbridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_status_input_port_profile_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
input = ET.SubElement(get_port_profile_status, "input")
port_profile_name = ET.SubElement(input, "port-profile-name")
port_profile_name.text = kwargs.pop('port_profile_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_status_input_port_profile_status(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
input = ET.SubElement(get_port_profile_status, "input")
port_profile_status = ET.SubElement(input, "port-profile-status")
port_profile_status.text = kwargs.pop('port_profile_status')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_status_input_request_type_getnext_request_last_received_port_profile_info_profile_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
input = ET.SubElement(get_port_profile_status, "input")
request_type = ET.SubElement(input, "request-type")
getnext_request = ET.SubElement(request_type, "getnext-request")
last_received_port_profile_info = ET.SubElement(getnext_request, "last-received-port-profile-info")
profile_name = ET.SubElement(last_received_port_profile_info, "profile-name")
profile_name.text = kwargs.pop('profile_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_status_input_request_type_getnext_request_last_received_port_profile_info_profile_mac(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
input = ET.SubElement(get_port_profile_status, "input")
request_type = ET.SubElement(input, "request-type")
getnext_request = ET.SubElement(request_type, "getnext-request")
last_received_port_profile_info = ET.SubElement(getnext_request, "last-received-port-profile-info")
profile_mac = ET.SubElement(last_received_port_profile_info, "profile-mac")
profile_mac.text = kwargs.pop('profile_mac')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_status_output_port_profile_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
output = ET.SubElement(get_port_profile_status, "output")
port_profile = ET.SubElement(output, "port-profile")
name = ET.SubElement(port_profile, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_status_output_port_profile_ppid(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
output = ET.SubElement(get_port_profile_status, "output")
port_profile = ET.SubElement(output, "port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
ppid = ET.SubElement(port_profile, "ppid")
ppid.text = kwargs.pop('ppid')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_status_output_port_profile_is_active(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
output = ET.SubElement(get_port_profile_status, "output")
port_profile = ET.SubElement(output, "port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
is_active = ET.SubElement(port_profile, "is-active")
is_active.text = kwargs.pop('is_active')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_status_output_port_profile_mac_association_mac(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
output = ET.SubElement(get_port_profile_status, "output")
port_profile = ET.SubElement(output, "port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
mac_association = ET.SubElement(port_profile, "mac-association")
mac = ET.SubElement(mac_association, "mac")
mac.text = kwargs.pop('mac')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_status_output_port_profile_mac_association_applied_interface_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
output = ET.SubElement(get_port_profile_status, "output")
port_profile = ET.SubElement(output, "port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
mac_association = ET.SubElement(port_profile, "mac-association")
mac_key = ET.SubElement(mac_association, "mac")
mac_key.text = kwargs.pop('mac')
applied_interface = ET.SubElement(mac_association, "applied-interface")
interface_type = ET.SubElement(applied_interface, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_status_output_port_profile_mac_association_applied_interface_interface_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
output = ET.SubElement(get_port_profile_status, "output")
port_profile = ET.SubElement(output, "port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
mac_association = ET.SubElement(port_profile, "mac-association")
mac_key = ET.SubElement(mac_association, "mac")
mac_key.text = kwargs.pop('mac')
applied_interface = ET.SubElement(mac_association, "applied-interface")
interface_name = ET.SubElement(applied_interface, "interface-name")
interface_name.text = kwargs.pop('interface_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def get_port_profile_status_output_port_profile_has_more(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_port_profile_status = ET.Element("get_port_profile_status")
config = get_port_profile_status
output = ET.SubElement(get_port_profile_status, "output")
port_profile = ET.SubElement(output, "port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
has_more = ET.SubElement(port_profile, "has-more")
has_more.text = kwargs.pop('has_more')
callback = kwargs.pop('callback', self._callback)
return callback(config)
    # NOTE(review): byte-identical duplicate of the
    # get_port_profile_for_intf_input_rbridge_id defined earlier in this
    # class; Python keeps this later definition. Auto-generated — flag for
    # deduplication rather than editing in place.
    def get_port_profile_for_intf_input_rbridge_id(self, **kwargs):
        """Build a get_port_profile_for_intf request with input/rbridge-id
        set from kwargs['rbridge_id'] and dispatch it via the optional
        'callback' kwarg (defaults to self._callback).
        """
        config = ET.Element("config")  # dead store: rebound immediately below
        get_port_profile_for_intf = ET.Element("get_port_profile_for_intf")
        config = get_port_profile_for_intf
        input = ET.SubElement(get_port_profile_for_intf, "input")
        rbridge_id = ET.SubElement(input, "rbridge-id")
        rbridge_id.text = kwargs.pop('rbridge_id')
        callback = kwargs.pop('callback', self._callback)
        return callback(config)
def get_port_profile_for_intf_input_request_type_get_request_interface_type(self, **kwargs):
    """Build a ``get_port_profile_for_intf`` RPC addressing
    input/request-type/get-request/interface-type; dispatch via callback.

    Required kwarg: ``interface_type``. Optional ``callback`` overrides
    ``self._callback``.
    """
    rpc = ET.Element("get_port_profile_for_intf")
    req = ET.SubElement(ET.SubElement(rpc, "input"), "request-type")
    get_req = ET.SubElement(req, "get-request")
    ET.SubElement(get_req, "interface-type").text = kwargs.pop('interface_type')
    send = kwargs.pop('callback', self._callback)
    return send(rpc)
def get_port_profile_for_intf_input_request_type_get_request_interface_name(self, **kwargs):
    """Build a ``get_port_profile_for_intf`` RPC addressing
    input/request-type/get-request/interface-name; dispatch via callback.

    Required kwarg: ``interface_name``. Optional ``callback`` overrides
    ``self._callback``.
    """
    rpc = ET.Element("get_port_profile_for_intf")
    req = ET.SubElement(ET.SubElement(rpc, "input"), "request-type")
    get_req = ET.SubElement(req, "get-request")
    ET.SubElement(get_req, "interface-name").text = kwargs.pop('interface_name')
    send = kwargs.pop('callback', self._callback)
    return send(rpc)
def get_port_profile_for_intf_input_request_type_getnext_request_last_received_interface_info_interface_type(self, **kwargs):
    """Build a ``get_port_profile_for_intf`` RPC addressing
    input/request-type/getnext-request/last-received-interface-info/interface-type.

    Required kwarg: ``interface_type``. Optional ``callback`` overrides
    ``self._callback``.
    """
    rpc = ET.Element("get_port_profile_for_intf")
    req = ET.SubElement(ET.SubElement(rpc, "input"), "request-type")
    info = ET.SubElement(ET.SubElement(req, "getnext-request"),
                         "last-received-interface-info")
    ET.SubElement(info, "interface-type").text = kwargs.pop('interface_type')
    send = kwargs.pop('callback', self._callback)
    return send(rpc)
def get_port_profile_for_intf_input_request_type_getnext_request_last_received_interface_info_interface_name(self, **kwargs):
    """Build a ``get_port_profile_for_intf`` RPC addressing
    input/request-type/getnext-request/last-received-interface-info/interface-name.

    Required kwarg: ``interface_name``. Optional ``callback`` overrides
    ``self._callback``.
    """
    rpc = ET.Element("get_port_profile_for_intf")
    req = ET.SubElement(ET.SubElement(rpc, "input"), "request-type")
    info = ET.SubElement(ET.SubElement(req, "getnext-request"),
                         "last-received-interface-info")
    ET.SubElement(info, "interface-name").text = kwargs.pop('interface_name')
    send = kwargs.pop('callback', self._callback)
    return send(rpc)
def get_port_profile_for_intf_output_interface_interface_type(self, **kwargs):
    """Build a ``get_port_profile_for_intf`` RPC addressing
    output/interface/interface-type; dispatch via callback.

    Required kwarg: ``interface_type``. Optional ``callback`` overrides
    ``self._callback``.
    """
    rpc = ET.Element("get_port_profile_for_intf")
    iface = ET.SubElement(ET.SubElement(rpc, "output"), "interface")
    ET.SubElement(iface, "interface-type").text = kwargs.pop('interface_type')
    send = kwargs.pop('callback', self._callback)
    return send(rpc)
def get_port_profile_for_intf_output_interface_interface_name(self, **kwargs):
    """Build a ``get_port_profile_for_intf`` RPC addressing
    output/interface/interface-name; dispatch via callback.

    Required kwarg: ``interface_name``. Optional ``callback`` overrides
    ``self._callback``.
    """
    rpc = ET.Element("get_port_profile_for_intf")
    iface = ET.SubElement(ET.SubElement(rpc, "output"), "interface")
    ET.SubElement(iface, "interface-name").text = kwargs.pop('interface_name')
    send = kwargs.pop('callback', self._callback)
    return send(rpc)
def get_port_profile_for_intf_output_interface_port_profile_name(self, **kwargs):
    """Build a ``get_port_profile_for_intf`` RPC addressing
    output/interface/port-profile/name; dispatch via callback.

    Required kwarg: ``name``. Optional ``callback`` overrides
    ``self._callback``.
    """
    rpc = ET.Element("get_port_profile_for_intf")
    iface = ET.SubElement(ET.SubElement(rpc, "output"), "interface")
    profile = ET.SubElement(iface, "port-profile")
    ET.SubElement(profile, "name").text = kwargs.pop('name')
    send = kwargs.pop('callback', self._callback)
    return send(rpc)
def get_port_profile_for_intf_output_has_more(self, **kwargs):
    """Build a ``get_port_profile_for_intf`` RPC addressing
    output/has-more; dispatch via callback.

    Required kwarg: ``has_more``. Optional ``callback`` overrides
    ``self._callback``.
    """
    rpc = ET.Element("get_port_profile_for_intf")
    leaf = ET.SubElement(ET.SubElement(rpc, "output"), "has-more")
    leaf.text = kwargs.pop('has_more')
    send = kwargs.pop('callback', self._callback)
    return send(rpc)
def get_port_profile_status_input_rbridge_id(self, **kwargs):
| |
# Repository: neurodata/graphbook-code
# -*- coding: utf-8 -*-
import seaborn as sns
import numpy as np
import matplotlib as mpl
from matplotlib.colors import Colormap
# from graspologic.plot.plot import _check_common_inputs, _process_graphs, _plot_groups
from graspologic.plot.plot import (
_check_common_inputs,
_process_graphs,
make_axes_locatable,
_plot_brackets,
_sort_inds,
_unique_like,
_get_freqs,
)
from graspologic.utils import import_graph
import warnings
import matplotlib.pyplot as plt
import networkx as nx
from typing import Union, Optional, Tuple
from matplotlib.axes import Axes
from scipy.sparse import csr_matrix
from graspologic.plot.plot import _check_common_inputs
from graspologic.preconditions import check_argument
from sklearn.utils import check_consistent_length
import pandas as pd
import seaborn as sns
from matplotlib.collections import LineCollection
from matplotlib.patches import Circle
from matplotlib.patheffects import withStroke
# Named seaborn/matplotlib palettes for the three colormap scales used by
# GraphColormap: sequential, divergent, and qualitative data.
cmaps = {"sequential": "Purples", "divergent": "RdBu_r", "qualitative": "tab10"}
def add_circle(x=0, y=0, radius=0.15, ax=None):
    """Draw a highlight circle at (x, y) on the given axes.

    Creates a new figure/axes pair when ``ax`` is None. The circle is
    unclipped, drawn above other artists (zorder=10), and outlined with a
    white stroke so it stays visible over busy plots.
    """
    if ax is None:
        _, ax = plt.subplots()
    marker = Circle(
        (x, y),
        radius,
        clip_on=False,
        zorder=10,
        linewidth=1,
        edgecolor="black",
        facecolor=(0, 0, 0, 0.0125),
        path_effects=[withStroke(linewidth=5, foreground="w")],
    )
    ax.add_artist(marker)
def text(label, x, y, ax=None, *args, **kwargs):
    """Place centered text on a figure, in axes-relative coordinates.

    ``x`` and ``y`` are scaled by the span of a fixed reference box
    (0.25..0.75 in both directions, i.e. multiplied by 1.0). The text is
    size 32, centered, and backed by a translucent white box. Returns the
    created matplotlib Text artist.
    """
    if ax is None:
        ax = plt.gca()
    lo, span = 0.25, 0.5
    hi = lo + span  # box edge opposite ``lo``; lo + hi == 1.0
    artist = ax.text(
        x * (lo + hi),
        y * (lo + hi),
        label,
        horizontalalignment="center",
        verticalalignment="center",
        transform=ax.transAxes,
        size=32,
        bbox=dict(facecolor="white", edgecolor="none", alpha=0.5),
        *args,
        **kwargs,
    )
    return artist
class GraphColormap:
    """Resolve a named colormap scale into a seaborn palette.

    Parameters
    ----------
    color : {"sequential", "divergent", "qualitative"}
        Which colormap scale to use (looked up in module-level ``cmaps``).
    discrete : bool, default True
        Whether to discretize the colormap. Must remain True for the
        qualitative scale.
    k : int, optional
        Number of discrete colors; may only be given for discrete maps.
    """

    def __init__(self, color, discrete=True, k=None):
        if color not in cmaps.keys():
            raise ValueError("`color` option not a valid option.")
        if (k is not None) and (not discrete):
            raise ValueError("`k` only specified (optionally) for discrete colormaps.")
        self.scale = color
        self.color = cmaps[color]
        self.discrete = discrete
        self.k = k
        # A continuous Colormap object is requested only when not discrete.
        palette_opts = {"as_cmap": not self.discrete}
        if k is not None:
            palette_opts["n_colors"] = self.k
        self.palette = sns.color_palette(self.color, **palette_opts)
def networkplot(
adjacency: Union[np.ndarray, csr_matrix],
x: Union[np.ndarray, str],
y: Union[np.ndarray, str],
node_data: Optional[pd.DataFrame] = None,
node_hue: Optional[Union[np.ndarray, str]] = None,
palette: Optional[Union[str, list, dict]] = None,
node_size: Optional[Union[np.ndarray, str]] = None,
node_sizes: Optional[Union[list, dict, tuple]] = None,
node_alpha: float = 0.8,
edge_hue: str = "source",
edge_linewidth: float = 0.2,
edge_alpha: float = 0.2,
title: str = "",
context: str = "talk",
font_scale: float = 1.0,
figsize: Tuple[int, int] = (10, 10),
ax: Optional[Axes] = None,
legend: str = False,
lckwargs: Tuple = {},
skwargs: Tuple = {},
) -> Axes:
# Alex Note: this is a better version of draw_layout_plot, soon
# to be included in graspologic (PR open)
r"""
Plots 2D layout of input network. Allows for an adjacency matrix
with ``x, y`` as 1D arrays that represent the coordinates of each
node, or an adjacency matrix with ``node_data`` and ``x, y`` as
keys. Note that the indices of the positions given are assumed to
correspond with the adjacency matrix.
Node colors are determined by ``node_hue`` and ``palette``, and if
``node_hue`` is None, all nodes will have the same default color
used by :func:`seaborn.scatterplot`. If ``node_hue`` is given but
``palette`` is None, ``palette`` is set to 'Set1' and ``node_hue``
will be treated as numeric variables. Edge colors are determined by
its nodes, and ``edge_hue`` dictates whether the edges are colored
based on its source or target nodes.
Node sizes can also vary based on ``node_size`` and ``node_sizes``,
and if ``node_size`` is None, all nodes will be of the same default
size used by :func:`seaborn.scatterplot`. If ``node_size`` is given
but ``node_sizes`` is None, ``node_size`` will be treated as numeric
variables.
Note that ``palette`` and ``node_sizes`` will not affect the output
plot if ``node_hue`` and ``node_size`` are None, and ``node_hue`` and
``node_size`` must be the same types as ``x, y``.
Parameters
----------
adjacency: np.ndarray, csr_matrix
Adjacency matrix of input network.
x,y: np.ndarray, str
Variables that specify the positions on the x and y axes. Either an
array of x, y coordinates or a string that accesses a vector in
``node_data``. If ``x, y`` are arrays, they must be indexed the
same way as the adjacency matrix of the input network.
node_data: pd.DataFrame, optional, default: None
Input data. When ``node_data`` is None, ``x, y`` must be np.ndarrays.
When ``node_data`` is a dataframe, ``x, y`` must be strings. Must be
indexed the same way as the adjacency matrix of the input network.
node_hue: np.ndarray, str, optional, default: None
Variable that produces nodes with different colors. Can be either
categorical or numeric, and colors are mapped based on ``palette``.
However if ``palette`` is None, ``node_hue`` is treated as numeric
and 'Set1' is used as ``palette``.
palette: str, list, dict, optional, default: None
Method for choosing colors specified in ``node_hue``. Can be a string
argument supported by :func:`seaborn.color_palette`, a list of colors,
or a dictionary with ``node_hue`` variables as keys and colors as its
values. Note that ``palette`` will not affect the plot if ``node_hue``
is not given.
node_size: np.ndarray, str, optional, default: None
Variable that produces nodes with different sizes. Can be either categorical
or numeric, and sizes are determined based on ``node_sizes``. If the
argument ``node_sizes`` is None, ``node_size`` will be treated as
numeric variables.
node_sizes: list, dict, tuple, optional, default: None
Method for choosing sizes specified in ``node_size``. Can be a list of
sizes, a dictionary with ``node_size`` variables as keys and sizes as
its values, or a tuple defining the minimum and maximum size values.
Note that ``node_sizes`` will not affect the output plot if ``node_hue``
is not given.
node_alpha: float, default: 0.8
Proportional opacity of the nodes.
edge_hue: str, one of {source (default), target}
Determines edge color based on its source or target node.
edge_linewidth: float, default: 0.2
Linewidth of the edges.
edge_alpha: float, default: 0.2
Proportional opacity of the edges.
title: str
Plot title.
context : None, or one of {talk (default), paper, notebook, poster}
Seaborn plotting context
font_scale : float, optional, default: 1.0
Separate scaling factor to independently scale the size of the font
elements.
figsize : tuple of length 2, default: (10, 10)
Size of the figure (width, height)
ax: matplotlib.axes.Axes, optional, default: None
Axes in which to draw the plot. Otherwise, will generate own axes.
legend: False (default), or one of {brief, full, auto}
How to draw the legend. If “brief”, numeric hue and size variables
will be represented with a sample of evenly spaced values. If “full”,
every group will get an entry in the legend. If “auto”, choose
between brief or full representation based on number of levels. If
False, no legend data is added and no legend is drawn.
Returns
-------
ax : matplotlib axis object
Output plot
"""
_check_common_inputs(
figsize=figsize, title=title, context=context, font_scale=font_scale
)
index = range(adjacency.shape[0])
if isinstance(x, np.ndarray):
check_consistent_length(adjacency, x, y)
check_argument(
node_data is None, "If x and y are numpy arrays, meta_data must be None."
)
plot_df = pd.DataFrame(index=index)
x_key = "x"
y_key = "y"
plot_df.loc[:, x_key] = x
plot_df.loc[:, y_key] = y
if node_hue is not None:
check_argument(
isinstance(node_hue, np.ndarray),
"If x and y are numpy arrays, node_hue must be a list or a numpy array.",
)
check_consistent_length(x, node_hue)
hue_key = "hue"
plot_df.loc[:, hue_key] = node_hue
if palette is None:
palette = "Set1"
else:
hue_key = None
elif isinstance(x, str):
check_consistent_length(adjacency, node_data)
check_argument(
node_data is not None,
"If x and y are strings, meta_data must be pandas DataFrame.",
)
plot_df = node_data.copy()
x_key = x
y_key = y
if node_hue is not None:
check_argument(
isinstance(node_hue, str),
"If x and y are strings, node_hue must also be a string.",
)
hue_key = node_hue
if palette is None:
palette = "Set1"
else:
hue_key = None
else:
raise TypeError("x and y must be numpy arrays or strings.")
pre_inds, post_inds = adjacency.nonzero()
pre = np.array(index)[pre_inds.astype(int)]
post = np.array(index)[post_inds.astype(int)]
rows = {"source": pre, "target": post}
edgelist = pd.DataFrame(rows)
pre_edgelist = edgelist.copy()
post_edgelist = edgelist.copy()
pre_edgelist["x"] = pre_edgelist["source"].map(plot_df[x_key])
pre_edgelist["y"] = pre_edgelist["source"].map(plot_df[y_key])
post_edgelist["x"] = post_edgelist["target"].map(plot_df[x_key])
post_edgelist["y"] = post_edgelist["target"].map(plot_df[y_key])
pre_coords = list(zip(pre_edgelist["x"], pre_edgelist["y"]))
post_coords = list(zip(post_edgelist["x"], post_edgelist["y"]))
coords = list(zip(pre_coords, post_coords))
if node_hue is | |
if grid_size == 0:
s = imsize/128.
rangeXY = np.arange(20*s, 110*s+1, 10*s) - 1 # 10x10
elif grid_size == 1:
s = imsize/128.
rangeXY = np.arange(10*s, 120*s+1, 10*s) - 1 # 12x12
else:
rangeXY = np.arange(imsize) # 128x128 or 256x256
self.rangeXY = rangeXY.astype(int)
[xx,yy] = np.meshgrid(rangeXY,rangeXY)
self.grid = xx + 1j*yy
self.grid = self.grid.T.ravel() # transpose just to match MatLab's grid(:) behavior
self.grid_pos = np.hstack([self.grid.imag, self.grid.real]).T
def test(self,
         test_ims,
         cell_type='complex',
         sigma=2*np.pi,
         layers='magnitudes',
         return_dict=False
         ):
    """
    Apply GaborJet to the given image(s).

    :Args:
        test_ims: str or list of str
            Image(s) to process with the model.
    :Kwargs:
        - cell_type (str, default: 'complex')
            'complex' (40 output values) or 'simple' (80 values; real and
            imaginary parts kept separately).
        - sigma (float, default: 2*np.pi)
            Size of the Gaussian envelope of the Gabor filters.
        - layers ({'all', 'phases', 'magnitudes'}, default: 'magnitudes')
            Which outputs to return: Fourier magnitudes, phases, or both.
        - return_dict (bool, default: False)
            When True, phases and grid positions are returned alongside
            the magnitudes in a dict.
    :Returns:
        Magnitudes and, depending on ``layers``/``return_dict``, phases.
    """
    all_mags = []
    all_phases = []
    images = self._im2iter(test_ims)
    for count, image in enumerate(images):
        sys.stdout.write("\rRunning %s... %d%%" % (self.name,
                                                   100*count/len(images)))
        sys.stdout.flush()
        pixels = self.load_image(image, resize=(self.imsize, self.imsize),
                                 flatten=True)
        mag, phase = self._test(pixels, cell_type=cell_type, sigma=sigma)
        all_mags.append(mag.ravel())
        all_phases.append(phase.ravel())
    sys.stdout.write("\rRunning %s... done\n" % self.name)
    collected = OrderedDict([('phases', np.array(all_phases)),
                             ('magnitudes', np.array(all_mags))])
    return self._fmt_output(collected, layers, return_dict=return_dict)
def _test(self, im, cell_type='complex', sigma=2*np.pi):
    """Filter one image with the full Gabor-jet bank, in the frequency domain.

    The image is FFT'd once; for every (scale, orientation) pair an
    analytic Morlet-wavelet kernel is built directly in frequency space,
    multiplied with the image spectrum, and inverse-FFT'd. Responses are
    then sampled at the model's grid positions (``self.rangeXY``).

    :Args:
        im: 2D array of shape (imsize, imsize) — assumed square and equal
            to self.imsize; TODO confirm against caller (test()).
    :Kwargs:
        - cell_type ('complex' or 'simple'): 'complex' stores response
          magnitudes; 'simple' stores real and imaginary parts separately,
          doubling the number of magnitude columns.
        - sigma (float): width of the Gaussian envelope of the wavelet.
    :Returns:
        (mag, phase) arrays with one row per grid position and one column
        per (scale, orientation) pair.
    """
    # FFT of the image
    im_freq = np.fft.fft2(im)
    # setup the parameters: frequency step per pixel along each axis
    kx_factor = 2 * np.pi / self.imsize
    ky_factor = 2 * np.pi / self.imsize
    # setup space coordinates, centered on the image
    xy = np.arange(-self.imsize/2, self.imsize/2).astype(float)
    [tx,ty] = np.meshgrid(xy, xy)
    tx *= kx_factor
    ty *= -ky_factor  # y axis is flipped (image row order)
    # initialize useful variables
    nvars = self.nscales * self.noris
    if cell_type == 'complex':
        mag = np.zeros((len(self.grid), nvars))
        phase = np.zeros((len(self.grid), nvars))
    else:
        # 'simple' cells: real and imaginary parts stored separately
        mag = np.zeros((len(self.grid), 2*nvars))
        phase = np.zeros((len(self.grid), nvars))
    for scale in range(self.nscales):
        # peak spatial frequency halves in power every two scales
        k0 = np.pi/2 * (1/np.sqrt(2))**scale
        for ori in range(self.noris):
            ka = np.pi * ori / self.noris
            k0x = k0 * np.cos(ka)
            k0y = k0 * np.sin(ka)
            # generate a kernel of the specified scale and orientation, with DC at the center;
            # this is the FFT of a Morlet wavelet (http://en.wikipedia.org/wiki/Morlet_wavelet);
            # the second exponential is the DC-correction term
            freq_kernel = 2*np.pi * (
                np.exp( -(sigma/k0)**2/2 * ((k0x-tx)**2 + (k0y-ty)**2) ) -\
                np.exp( -(sigma/k0)**2/2 * (k0**2+tx**2+ty**2) )
                )
            # use fftshift to move DC to the corners, matching im_freq's layout
            freq_kernel = np.fft.fftshift(freq_kernel)
            # convolve the image with the kernel (product in frequency space)
            conv = im_freq*freq_kernel
            # back to the spatial domain for magnitude and phase
            iconv = np.fft.ifft2(conv)
            ph = np.angle(iconv)
            # sample phases at the grid positions; +pi shifts range to [0, 2*pi)
            ph = ph[self.rangeXY,:][:,self.rangeXY] + np.pi
            ind = scale*self.noris+ori
            phase[:,ind] = ph.ravel()
            if cell_type == 'complex':
                mg = np.abs(iconv)
                # get magnitude at the grid positions
                mg = mg[self.rangeXY,:][:,self.rangeXY]
                mag[:,ind] = mg.ravel()
            else:
                mg_real = np.real(iconv)
                mg_imag = np.imag(iconv)
                # get real/imaginary responses at the grid positions
                mg_real = mg_real[self.rangeXY,:][:,self.rangeXY]
                mg_imag = mg_imag[self.rangeXY,:][:,self.rangeXY]
                mag[:,ind] = mg_real.ravel()
                mag[:,nvars+ind] = mg_imag.ravel()
    # magnitudes are what downstream dissimilarity measures use
    return mag, phase
def dissimilarity(self, kind='cosine', *args, **kwargs):
    """
    Dissimilarity between model outputs.

    Overrides the parent implementation only to make ``cosine`` the
    default measure for :class:`GaborJet`.
    """
    parent = super(GaborJet, self)
    return parent.dissimilarity(kind=kind, *args, **kwargs)
class HMAX99(_Model):
"""
HMAX for Python
Based on the original HMAX (`Riesenhuber & Poggio, 1999
<http://dx.doi.org/10.1038/14819>`_)
Code rewritten using a Pure MATLAB implementation by <NAME> at the
MIT Center for Biological and Computational Learning. Most of the
structure, variable names and some of the comments come from this
implementation. More comments have been added and code was optimized as
much as possible while trying to maintain its structure close to the
original. View-tuned units have been added by <NAME>ck.
The output was tested against the Pure MatLab output which can be tested
against the Standard C/MATLAB code featured at `Riesenhuber's lab
<http://riesenhuberlab.neuro.georgetown.edu/hmax/index.html#code>`_.
.. note:: This implementation is not the most current HMAX
implementation that doesn't rely on hardcoding features anymore
(e.g., Serre et al., 2007). Use :class:`HMAX_HMIN` or :class:`HMAX_PNAS` to access MATLAB
interface to a more current version of HMAX.
.. note:: Images are resized to 256 x 256 as required by the original
implementation
Original VTU implementation copyright 2007 <NAME>
Original MatLab implementation copyright 2004 <NAME>
Since the original code did not specify a license type, I assume GNU GPL v3
since it is used in `Jim Mutch's latest implementation of HMAX
<http://cbcl.mit.edu/jmutch/cns/>`_
:Kwargs:
- matlab (boolean, default: False)
If *True*, Gaussian filters will be implemented using the
original models implementation which mimicks MatLab's behavior.
Otherwise, a more efficient numerical method is used.
- filter_type ({'gaussian', 'gabor'}, default: 'gaussian')
Type of V1 filter. We default to gaussian as it was used originally
in HMAX'99. However, many people prefer using Gabor filters as
they presumambly model V1 better.
"""
def __init__(self, matlab=False, filter_type='gaussian'):
    """Set up HMAX'99 layer parameters and precompute the S1 filter bank.

    :Kwargs:
        - matlab (bool, default: False)
            When True, Gaussian filters replicate the original MatLab
            implementation exactly; otherwise a faster numerical
            construction is used.
        - filter_type ({'gaussian', 'gabor'}, default: 'gaussian')
            Type of V1 filter.
    """
    super(HMAX99, self).__init__()
    self.name = "HMAX'99"
    self.safename = 'hmax99'
    self.isflat = True
    self.n_ori = 4  # number of filter orientations
    # S1 filter sizes, grouped by scale band (bands 1-4)
    self.filter_sizes_all = [[7, 9], [11, 13, 15], [17, 19, 21],
                             [23, 25, 27, 29]]
    # per scale band: how many S1 units C1 pools over
    self.C1_pooling_all = [4, 6, 9, 12]
    # how many C1 outputs form one S2 "window" in each direction
    self.S2_config = [2,2]
    if filter_type == 'gaussian':  # "typically" used
        # pick exact-MatLab or fast construction of the DoG filters
        maker = self.get_gaussians_matlab if matlab else self.get_gaussians
        self.filts = maker(self.filter_sizes_all, self.n_ori)
        self.mask_name = 'square'
    elif filter_type == 'gabor':
        self.filts = self.get_gabors(self.filter_sizes_all, self.n_ori)
        self.mask_name = 'circle'
    else:
        raise ValueError("filter type not recognized")
    self.istrained = False  # initially VTUs are not set up
def train(self, train_ims):
    """
    Train the model.

    That is, supply view-tuned units (VTUs) with C2 responses to
    'prototype' images, to which these VTUs will be maximally tuned.

    ``train_ims`` may be a path to a pickle of precomputed C2 responses;
    anything that fails to unpickle (a list of images, a missing file, a
    corrupt pickle) falls back to running the model to get C2 responses.

    :Args:
        train_ims (str, list, tuple, np.ndarray)
            Training images, or a path to pickled C2 responses.
    """
    try:
        # Fast path: precomputed C2 responses pickled on disk. The
        # context manager fixes the file-handle leak of the previous
        # ``pickle.load(open(...))`` form.
        with open(train_ims, 'rb') as pkl_file:
            self.tuning = pickle.load(pkl_file)
        print('done')
    except Exception:
        # Deliberate best-effort fallback, but narrowed from the old bare
        # ``except:`` so KeyboardInterrupt/SystemExit propagate.
        self.tuning = self.test(train_ims, op='training', layers='C2',
                                return_dict=False)
    self.istrained = True
def test(self, test_ims, op='testing', layers='output', return_dict=True):
    """
    Test the model on the given image(s).

    Runs each image through the S1 -> C1 -> S2 -> C2 hierarchy, one scale
    band at a time, and (when trained) computes VTU responses from the C2
    output. Images are resized to 256 x 256 as the original implementation
    requires.

    :Args:
        test_ims (str, list, tuple, np.ndarray)
            Test images.
    :Kwargs:
        - op (str, default: 'testing')
            Label used only in the progress messages.
        - layers (default: 'output')
            Which layer(s) of the output dict to return (via _fmt_output).
        - return_dict (bool, default: True)
            Whether to return a dict of layers or a single layer.
    """
    ims = self._im2iter(test_ims)
    # Get number of filter sizes
    out = OrderedDict()
    size_S1 = sum([len(fs) for fs in self.filter_sizes_all])
    # S1 responses for all filter sizes and orientations of one image
    S1 = np.zeros((256, 256, size_S1, self.n_ori))
    out['C1'] = np.zeros((len(ims), 256, 256, self.n_ori,
                          len(self.filter_sizes_all)))
    # S2 has an irregular shape which depends on the spatial frequency band
    S2 = []
    # one column of C2 responses per scale band, max-pooled at the end
    C2_tmp = np.zeros(((self.S2_config[0]*self.S2_config[1])**self.n_ori,
                       len(self.filter_sizes_all)))
    out['C2'] = np.zeros((len(ims), C2_tmp.shape[0]))
    for imno, im in enumerate(ims):
        # im *= 255
        sys.stdout.write("\rRunning HMAX'99... %s: %d%%" %(op, 100*imno/len(ims)))
        sys.stdout.flush()
        im = self.load_image(im, flatten=True, resize=(256,256))
        # Go through each scale band
        S1_idx = 0
        for which_band in range(len(self.filter_sizes_all)):
            # calculate S1 responses
            S1_tmp = self.get_S1(im, which_band)
            num_filter = len(self.filter_sizes_all[which_band])
            # store S1 responses for each scale band
            S1[..., S1_idx:S1_idx + num_filter, :] = S1_tmp
            S1_idx += num_filter
            # calculate other layers
            C1_tmp = self.get_C1(S1_tmp, which_band)
            out['C1'][imno, ..., which_band] = C1_tmp
            S2_tmp = self.get_S2(C1_tmp, which_band)
            S2.append(S2_tmp)
            C2_tmp[:, which_band] = self.get_C2(S2_tmp, which_band)
        out['C2'][imno] = np.max(C2_tmp, -1)  # max over all scale bands
    # calculate VTU if trained
    # NOTE(review): the flattened source is ambiguous about whether this
    # block sat inside the image loop; the result is the same either way
    # since get_VTU consumes the full C2 matrix — confirm against upstream.
    if self.istrained:
        out['VTU'] = self.get_VTU(out['C2'])
    sys.stdout.write("\rRunning HMAX'99... %s: done\n" %op)
    output = self._fmt_output(out, layers, return_dict=return_dict)
    return output
def get_gaussians(
self,
filter_sizes_all,
n_ori = 4,
sigDivisor = 4.
):
"""
Generates 2D difference of Gaussians (DoG) filters.
This function is a faster, more accurate and more elegant version of
the original gaussian_filters_matlab but will not produce identical
filters as the original (but very close). For practical purposes, this
one is prefered. In case you want to mimic the identical behavior of
the original HMAX, use gaussian_filters_matlab.
:Args:
filter_sizes_all (list of depth 2)
A nested list (grouped by filter bands) of integer filter sizes
:Kwargs:
- n_ori (int, default: 4)
A number of filter orientations. Orientations are spaced by np.pi/n_ori.
| |
# GitHub stars at time of snapshot: 0
"""
Authors: <NAME>, <NAME>
Principal Investigator: <NAME>, Ph.D. from Brown University
12 February 2020
Updated: 27 November 2020
SCOT algorithm: Single Cell alignment using Optimal Transport
Correspondence: <EMAIL>, <EMAIL>, <EMAIL>
"""
### Import python packages we depend on:
# For regular matrix operations:
import numpy as np
# For optimal transport operations:
import ot
from ot.unbalanced import sinkhorn_unbalanced
from ot.gromov import init_matrix, gwloss, gwggrad
# For computing graph distances:
from sklearn.neighbors import kneighbors_graph
from scipy.sparse.csgraph import dijkstra
# For pre-processing, normalization
from sklearn.preprocessing import StandardScaler, normalize
# For convergence errors and parameter warnings:
import sys
import warnings
class SCOT(object):
"""
SCOT algorithm for unsupervised alignment of single-cell multi-omic data.
https://www.biorxiv.org/content/10.1101/2020.04.28.066787v2
Example use:
scot= SCOT(domain1, domain2)
aligned_domain1, aligned_domain2= scot.align(k, e, balanced=True, rho=None, verbose=True, normalize=True, norm="l2", XontoY=True)
Input: domain1, domain2, in form of numpy arrays/matrices, where the rows correspond to samples and columns correspond to features.
Returns: aligned domain 1, aligned domain 2, in form of numpy arrays/matrices.
Parameters:
k: Number of neighbors to be used when constructing kNN graphs. Default is min(50, 0.2n), where n is the number of samples in the smallest domain
e: Regularization constant for the entropic regularization term in entropic Gromov-Wasserstein optimal transport formulation. Default: 1e-4. We recommend users to search a grid between 5e-4 to 1e-2
balanced: If you believe there will be a significant underrepresentation/overrepresentation of certain cell types in one of the domains you attempt to align, set this to False. When set to False, it performs unbalanced optimal transport to account for underrepresentation. Default=True.
rho: Only needs to be set if using unbalanced OT (if balanced is set to False). Defines the regularization constant for KL relaxation term in unbalanced optimal transport. Default = 5e-2. Ideal value defines on the extent of underrepresentation of cell types between the domains (more unbalanced might want more relaxation)
verbose: Prints loss when optimizing the optimal transport formulation. Default=True
normalize: When set to True, normalizes the input domains before performing alingment, otherwise skips normalization. Default= True
norm: Describes what type of data normalization to use. Available options: "l2", "l1", "max", "zscore". Default= "l2". We have found l2 normalization yields better empirical results with real world single-cell sequencing data.
XontoY: Describes the direction of barcentric projection used for alignment. When set to True, projects domain 1 onto domain 2. False does opposite. Direction of projection makes little difference in alignment quality. Default= True.
"""
def __init__(self, domain1, domain2, normalize=True):
    """Store the two domains and initialize all algorithm state to None.

    :param domain1: samples x features array for the first domain (X).
    :param domain2: samples x features array for the second domain (y).
    :param normalize: accepted for backward compatibility but unused here;
        normalization is controlled by the ``normalize`` argument of
        ``align()``.
    """
    self.X=domain1
    self.y=domain2
    self.p= None #empirical probability distribution for domain 1 (X)
    self.q= None #empirical probability distribution for domain 2 (y)
    self.Xgraph=None #kNN graph of domain 1 (X)
    # Fixed attribute name (was the misspelled, never-read ``ygraphh``),
    # so self.ygraph exists even before build_kNN() assigns it.
    self.ygraph=None #kNN graph of domain 2 (y)
    self.Cx=None #intra-domain graph distances for domain 1 (X)
    self.Cy=None #intra-domain graph distances for domain 2 (y)
    self.coupling=None # Coupling matrix that relates domain 1 and domain 2. Entries describes the probability of correspondence between the samples in domain 1 (rows) and domain 2 (columns)
    self.gwdist=None # Gromov-Wasserstein distance between domains after alignment. Can be used as a proxy for alignment quality
    self.flag = None # convergence flag
def init_marginals(self):
    """Set uniform empirical marginals over each domain's samples.

    With no prior information, every observed sample is treated as
    equally likely in both domains.
    """
    self.p = ot.unif(self.X.shape[0])
    self.q = ot.unif(self.y.shape[0])
def normalize(self, norm="l2", bySample=True):
    """Normalize both domains in place.

    ``norm`` is one of 'l1'/'l2'/'max' (applied per sample when
    ``bySample`` is truthy-True/None, else per feature) or 'zscore'
    (column standardization via StandardScaler).
    """
    assert (norm in ["l1","l2","max", "zscore"]), "Norm argument has to be either one of 'max', 'l1', 'l2' or 'zscore'. If you would like to perform another type of normalization, please give SCOT the normalize data and set the argument normalize=False when running the algorithm."
    axis = 1 if (bySample == True or bySample == None) else 0
    if norm == "zscore":
        scaler = StandardScaler()
        self.X = scaler.fit_transform(self.X)
        self.y = scaler.fit_transform(self.y)
    else:
        # module-level sklearn ``normalize`` (not this method)
        self.X = normalize(self.X, norm=norm, axis=axis)
        self.y = normalize(self.y, norm=norm, axis=axis)
def build_kNN(self, k, mode="connectivity", metric="correlation"):
    """
    Helper function: build kNN graphs for each domain.

    The graphs feed the intra-domain distance computation
    (compute_graphDistances). Connectivity-mode graphs include each
    node as its own neighbor.
    """
    # Check that inputs are legal
    assert (mode in ["connectivity", "distance"]), "Mode argument has to be either one of 'connectivity' or 'distance'. "
    assert(k <= min(self.X.shape[0], self.y.shape[0])), "Please set the argument k (for the number of neighbors in kNN graphs) to something no larger than the number of samples in the domain with the fewest samples"
    include_self = (mode == "connectivity")
    self.Xgraph = kneighbors_graph(self.X, k, mode=mode, metric=metric, include_self=include_self)
    self.ygraph = kneighbors_graph(self.y, k, mode=mode, metric=metric, include_self=include_self)
def compute_graphDistances(self):
    """Compute normalized shortest-path distance matrices on the kNN graphs.

    Runs Dijkstra over each domain's kNN graph, clamps infinite entries
    (disconnected pairs) to the largest finite distance, and rescales each
    matrix by its maximum so distances lie in [0, 1].
    """
    self.Cx = dijkstra(csgraph=self.Xgraph, directed=False, return_predecessors=False)
    self.Cy = dijkstra(csgraph=self.ygraph, directed=False, return_predecessors=False)
    # Disconnected pairs come back as inf; clamp so downstream OT stays finite.
    finite_max_x = np.nanmax(self.Cx[self.Cx != np.inf])
    self.Cx[self.Cx > finite_max_x] = finite_max_x
    finite_max_y = np.nanmax(self.Cy[self.Cy != np.inf])
    self.Cy[self.Cy > finite_max_y] = finite_max_y
    # Normalize by the maximum distance within each domain.
    self.Cx = np.asarray(self.Cx / self.Cx.max(), dtype=np.float64)
    self.Cy = np.asarray(self.Cy / self.Cy.max(), dtype=np.float64)
def unbalanced_entropic_gromov_wasserstein(self, e, rho, loss_fun="square_loss", max_iter=1000, tol=1e-6, verbose=True):
    """
    Helper function: unbalanced Gromov-Wasserstein OT for when there is cell type unbalance between domains.
    Used when balanced=False in align() function.
    Adapted from POT package using ot.unbalanced.sinkhorn() and ot.gromov.entropic_gromov_wasserstein().

    Alternates between computing the GW gradient for the current coupling
    and re-solving an unbalanced Sinkhorn problem with that gradient as
    the cost, until the coupling stops changing (or max_iter is reached).

    Parameters:
        e: Regularization constant for the entropic regularization term in entropic Gromov-Wasserstein optimal transport formulation.
        rho: Regularization constant for KL relaxation term in unbalanced Sinkhorn-Knopp iterations.
        loss_fun: loss name passed to POT's init_matrix (default "square_loss").
        max_iter: cap on outer iterations (also passed to each Sinkhorn solve).
        tol: convergence threshold on the coupling change (Frobenius norm).
        verbose: print a progress/error table every 200 iterations.
    Returns: Coupling matrix and log dictionary where error and GW distance from optimization have been logged.
    """
    # Initialize the coupling as the independent (outer-product) coupling.
    coupling = np.outer(self.p, self.q)
    # Precompute the constant part and factor matrices of the GW gradient.
    constC, hCx, hCy = init_matrix(self.Cx, self.Cy, self.p, self.q, loss_fun)
    cpt = 0
    err = 1
    log = {'err': []}
    while (err > tol and cpt < max_iter):
        couplingPrev = coupling
        # compute the gradient of the GW objective at the current coupling
        grad = gwggrad(constC, hCx, hCy, coupling)
        # solve the unbalanced OT problem with the gradient as cost matrix
        coupling = sinkhorn_unbalanced(self.p, self.q, grad, e, rho, method='sinkhorn', numItermax=max_iter, stopThr=tol, verbose=verbose, log=False)
        if cpt % 10 == 0:
            # we can speed up the process by checking for the error only all the 10th iterations
            err = np.linalg.norm(coupling - couplingPrev)
            log['err'].append(err)
            if verbose:
                if cpt % 200 == 0:
                    print('{:5s}|{:12s}'.format(
                        'It.', 'Err') + '\n' + '-' * 19)
                print('{:5d}|{:8e}|'.format(cpt, err))
        cpt += 1
    # Final GW distance of the converged (or last) coupling.
    log['gw_dist'] = gwloss(constC, hCx, hCy, coupling)
    return coupling, log
def find_correspondences(self, e, balanced=True, rho=5e-2, verbose=True):
self.flag = True
if balanced:
self.coupling, log = ot.gromov.entropic_gromov_wasserstein(self.Cx, self.Cy, self.p, self.q, 'square_loss', epsilon=e, log=True, verbose=verbose)
self.gwdist= log['gw_dist']
if (np.isnan(self.coupling).any() or np.isinf(self.coupling).any() or np.sum(self.coupling) < .98):
#sys.exit("Alignment algorithm did not converge. This is very likely due to low e values (the epsilon parameter set for entropic regularization constant). Please try again with a higher e value. We recommend not going below 5e-4.")
self.flag = False
else:
self.coupling, log = self.unbalanced_entropic_gromov_wasserstein(e=e, rho= rho, loss_fun="square_loss", max_iter=1000, tol=1e-6, verbose=True)
self.gwdist= log['gw_dist']
if (np.isnan(self.coupling).any() or np.isinf(self.coupling).any()): # Note: It's possible for unbalanced OT to converge and return a coupling matrix where sum does not add up to one. So we cannot check for sum of coupling elements for coupling in this case.
#sys.exit("Alignment algorithm did not converge. This is very likely due to low e values (the epsilon parameter set for entropic regularization constant). Please try again with a higher e value. We recommend not going below 5e-4.")
self.flag = False
def barycentric_projection(self, XontoY=True):
if XontoY:
y_aligned=self.y
X_weights = np.sum(self.coupling, axis = 1)
X_aligned=np.matmul(self.coupling, self.y) / X_weights[:,None]
else:
X_aligned=self.X
y_weights = np.sum(self.coupling, axis = 0)
y_aligned=np.matmul(np.transpose(self.coupling), self.X) / y_weights[:,None]
return X_aligned, y_aligned
def align(self, k, e, balanced=True, rho=1e-3, verbose=True, normalize=True, norm="l2", XontoY=True):
if normalize:
self.normalize(norm=norm)
self.init_marginals()
self.build_kNN(k)
self.compute_graphDistances()
self.find_correspondences(e=e, balanced=balanced, rho=rho, verbose=verbose)
X_aligned, y_aligned = self.barycentric_projection(XontoY)
return X_aligned, y_aligned
def search_scot(self, ks, es, all_values = False):
'''
Performs a hyperparameter sweep for given values of k and epsilon
Default: return the parameters corresponding to the lowest GW distance
(Optional): return all k, epsilon, and GW values
'''
self.init_marginals()
# store values of k, epsilon, and gw distance
k_plot=[]
e_plot=[]
g_plot=[]
total=len(es)*len(ks)
counter=0
# search in k first to reduce graph computation
for k in ks:
self.build_kNN(k)
self.compute_graphDistances()
for e in es:
counter+=1
if (counter % 10 == 0):
| |
if k != 'local_vars'}
qj('some log')
mock_log_fn.assert_called_once_with(RegExp(
r"qj: <qj_test> test_expected_locals_mods: 'some log' <\d+>: some log"))
# Make sure that none of the existing variables got modified.
self.assertEqual(local_vars, {k: v for k, v in locals().items()
if (k != '__qj_magic_wocha_doin__' and
k != 'local_vars')})
# Make sure that only the new variable name is added.
local_var_names = set([k for k in local_vars.keys()])
local_var_names.add('__qj_magic_wocha_doin__')
local_var_names.add('local_vars')
self.assertEqual(local_var_names, set([k for k in locals().keys()]))
  def test_make_global(self):
    """qj.make_global() installs qj into __builtins__ (module or dict form)."""
    if hasattr(__builtins__, 'qj'):
      delattr(__builtins__, 'qj')
      self.assertRaises(AttributeError, lambda: getattr(__builtins__, 'qj'))
    qj.make_global()
    # __builtins__ is a module when run as a script, but a plain dict when
    # the module is imported by a test runner -- check both shapes.
    if __name__ == '__main__':
      # Running with `$ python qj/tests/qj_tests.py` goes down this path.
      self.assertEqual(qj, __builtins__.__dict__['qj'])
    else:
      # Running with `$ nosetests` goes down this path.
      self.assertEqual(qj, __builtins__['qj'])
  def test_multiline(self):
    """Multiline values are logged after a '(multiline log follows)' marker."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj('some\nlog')
      mock_log_fn.assert_called_once_with(RegExp(
          r"qj: <qj_test> test_multiline: 'some\\nlog' <\d+>: \(multiline log follows\)\n"
          "some\nlog"))
  def test_multiline_with_l(self):
    """Both the value and the lambda's extra info get multiline treatment."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj(l=lambda _: 'some\nextra\ninfo', x='some\nlog')
      mock_log_fn.assert_has_calls(
          [
              mock.call(
                  RegExp(r"qj: <qj_test> test_multiline_with_l: l=lambda _: 'some\\nextra\\ninfo', x='some\\nlog' <\d+>: "
                         r'\(multiline log follows\)\nsome\nlog')),
              mock.call(
                  RegExp(r'qj:\s+\(multiline log follows\)\nsome\nextra\ninfo')),
          ],
          any_order=False)
      self.assertEqual(mock_log_fn.call_count, 2)
  def test_multiline_with_r(self):
    """Multiline override return values are logged and returned unchanged."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      alternative_return_value = 'some other\nreturn value'
      out = qj(r=alternative_return_value, x='some\nlog')
      mock_log_fn.assert_has_calls([
          mock.call(RegExp(
              r"qj: <qj_test> test_multiline_with_r: r=alternative_return_value, x='some\\nlog' <\d+>: "
              r'\(multiline log follows\)\nsome\nlog')),
          mock.call(RegExp(
              r'qj:\s+Overridden return value: \(multiline log follows\)\n'
              r'some other\nreturn value')),
      ], any_order=False)
      self.assertEqual(mock_log_fn.call_count, 2)
      self.assertIs(out, alternative_return_value)
  def test_r_magic_works_across_modules(self):
    """The r= return-override works when qj is called from another module."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj.make_global()
      input_value = 'some log'
      # Plain pass-through: value returned unchanged, one log line.
      out = qj_test_helper.LogToQJ(x=input_value)
      mock_log_fn.assert_called_once_with(RegExp(
          r'qj: <qj_test_helper> LogToQJ: \*\*kwargs <\d+>: some log'))
      self.assertIs(out, input_value)
      mock_log_fn.reset_mock()
      # r=None overrides the return value even across the module boundary.
      out = qj_test_helper.LogToQJ(x=input_value, r=None)
      mock_log_fn.assert_has_calls([
          mock.call(RegExp(
              r'qj: <qj_test_helper> LogToQJ: \*\*kwargs <\d+>: some log')),
          mock.call(RegExp(
              r'qj:\s+Overridden return value: None')),
      ], any_order=False)
      self.assertEqual(mock_log_fn.call_count, 2)
      self.assertIsNone(out)
      mock_log_fn.reset_mock()
      # Same checks through the double-qj helper.
      out = qj_test_helper.LogToQJQJ(x=input_value)
      mock_log_fn.assert_called_once_with(RegExp(
          r'qj: <qj_test_helper> LogToQJQJ: \*\*kwargs <\d+>: some log'))
      self.assertIs(out, input_value)
      mock_log_fn.reset_mock()
      out = qj_test_helper.LogToQJQJ(x=input_value, r=None)
      mock_log_fn.assert_has_calls([
          mock.call(RegExp(
              r'qj: <qj_test_helper> LogToQJQJ: \*\*kwargs <\d+>: some log')),
          mock.call(RegExp(
              r'qj:\s+Overridden return value: None')),
      ], any_order=False)
      self.assertEqual(mock_log_fn.call_count, 2)
      self.assertIsNone(out)
  def test_logs_with_positional_args(self):
    """All qj options can be passed positionally; logging + debugger fire."""
    with mock.patch('logging.info') as mock_log_fn:
      with mock.patch('ipdb.set_trace') as mock_debug_fn:
        qj.LOG_FN = mock_log_fn
        qj.DEBUG_FN = mock_debug_fn
        alternative_return_value = 'some other return value'
        # Positional order: x, s, l, d, p, n, z, r, ... (see qj's signature).
        out = qj('some log', 'some prefix', lambda _: 'some extra info', True,
                 True, False, False, alternative_return_value, False, True,
                 False, False, False, False, False, False, False)
        mock_log_fn.assert_has_calls(
            [
                mock.call(
                    RegExp(r'qj: <qj_test> test_logs_with_positional_args: some prefix '
                           r'<\d+>: some log')),
                mock.call(
                    RegExp(r'qj:\s+some extra info')),
                mock.call(
                    RegExp(r'qj:\s+Public properties:\n')),
                mock.call(
                    RegExp(r'qj:\s+Overridden return value: some other return value')),
            ],
            any_order=False)
        self.assertEqual(mock_log_fn.call_count, 4)
        mock_debug_fn.assert_called_once()
        self.assertIs(out, alternative_return_value)
  def test_no_logs_with_positional_args(self):
    """With the enable flag off, qj logs nothing and passes x through."""
    with mock.patch('logging.info') as mock_log_fn:
      with mock.patch('ipdb.set_trace') as mock_debug_fn:
        qj.LOG_FN = mock_log_fn
        input_value = 'some log'
        alternative_return_value = 'some other return value'
        # Same positional call as the logging test, but with logging disabled,
        # so even the r= override must not take effect.
        out = qj('some log', 'some prefix', lambda _: 'some extra info', True,
                 True, False, False, alternative_return_value, False, False,
                 False, False, False, False, False, False, False)
        mock_log_fn.assert_not_called()
        mock_debug_fn.assert_not_called()
        self.assertIs(out, input_value)
  def test_logs_max_times_ends_with_warning(self):
    """After MAX_FRAME_LOGS logs in one frame, qj warns and stops logging."""
    with mock.patch('logging.info') as mock_log_fn:
      with mock.patch('ipdb.set_trace') as mock_debug_fn:
        qj.LOG_FN = mock_log_fn
        qj.MAX_FRAME_LOGS = 1
        original_return_value = 'some log'
        alternative_return_value = 'some other return value'
        out = []
        for _ in range(2):
          out.append(qj(original_return_value, 'some prefix', lambda _: 'some extra info', d=True,
                        p=True, r=alternative_return_value, b=True))
        qj('other log', 'other prefix')
        mock_log_fn.assert_has_calls(
            [
                mock.call(
                    RegExp(r'qj: <qj_test> test_logs_max_times_ends_with_warning:'
                           r' some prefix <\d+>: some log')),
                mock.call(
                    RegExp(r'qj:\s+some extra info')),
                mock.call(
                    RegExp(r'qj:\s+Public properties:\n')),
                mock.call(
                    RegExp(r'qj:\s+Overridden return value: some other return value')),
                mock.call(
                    RegExp(r'qj: <qj_test> test_logs_max_times_ends_with_warning:'
                           r' Maximum per-frame logging hit \(1\)\.')),
                mock.call(
                    RegExp(r'qj: <qj_test> test_logs_max_times_ends_with_warning:'
                           r' other prefix <\d+>: other log')),
                mock.call(
                    RegExp(r'qj: <qj_test> test_logs_max_times_ends_with_warning:'
                           r' Maximum per-frame logging hit \(1\)\.')),
            ],
            any_order=False)
        self.assertEqual(mock_log_fn.call_count, 7)
        mock_debug_fn.assert_called_once()
        # The second call was suppressed, so its r= override did not apply.
        self.assertIs(out[0], alternative_return_value)
        self.assertIs(out[1], original_return_value)
  def test_logs_with_pad(self):
    """pad= surrounds the log line with padding (char repeat or blank lines)."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj('some log', pad='#')
      qj('some other log', pad=3)
      mock_log_fn.assert_has_calls(
          [
              mock.call(
                  RegExp(r'#+')),
              mock.call(
                  RegExp(r"qj: <qj_test> test_logs_with_pad: 'some log', pad='#' <\d+>: some log")),
              mock.call(
                  RegExp(r'#+')),
              mock.call(
                  RegExp(r'\n\n')),
              mock.call(
                  RegExp(r"qj: <qj_test> test_logs_with_pad: 'some other log', pad=3 <\d+>: some other log")),
              mock.call(
                  RegExp(r'\n\n')),
          ],
          any_order=False)
      self.assertEqual(mock_log_fn.call_count, 6)
  def test_logs_with_tictoc(self):
    """tic= records a start time; toc= logs the elapsed seconds since it."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj._tics = []  # Ensure an empty tic stack.
      qj('tic log', tic=1)
      qj('toc log', toc=1)
      mock_log_fn.assert_has_calls(
          [
              mock.call(
                  RegExp(r"qj: <qj_test> test_logs_with_tictoc: 'tic log', tic=1 <\d+>: tic log")),
              mock.call(
                  RegExp(r'qj:\s+Added tic\.')),
              mock.call(
                  RegExp(r"qj: <qj_test> test_logs_with_tictoc: 'toc log', toc=1 <\d+>: toc log")),
              mock.call(
                  RegExp(r"qj:\s+\d\.\d\d\d\d seconds since 'tic log', tic=1\.")),
          ],
          any_order=False)
      self.assertEqual(mock_log_fn.call_count, 4)
  def test_logs_with_tictoc_no_x(self):
    """tic/toc work without an x value, logging Adding tic / Computing toc."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj._tics = []  # Ensure an empty tic stack.
      qj(tic=1)
      qj(toc=1)
      mock_log_fn.assert_has_calls(
          [
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_no_x: tic=1 <\d+>: Adding tic\.')),
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_no_x: toc=1 <\d+>: Computing toc\.')),
              mock.call(
                  RegExp(r'qj:\s+\d\.\d\d\d\d seconds since tic=1\.')),
          ],
          any_order=False)
      self.assertEqual(mock_log_fn.call_count, 3)
  def test_logs_with_tictoc_list_comp(self):
    """tic+toc on the same call inside a list comprehension chain correctly."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj._tics = []  # Ensure an empty tic stack.
      # Each iteration tocs the previous iteration's tic, then adds its own;
      # the final toc=1 outside the comprehension closes the last tic.
      _ = [qj(x, tic=1, toc=1) for x in range(2)]
      qj(toc=1)
      mock_log_fn.assert_has_calls(
          [
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_list_comp: x, tic=1, toc=1 <\d+>: 0')),
              mock.call(
                  RegExp(r'qj:\s+Added tic\.')),
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_list_comp: x, tic=1, toc=1 <\d+>: 1')),
              mock.call(
                  RegExp(r'qj:\s+\d\.\d\d\d\d seconds since x, tic=1, toc=1\.')),
              mock.call(
                  RegExp(r'qj:\s+Added tic\.')),
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_list_comp: \s?toc=1 <\d+>: Computing toc\.')),
              mock.call(
                  RegExp(r'qj:\s+\d\.\d\d\d\d seconds since x, tic=1, toc=1\.')),
          ],
          any_order=False)
      self.assertEqual(mock_log_fn.call_count, 7)
  def test_logs_with_tictoc_nested(self):
    """Nested tics are closed LIFO: each toc=1 pops the most recent tic."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj._tics = []  # Ensure an empty tic stack.
      qj(tic=1)
      qj(tic=2)
      qj(toc=1)
      qj(toc=1)
      mock_log_fn.assert_has_calls(
          [
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_nested: tic=1 <\d+>: Adding tic\.')),
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_nested: tic=2 <\d+>: Adding tic\.')),
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_nested: toc=1 <\d+>: Computing toc\.')),
              mock.call(
                  RegExp(r'qj:\s+\d\.\d\d\d\d seconds since tic=2\.')),
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_nested: toc=1 <\d+>: Computing toc\.')),
              mock.call(
                  RegExp(r'qj:\s+\d\.\d\d\d\d seconds since tic=1\.')),
          ],
          any_order=False)
      self.assertEqual(mock_log_fn.call_count, 6)
  def test_logs_with_tictoc_negative_toc(self):
    """toc=-1 pops and reports every outstanding tic, emptying the stack."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj._tics = []  # Ensure an empty tic stack.
      qj(tic=1)
      qj(tic=2)
      qj(toc=-1)
      mock_log_fn.assert_has_calls(
          [
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_negative_toc: tic=1 <\d+>: Adding tic\.')),
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_negative_toc: tic=2 <\d+>: Adding tic\.')),
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_negative_toc: toc=-1 <\d+>: Computing toc\.')),
              mock.call(
                  RegExp(r'qj:\s+\d\.\d\d\d\d seconds since tic=2\.')),
              mock.call(
                  RegExp(r'qj:\s+\d\.\d\d\d\d seconds since tic=1\.')),
          ],
          any_order=False)
      self.assertEqual(mock_log_fn.call_count, 5)
      self.assertEqual(len(qj._tics), 0)
  def test_logs_with_tictoc_across_fn_calls(self):
    """Tics set inside a nested function remain on the stack for the caller."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj._tics = []  # Ensure an empty tic stack.
      def tictoc_across_fn_calls():
        qj(tic=2)
      qj(tic=1)
      tictoc_across_fn_calls()
      qj(toc=-1)
      mock_log_fn.assert_has_calls(
          [
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_across_fn_calls: tic=1 <\d+>: Adding tic\.')),
              mock.call(
                  RegExp(r'qj: <qj_test> tictoc_across_fn_calls: tic=2 <\d+>: Adding tic\.')),
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_tictoc_across_fn_calls: toc=-1 <\d+>: Computing toc\.')),
              mock.call(
                  RegExp(r'qj:\s+\d\.\d\d\d\d seconds since tic=2\.')),
              mock.call(
                  RegExp(r'qj:\s+\d\.\d\d\d\d seconds since tic=1\.')),
          ],
          any_order=False)
      self.assertEqual(mock_log_fn.call_count, 5)
      self.assertEqual(len(qj._tics), 0)
  def test_logs_with_tictoc_no_unmatched_tic(self):
    """toc with no outstanding tic logs a clear message and leaves the stack empty."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      qj._tics = []  # Ensure an empty tic stack.
      qj(toc=1)
      mock_log_fn.assert_called_once_with(
          RegExp(r'qj: <qj_test> test_logs_with_tictoc_no_unmatched_tic: toc=1 <\d+>: Unable to compute toc -- no unmatched tic\.'))
      self.assertEqual(len(qj._tics), 0)
  def test_logs_with_time(self):
    """time=1 wraps the returned callable so each call logs average timing."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      def foo():
        pass
      qj(foo, time=1)()
      mock_log_fn.assert_has_calls(
          [
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_time: foo, time=1 <\d+>: <function .*foo at 0x.*>')),
              mock.call(
                  RegExp(r'qj:\s+Wrapping return value in timing function\.')),
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_time: Average timing for <function .*foo at 0x.*> across 1 call <\d+>: \d\.\d\d\d\d seconds')),
          ],
          any_order=False)
      self.assertEqual(mock_log_fn.call_count, 3)
  def test_logs_with_time_decorator(self):
    """@qj(time=1) used as a decorator times each call of the function."""
    with mock.patch('logging.info') as mock_log_fn:
      qj.LOG_FN = mock_log_fn
      @qj(time=1)
      def foo():
        pass
      foo()
      mock_log_fn.assert_has_calls(
          [
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_time_decorator: time=1 <\d+>: Preparing decorator to measure timing\.\.\.')),
              mock.call(
                  RegExp(r'qj:\s+Decorating <function .*foo at 0x.*> with timing function\.')),
              mock.call().__nonzero__() if sys.version_info[0] < 3 else mock.call().__bool__(),  # TODO(iansf): it's unclear why this is necessary in this case.
              mock.call(
                  RegExp(r'qj: <qj_test> test_logs_with_time_decorator: Average timing for <function .*foo at 0x.*> across 1 call <\d+>: \d\.\d\d\d\d seconds')),
          ],
          any_order=False)
      self.assertEqual(mock_log_fn.call_count, 3)
  def test_logs_with_catch(self):
    """catch=1 wraps the callable so raised exceptions are logged and debugged."""
    with mock.patch('logging.info') as mock_log_fn:
      with mock.patch('ipdb.set_trace') as mock_debug_fn:
        qj.LOG_FN = mock_log_fn
        qj.DEBUG_FN = mock_debug_fn
        def foo():
          raise Exception('FOO')
        qj(foo, catch=1)()
        mock_log_fn.assert_has_calls(
            [
                mock.call(
                    RegExp(r'qj: <qj_test> test_logs_with_catch: foo, catch=1 <\d+>: <function .*foo at 0x.*>')),
                mock.call(
                    RegExp(r'qj:\s+Wrapping return value in exception function\.')),
                mock.call(
                    RegExp(r'qj: <qj_test> test_logs_with_catch: Caught an exception in <function .*foo at 0x.*> <\d+>: FOO')),
            ],
            any_order=False)
        self.assertEqual(mock_log_fn.call_count, 3)
        self.assertEqual(mock_debug_fn.call_count, 1)
def test_logs_with_catch_decorator(self):
with mock.patch('logging.info') as mock_log_fn:
with mock.patch('ipdb.set_trace') as mock_debug_fn:
qj.LOG_FN = mock_log_fn
qj.DEBUG_FN = mock_debug_fn
@qj(catch=1)
def foo():
raise Exception('FOO')
foo()
mock_log_fn.assert_has_calls(
[
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_catch_decorator: catch=1 <\d+>: Preparing decorator to catch exceptions\.\.\.')),
mock.call(
RegExp(r'qj:\s+Decorating <function .*foo at 0x.*> with exception function\.')),
mock.call().__nonzero__() if sys.version_info[0] < 3 else mock.call().__bool__(), # TODO(iansf): it's unclear why this is necessary in this case.
mock.call(
RegExp(r'qj: <qj_test> test_logs_with_catch_decorator: Caught an | |
# repository: spenczar/precovery
import dataclasses
import glob
import itertools
import logging
import os
import struct
from typing import (
Iterable,
Iterator,
Optional,
Set,
Tuple
)
import numpy as np
import sqlalchemy as sq
from rich.progress import (
BarColumn,
Progress,
TimeElapsedColumn,
TimeRemainingColumn
)
from sqlalchemy.sql import func as sqlfunc
from . import sourcecatalog
from .orbit import Ephemeris
from .spherical_geom import haversine_distance_deg
# Binary record layout for one serialized Observation (little-endian):
# ra, dec, ra_sigma, dec_sigma, mag, mag_sigma as doubles, then the byte
# length of the trailing id as a long.  The variable-length id bytes are
# appended immediately after this fixed-size prefix (see Observation.to_bytes).
DATA_LAYOUT = "<ddddddl"
logger = logging.getLogger("frame_db")
@dataclasses.dataclass
class HealpixFrame:
    """One exposure's observations within a single HEALPix pixel.

    The observation data itself lives in an external data file;
    data_uri/data_offset/data_length locate the serialized bytes.
    """

    id: Optional[int]  # Database primary key; None before insertion.
    obscode: str  # Observatory code.
    exposure_id: str
    filter: str
    mjd: float  # Exposure epoch (Modified Julian Date).
    healpixel: int
    data_uri: str  # Basename of the data file holding the serialized observations.
    data_offset: int  # Byte offset of this frame's data within that file.
    data_length: int  # Length of this frame's data, in bytes.
@dataclasses.dataclass
class FrameBundleDescription:
    """Describes a group of frames sharing an obscode/healpixel/epoch window.

    NOTE(review): FrameIndex.frame_bundles() selects six columns (including
    common_epoch) but this dataclass declares five fields -- confirm that
    FrameBundleDescription(*row) matches the query's column list.
    """

    obscode: str
    start_epoch: float  # Earliest frame MJD in the bundle.
    end_epoch: float  # Latest frame MJD in the bundle.
    healpixel: int
    n_frames: int  # Number of frames grouped into this bundle.

    def epoch_midpoint(self) -> float:
        """Midpoint (MJD) of the bundle's epoch range."""
        return (self.start_epoch + self.end_epoch) / 2.0
@dataclasses.dataclass
class FrameWindow:
    """A contiguous time window of frames, independent of observatory/pixel."""

    start_epoch: float  # Window start (MJD).
    end_epoch: float  # Window end (MJD).
    n_frames: int  # Number of frames inside the window.

    def epoch_midpoint(self) -> float:
        """Midpoint (MJD) of the window."""
        return (self.start_epoch + self.end_epoch) / 2.0
@dataclasses.dataclass
class Observation:
    """A single detection: sky position, uncertainties, magnitude and id.

    Instances serialize to the fixed-size DATA_LAYOUT prefix followed by the
    variable-length id bytes (see to_bytes).
    """

    ra: float  # Right ascension.
    dec: float  # Declination.
    ra_sigma: float  # Uncertainty on ra.
    dec_sigma: float  # Uncertainty on dec.
    mag: float  # Magnitude.
    mag_sigma: float  # Uncertainty on mag.
    id: bytes  # Opaque source identifier, stored verbatim.
    # Class-level (unannotated, so not dataclass fields): shared precompiled
    # codec for the fixed-size portion of the record.
    data_layout = struct.Struct(DATA_LAYOUT)
    datagram_size = struct.calcsize(DATA_LAYOUT)

    def to_bytes(self) -> bytes:
        """Serialize to the DATA_LAYOUT prefix followed by the raw id bytes."""
        prefix = self.data_layout.pack(
            self.ra,
            self.dec,
            self.ra_sigma,
            self.dec_sigma,
            self.mag,
            self.mag_sigma,
            len(self.id),  # length prefix lets readers recover the id bytes
        )
        return prefix + self.id

    @classmethod
    def from_srcobs(cls, so: sourcecatalog.SourceObservation):
        """
        Cast a SourceObservation to an Observation.
        """
        return cls(
            ra=so.ra,
            dec=so.dec,
            ra_sigma=so.ra_sigma,
            dec_sigma=so.dec_sigma,
            mag=so.mag,
            mag_sigma=so.mag_sigma,
            id=so.id,
        )
class FrameIndex:
    """SQL-backed index of HealpixFrame metadata.

    Stores, for every (obscode, mjd, healpixel) frame, the location of its
    serialized observations (data_uri, data_offset, data_length) and answers
    the range/aggregate queries used by the precovery search.
    """

    def __init__(self, db_engine):
        self.db = db_engine
        self.dbconn = self.db.connect()
        self.initialize_tables()

    @classmethod
    def open(cls, db_uri, mode: str = "r"):
        """Open an index database at db_uri in "r" (read-only) or "w" mode."""
        if (mode != "r") and (mode != "w"):
            err = (
                "mode should be one of {'r', 'w'}"
            )
            raise ValueError(err)
        if db_uri.startswith('sqlite:///') and (mode == "r"):
            # Request a read-only SQLite connection.
            # NOTE(review): pysqlite typically also needs uri=true (or
            # connect_args={"uri": True}) for ?mode=ro to take effect --
            # confirm with the SQLAlchemy version in use.
            db_uri += "?mode=ro"
        engine = sq.create_engine(db_uri)
        return cls(engine)

    def close(self):
        """Close the database connection."""
        self.dbconn.close()

    def window_centers(
        self, start_mjd: float, end_mjd: float, window_size_days: int
    ) -> Iterator[Tuple[float, str]]:
        """
        Return the midpoint and obscode of all time windows with data in them.
        """
        offset = -start_mjd + window_size_days / 2
        # Equivalent SQL:
        # select distinct
        #   (cast(mjd - first + (window_size_days / 2) as int) / windows_size_days)
        #   * window_size_days + first as common_epoch
        # from frames;
        stmt = (
            sq.select(
                (
                    (
                        sq.cast(
                            self.frames.c.mjd + offset,
                            sq.Integer,
                        )
                        / window_size_days
                    )
                    * window_size_days
                    + start_mjd
                ).label("common_epoch"),
                self.frames.c.obscode,
            )
            .distinct()
            .where(
                self.frames.c.mjd >= start_mjd,
                self.frames.c.mjd <= end_mjd,
            )
            .order_by("common_epoch")
        )
        rows = self.dbconn.execute(stmt)
        for mjd, obscode in rows:
            yield (mjd, obscode)

    def propagation_targets(
        self, start_mjd: float, end_mjd: float, obscode: str
    ) -> Iterator[Tuple[float, Set[int]]]:
        """
        Yields (mjd, {healpixels}) pairs for the given obscode in the given range
        of MJDs.

        The yielded mjd is a float MJD epoch timestamp to propagate to, and
        {healpixels} is a set of integer healpixel IDs.
        """
        select_stmt = (
            sq.select(
                self.frames.c.mjd,
                self.frames.c.healpixel,
            )
            .where(
                self.frames.c.obscode == obscode,
                self.frames.c.mjd >= start_mjd,
                self.frames.c.mjd <= end_mjd,
            )
            .distinct()
            .order_by(
                self.frames.c.mjd.desc(),
                self.frames.c.healpixel,
            )
        )
        rows = self.dbconn.execute(select_stmt)
        # Rows arrive ordered by mjd, so groupby collapses them per epoch.
        for mjd, group in itertools.groupby(rows, key=lambda pair: pair[0]):
            healpixels = set(pixel for mjd, pixel in group)
            yield (mjd, healpixels)

    def get_frames(
        self, obscode: str, mjd: float, healpixel: int
    ) -> Iterator[HealpixFrame]:
        """
        Yield all the frames which are for given obscode, MJD, healpix.

        MJDs are checked to within +- 1e-7 days or 8.64 ms. Any frames that
        are within 8.64 ms of the given mjd will be returned. This does not
        guarantee that they will represent the desired exposure time and may
        lead to multiple matches computed at the wrong observation time.
        """
        select_stmt = sq.select(
            self.frames.c.id,
            self.frames.c.obscode,
            self.frames.c.exposure_id,
            self.frames.c.filter,
            self.frames.c.mjd,
            self.frames.c.healpixel,
            self.frames.c.data_uri,
            self.frames.c.data_offset,
            self.frames.c.data_length,
        ).where(
            self.frames.c.obscode == obscode,
            self.frames.c.healpixel == int(healpixel),
            self.frames.c.mjd >= mjd - 1e-7,
            self.frames.c.mjd <= mjd + 1e-7,
        )
        result = self.dbconn.execute(select_stmt)
        # Turn result into a list so we can iterate over it twice: once
        # to check the MJDs for uniqueness and a second time to actually
        # yield the individual rows
        rows = list(result)
        # Loop through rows and track MJDs
        mjds = set()
        for r in rows:
            # id, obscode, exposure_id, filter, mjd, healpixel, data uri, data offset, data length
            mjds.add(r[4])
        if len(mjds) > 1:
            # FIX: logger.warn is a deprecated alias of logger.warning.
            logger.warning(
                f"Query returned non-unique MJDs for mjd: {mjd}, healpix: {int(healpixel)}, obscode: {obscode}."
            )
        for r in rows:
            yield HealpixFrame(*r)

    def n_frames(self) -> int:
        """Total number of frame rows in the index."""
        select_stmt = sq.select(sqlfunc.count(self.frames.c.id))
        row = self.dbconn.execute(select_stmt).fetchone()
        return row[0]

    def n_bytes(self) -> int:
        """Total size in bytes of all frames' serialized observation data."""
        select_stmt = sq.select(sqlfunc.sum(self.frames.c.data_length))
        row = self.dbconn.execute(select_stmt).fetchone()
        return row[0]

    def n_unique_frames(self) -> int:
        """
        Count the number of unique (obscode, mjd, healpixel) triples in the index.

        This is not the same as the number of total frames, because those
        triples might have multiple data URIs and offsets.
        """
        subq = (
            sq.select(self.frames.c.obscode, self.frames.c.mjd, self.frames.c.healpixel)
            .distinct()
            .subquery()
        )
        stmt = sq.select(sqlfunc.count(subq.c.obscode))
        row = self.dbconn.execute(stmt).fetchone()
        return row[0]

    def frames_for_bundle(
        self, bundle: FrameBundleDescription
    ) -> Iterator[HealpixFrame]:
        """
        Yields frames which match a particular bundle.
        """
        select_stmt = (
            sq.select(self.frames)
            .where(
                self.frames.c.mjd >= bundle.start_epoch,
                self.frames.c.mjd <= bundle.end_epoch,
            )
            .order_by(self.frames.c.mjd.desc())
        )
        rows = self.dbconn.execute(select_stmt)
        for row in rows:
            yield HealpixFrame(*row)

    def mjd_bounds(self) -> Tuple[float, float]:
        """
        Returns the minimum and maximum mjd of all frames in the index.
        """
        # NOTE(review): SQLAlchemy's func takes type_= (with underscore);
        # confirm the 'type' keyword here is not silently ignored.
        select_stmt = sq.select(
            sqlfunc.min(self.frames.c.mjd, type=sq.Float),
            sqlfunc.max(self.frames.c.mjd, type=sq.Float),
        )
        first, last = self.dbconn.execute(select_stmt).fetchone()
        return first, last

    def frame_bundles(
        self, window_size_days: int, mjd_start: float, mjd_end: float
    ) -> Iterator[FrameBundleDescription]:
        """
        Returns an iterator which yields descriptions of bundles of frames with
        a common epoch between start and end (inclusive).
        """
        first, _ = self.mjd_bounds()
        offset = -first + window_size_days / 2
        # Equivalent SQL:
        # select
        #   obscode,
        #   (cast(mjd - first + (window_size_days / 2) as int) / windows_size_days)
        #   * window_size_days + first as common_epoch
        # from frames;
        subq = (
            sq.select(
                self.frames.c.obscode,
                self.frames.c.healpixel,
                self.frames.c.mjd,
                (
                    (
                        sq.cast(
                            self.frames.c.mjd + offset,
                            sq.Integer,
                        )
                        / window_size_days
                    )
                    * window_size_days
                    + first
                ).label("common_epoch"),
            )
            .where(
                self.frames.c.mjd >= mjd_start,
                self.frames.c.mjd <= mjd_end,
            )
            .subquery()
        )
        select_stmt = (
            sq.select(
                subq.c.common_epoch,
                subq.c.obscode,
                sqlfunc.min(subq.c.mjd).label("start_epoch"),
                sqlfunc.max(subq.c.mjd).label("end_epoch"),
                subq.c.healpixel,
                sqlfunc.count(1).label("n_frames"),
            )
            .group_by(
                subq.c.obscode,
                subq.c.common_epoch,
                subq.c.healpixel,
            )
            .order_by(
                subq.c.common_epoch.desc(),
                subq.c.obscode,
                subq.c.healpixel,
            )
        )
        logger.debug("executing query: %s", select_stmt)
        results = self.dbconn.execute(select_stmt)
        for row in results:
            # NOTE(review): this SELECT has six columns but
            # FrameBundleDescription declares five fields -- confirm the
            # dataclass matches before relying on this method.
            yield FrameBundleDescription(*row)

    def all_frames(self) -> Iterator[HealpixFrame]:
        """
        Returns all frames in the index, sorted by obscode, mjd, and healpixel.
        """
        stmt = sq.select(
            self.frames.c.id,
            self.frames.c.obscode,
            self.frames.c.exposure_id,
            self.frames.c.filter,
            self.frames.c.mjd,
            self.frames.c.healpixel,
            self.frames.c.data_uri,
            self.frames.c.data_offset,
            self.frames.c.data_length,
        ).order_by(self.frames.c.obscode, self.frames.c.mjd, self.frames.c.healpixel)
        result = self.dbconn.execute(stmt)
        for row in result:
            yield HealpixFrame(*row)

    def add_frame(self, frame: HealpixFrame):
        """Insert one frame's metadata row (frame.id is assigned by the DB)."""
        insert = self.frames.insert().values(
            obscode=frame.obscode,
            exposure_id=frame.exposure_id,
            filter=frame.filter,
            mjd=frame.mjd,
            healpixel=int(frame.healpixel),
            data_uri=frame.data_uri,
            data_offset=frame.data_offset,
            data_length=frame.data_length,
        )
        self.dbconn.execute(insert)

    def initialize_tables(self):
        """Define the frames table and create it if it does not exist."""
        self._metadata = sq.MetaData()
        self.frames = sq.Table(
            "frames",
            self._metadata,
            sq.Column(
                "id",
                sq.Integer,
                sq.Sequence("frame_id_seq"),
                primary_key=True,
            ),
            sq.Column("obscode", sq.String, index=True),
            sq.Column("exposure_id", sq.String),
            sq.Column("filter", sq.String),
            sq.Column("mjd", sq.Float, index=True),
            sq.Column("healpixel", sq.Integer, index=True),
            sq.Column("data_uri", sq.String),
            sq.Column("data_offset", sq.Integer),
            sq.Column("data_length", sq.Integer),
        )
        self._metadata.create_all(self.db)
class FrameDB:
def __init__(
self,
idx: FrameIndex,
data_root: str,
data_file_max_size: float = 1e9,
healpix_nside: int = 32,
):
self.idx = idx
self.data_root = data_root
self.data_file_max_size = data_file_max_size
self.data_files: dict = {} # basename -> open file
self._open_data_files()
self.n_data_files = 0
self.healpix_nside = healpix_nside
def close(self):
self.idx.close()
for f in self.data_files.values():
f.close()
    def load_hdf5(
        self,
        hdf5_file: str,
        skip: int = 0,
        limit: Optional[int] = None,
        key: str = "data",
        chunksize: int = 100000,
    ):
        """
        Load data from an NSC HDF5 catalog file.

        For each source frame, the observations are serialized into the
        current data file and a HealpixFrame pointing at those bytes is
        inserted into the index.

        hdf5_file: Path to a file on disk.
        skip: Number of frames to skip in the file.
        limit: Maximum number of frames to load from the file. None means no limit.
        key: Name of observations table in the hdf5 file.
        chunksize: Load observations in chunks of this size and then iterate over the chunks
            to load observations.
        """
        for src_frame in sourcecatalog.iterate_frames(
            hdf5_file,
            limit,
            nside=self.healpix_nside,
            skip=skip,
            key=key,
            chunksize=chunksize,
        ):
            # Serialize this frame's observations, then record where they
            # landed (file basename, byte offset, byte length) in the index.
            observations = [Observation.from_srcobs(o) for o in src_frame.observations]
            data_uri, offset, length = self.store_observations(observations)
            frame = HealpixFrame(
                id=None,  # assigned by the database on insert
                obscode=src_frame.obscode,
                exposure_id=src_frame.exposure_id,
                filter=src_frame.filter,
                mjd=src_frame.mjd,
                healpixel=src_frame.healpixel,
                data_uri=data_uri,
                data_offset=offset,
                data_length=length,
            )
            self.idx.add_frame(frame)
    def _open_data_files(self):
        """Scan data_root for existing frames*.data files and open them all.

        Populates self.data_files (basename -> read-only handle), sets
        self.n_data_files, and creates a fresh data file when none exist.
        """
        matcher = os.path.join(self.data_root, "frames*.data")
        files = glob.glob(matcher)
        for f in files:
            abspath = os.path.abspath(f)
            name = os.path.basename(f)
            # Existing files are opened read-only; new data is appended to a
            # file created by new_data_file().
            self.data_files[name] = open(abspath, "rb")
        # NOTE(review): len - 1 means a single existing file yields
        # n_data_files == 0, which triggers new_data_file() below -- confirm
        # this cannot clobber an existing frames_00000000.data.
        self.n_data_files = len(self.data_files) - 1
        if self.n_data_files <= 0:
            self.new_data_file()
def _current_data_file_name(self):
return "frames_{:08d}.data".format(self.n_data_files)
def _current_data_file_full(self):
return os.path.abspath(
os.path.join(self.data_root, self._current_data_file_name())
)
def _current_data_file_size(self):
return os.stat(self._current_data_file_name()).st_size
def _current_data_file(self):
return self.data_files[self._current_data_file_name()]
def iterate_observations(self, exp: HealpixFrame) -> Iterator[Observation]:
"""
Iterate over the observations stored in a frame.
"""
f = self.data_files[exp.data_uri]
f.seek(exp.data_offset)
data_layout = struct.Struct(DATA_LAYOUT)
datagram_size = struct.calcsize(DATA_LAYOUT)
bytes_read = 0
while bytes_read < exp.data_length:
raw = f.read(datagram_size)
ra, dec, ra_sigma, dec_sigma, mag, | |
# repository: ValentinoUberti/mcimporter
# -*- coding: utf-8 -*-
# this file is released under public domain and you can use without limitations
# -------------------------------------------------------------------------
# This is a sample controller
# - index is the default action of any application
# - user is required for authentication and authorization
# - download is for downloading files uploaded in the db (does streaming)
# -------------------------------------------------------------------------
from __future__ import division
import json
import csv
import datetime
from calendar import monthrange
import gluon
from datetime import timedelta
import wrapper
import subprocess
import os
import socket
import sys
from imports.writeToXlsx import WriteToXlsx
from imports.yamlImporter import YamlImporter
from imports.attendanceImporter import AttendanceImporter
import calendar
import locale
# NOTE(review): the next import shadows the 'datetime' *module* imported
# above -- after this line 'datetime' is the class, so any later use of
# 'datetime.datetime.strptime(...)' (e.g. in stampa_rcp) will raise
# AttributeError.  'monthrange' is also re-imported redundantly below.
from datetime import datetime
from calendar import monthrange
def ore_dipendenti():
    """web2py controller action for the 'ore dipendenti' page.

    Returns locals() per the web2py convention; no variables are defined
    here, so the view receives an empty context.
    """
    return locals()
def upload_csv():
    """Handle an uploaded attendance workbook and build the monthly xlsx.

    Reads the uploaded file from request.vars['csvfile[]'], saves it under
    uploads/, imports worker and attendance data, fills the pamaster.xlsm
    template with per-day worked hours and returns a JSON list containing
    the URL of the generated file.
    """
    def fixHours(h):
        """Round worked hours to the template's convention.

        Fractions in [0.41, 0.88) become .50; fractions above 0.88 round up
        to the next whole hour; anything else is truncated.
        NOTE(review): a fraction of exactly 0.88 falls through to the
        truncating branch -- confirm that boundary is intended.
        """
        decimal=h % 1
        number=int(h)
        print("Decimal = ",number,decimal)
        if decimal >= 0.41 and decimal <0.88:
            decimal=0.50
            number=float(number) + decimal
        elif decimal > 0.88:
            decimal=0.00
            number=float(number+1)
        else:
            number=float(number)
        #if number==decimal==0:
        #    number=0
        return number
    def fixHoursAndRest(h):
        """Same rounding as fixHours, applied to the remainder of an 8-hour day."""
        decimal=h % 1
        number=8 - int(h)
        print("Decimal = ",number,decimal)
        if decimal >= 0.41 and decimal <0.88:
            decimal=0.50
            number=float(number) + decimal
        elif decimal > 0.88:
            decimal=0.00
            number=float(number+1)
        else:
            number=float(number)
        #if number==decimal==0:
        #    number=0
        return number
    def fixHoursAndRestFriday(h):
        """Same rounding, applied to hours beyond Friday's 7-hour baseline."""
        decimal=h % 1
        number=int(h) -7
        print("Decimal Friday = ",number,decimal)
        if decimal >= 0.41 and decimal <0.88:
            decimal=0.50
            number=float(number) + decimal
        elif decimal > 0.88:
            decimal=0.00
            number=float(number+1)
        else:
            number=float(number)
        #if number==decimal==0:
        #    number=0
        return number
    all=[]
    # Save the uploaded file
    xlsx=request.vars['csvfile[]'].value
    # NOTE(review): 'filepath' is an unused alias of outFileName, and the
    # upload is binary xlsx written through a text-mode handle ("w") --
    # "wb" would be safer, especially on Windows / Python 3.
    outFileName=filepath = os.path.join(request.folder, 'uploads', "hours_uploaded.xlsx")
    outFile=open(outFileName,"w")
    outFile.write(xlsx)
    outFile.close()
    pamaster_path=os.path.join(request.folder, 'static/timbratore', "pamaster.xlsm")
    workers_path=os.path.join(request.folder, 'static/timbratore', "workers.yaml")
    locale.setlocale(locale.LC_ALL, 'it_IT.UTF-8') # Italian on windows
    yamlData = YamlImporter(workers_path)
    data = yamlData.importYaml()
    attendance=AttendanceImporter(outFileName)
    attendance.loadData()
    #attendance.orderData()
    # Day slot 32 carries the month number -- presumably set by the importer;
    # TODO confirm against AttendanceImporter.
    monthNumber=attendance.finalOrderedActions.days[32]
    monthName=calendar.month_name[monthNumber].title()
    year=datetime.now().year
    downloadFileName="timbrature-"+monthName+"-"+str(year)+".xlsx"
    saved_path=os.path.join(request.folder, 'static/timbratore', downloadFileName)
    XLSM = WriteToXlsx(pamaster_path, saved_path)
    # Write the month name into each of the template's five worker sections.
    rowsMonth=[1,38,75,112,149]
    for i in rowsMonth:
        XLSM.write(i,45,monthName)
    #Fix day name
    for row in data:
        startingRow=int(row.startingRow) -2
        num_days = monthrange(year, monthNumber)[1]
        for day in range(1,num_days+1):
            # Write each day's abbreviated Italian weekday name in its header cell.
            currentDate=datetime.strptime("{0}/{1}/{2}".format(day,monthNumber,year),"%d/%m/%Y")
            dayName=currentDate.strftime("%A")[:3]
            XLSM.write(int(startingRow),(day*2)-1+4,dayName.upper())
    for day in attendance.finalOrderedActions.days:
        if day < 32:
            for worker in attendance.finalOrderedActions.days[day]:
                hours=attendance.finalOrderedActions.days[day][worker]
                print("Day {}, Worker {}, Hour {}".format(day,worker,hours))
                startingRow=yamlData.returnStartingRow(worker)
                currentDate=datetime.strptime("{0}/{1}/{2}".format(day,monthNumber,year),"%d/%m/%Y")
                dayOfTheWeek=currentDate.weekday()
                if dayOfTheWeek ==4: #Friday
                    # Fridays have a 7-hour baseline; extra time is logged as
                    # overtime (S1) plus a fixed 1-hour FR entry.
                    if hours > 6.9:
                        XLSM.write(int(startingRow),(day*2)-1+5,7)
                        XLSM.write(int(startingRow)+1,(day*2)-1+5,fixHoursAndRestFriday(hours))
                        XLSM.write(int(startingRow)+1,(day*2)-2+5,"S1")
                        XLSM.write(int(startingRow)+2,(day*2)-1+5,1)
                        XLSM.write(int(startingRow)+2,(day*2)-2+5,"FR")
                    else:
                        if hours>0:
                            XLSM.write(int(startingRow),(day*2)-1+5,fixHours(hours))
                            XLSM.write(int(startingRow)+2,(day*2)-1+5,fixHoursAndRest(hours)-1)
                            XLSM.write(int(startingRow)+2,(day*2)-2+5,"FR")
                else:
                    # Regular days have an 8-hour baseline; anything beyond
                    # is overtime (S1).
                    if hours > 7.9:
                        XLSM.write(int(startingRow),(day*2)-1+5,8)
                        if fixHours(hours) - 8 >0:
                            XLSM.write(int(startingRow)+1,(day*2)-2+5,"S1")
                            XLSM.write(int(startingRow)+1,(day*2)-1+5,fixHours(hours) -8 )
                    else:
                        if hours>0:
                            XLSM.write(int(startingRow),(day*2)-1+5,fixHours(hours))
    XLSM.save()
    all.append(URL('static/timbratore',downloadFileName))
    return response.json(all)
@service.jsonrpc
@service.jsonrpc2
def stampa_rcp(args):
    """Generate the RCP PDF (production controls register) for one
    'articoli_in_produzione' row.

    args['0'] carries the row id. Builds a CONTROLLO_PRODUZIONE document
    with the order/article header data plus one row per configured
    'lavorazione', then renders the PDF.
    """
    id_riga_in_produzione=args['0']
    row = db(db.articoli_in_produzione.id == id_riga_in_produzione).select().first()
    # data_consegna is stored as "YYYY-MM-DD HH:MM:SS"; print it as DD/MM/YYYY.
    scadenza=datetime.datetime.strptime(str(row.data_consegna),"%Y-%m-%d %H:%M:%S").strftime("%d/%m/%Y")
    cliente=row.cliente
    riferimento_ordine=row.riferimento_ordine
    codice_ordine=row.codice_ordine
    codice_articolo=row.codice_articolo
    saldo=row.qta_saldo
    id_riga=row.id_riga
    dettaglio_ordine = db(db.ordine_cliente.ultimo_codice_ordine==codice_ordine).select().first()
    # The order may be missing (dettaglio_ordine is None) or have no 'ente'.
    try:
        ente=dettaglio_ordine.ente
        if ente is None:
            ente="Nessuno"
    except AttributeError:
        ente="Nessuno"
    # BUGFIX: 'revisione' was previously assigned only inside the try block,
    # so a failed lookup (missing article) left the name undefined and the
    # p.intestazione() call below crashed with a NameError. Default it first.
    revisione = ""
    try:
        revisione = str(db(db.anagrafica_articoli.codice_articolo == codice_articolo).select().first().revisione)
    except Exception:
        pass
    dettagli=db(db.anagrafica_articoli.codice_articolo==codice_articolo).select().first()
    giacenza=dettagli.giacenza
    ubicazione=dettagli.ubicazione
    cartella=dettagli.cartella_disegno
    peso=dettagli.peso
    if peso is None:
        peso=""
    p = CONTROLLO_PRODUZIONE("Microcarp S.r.l.","Registro dei Controlli in Produzione")
    p.intestazione(cliente,riferimento_ordine, codice_articolo,scadenza,revisione, saldo,giacenza,ubicazione,cartella,peso)
    p.footer(str(id_riga),ente)
    # One PDF row per configured machining/inspection step.
    lavorazioni=db(db.lavorazioni).select()
    for lavorazione in lavorazioni:
        p.add_row(lavorazione.nome,lavorazione.controllo)
    p.insert_rows()
    p.create_pdf()
@service.jsonrpc
@service.jsonrpc2
def crea_fattura(args):
id_cliente=args['0']
# print "ID CLIENTE : ",id_cliente
numero_corrente_fattura = db(db.fattura).select().first()["numero_fattura"]
numero = int(numero_corrente_fattura.split("/")[0])
anno = int(numero_corrente_fattura.split("/")[1])
numero +=1
numero_fattura_da_salvare = str(numero)+"/"+str(anno)
"""
Dati cliente
"""
dati_cliente = db(db.clienti.id == id_cliente).select().first()
nome_cliente=dati_cliente.nome
citta_cliente = dati_cliente.citta
indirizzo_cliente = dati_cliente.indirizzo
cap_cliente = dati_cliente.cap
provincia_cliente = dati_cliente.provincia
cf_cliente = dati_cliente.codice_fiscale
pi_cliente = dati_cliente.partita_iva
nazione_cliente = dati_cliente.nazione
codice_banca = dati_cliente.codice_banca
iban_cliente = dati_cliente.codice_iban
dettagli_banca = db(db.anagrafica_banche.descrizione == codice_banca).select().first()
scritta_esenzione_cliente = dati_cliente.descrizione_esenzione_iva
annotazioni=dati_cliente.annotazioni
ddts_id = db(db.ddt_da_fatturare.user_id == auth.user_id).select()
for r in ddts_id:
data_scelta = r.data_emissione
m = datetime.datetime.strptime(data_scelta,"%d/%m/%Y").date()
# print "MESE : "+str(m.month)
day_start,day_end = monthrange(m.year, m.month)
d = str(day_end)+"/"+str(m.month)+"/"+str(m.year)
start_date = datetime.datetime.strptime(d,"%d/%m/%Y")
fattura = FATTURA("FATTURA DIFFERITA",start_date.strftime("%d/%m/%Y"),numero_fattura_da_salvare)
fattura.intestazione(nome_cliente,citta_cliente,indirizzo_cliente,cap_cliente,provincia_cliente,nazione_cliente,cf_cliente,pi_cliente)
try:
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(dettagli_banca.iban),"PAGAMENTO","SCADENZA")
except Exception,e:
# print e
response.flash="Controllare il tipo di pagamento in anagrafica"
return locals()
ddts_id = db(db.ddt_da_fatturare.user_id == auth.user_id).select()
fattura.rows=[]
lista_codici_iva = {}
importo_totale = 0
imposta_totale = 0
imposta_iva = 0
lista_ddt = []
scritta_esenzione = False
for ddt_id in ddts_id:
lista_ddt.append(ddt_id.ddt_id)
riferimento_ddt = "Rif. DDT : " + ddt_id.numero_ddt + " del " + ddt_id.data_emissione
fattura.add_row("",riferimento_ddt,"","","","","","","")
rows = db(db.saved_righe_in_ddt_cliente.saved_ddt_id == ddt_id.ddt_id).select()
# print "DDT ID : ",ddt_id.ddt_id
for row in rows:
"""
<Row {'n_riga': '3', 'prezzo': '8.9919', 'saved_ddt_id': '21', 'quantita': '11', 'evasione': datetime.datetime(2017, 1, 31, 8, 56), 'id': 10L, 'codice_articolo': '892069925', 'codice_iva': 'Iva 22%', 'descrizione': 'FLANGIA', 'sconti': None, 'u_m': 'Nr', 'user_id': '1', 'codice_ordine': '1/17', 'id_ordine': '26', 'riferimento_ordine': 'fdsfsdf'}>
"""
"""
La riga del ddt contiene i dati relativi all'ordine (id_ordine)
siccome il pagamento può essere modificato bisogna risalire all'ordine
poi al tipo di pagamento, poi ai giorni e calcolare la data
"""
if not "commento" in row.codice_articolo:
id_ordine = row.id_ordine
try:
try:
pagamento = db(db.ordine_cliente.id == id_ordine).select().first()["pagamento"]
# print "pagamento = ",pagamento
except:
pagamento = None
if pagamento is None:
pagamento = db(db.clienti.id == id_cliente).select().first()["pagamento"]
if "F.M." in pagamento:
fine_mese = True
else:
fine_mese = False
if not fine_mese:
try:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
if start_date.date().month==12 or start_date.date().month==1 or start_date.date().month==2:
if int(giorni_da_aggiungere)==60:
giorni_da_aggiungere="56"
if int(giorni_da_aggiungere)==90:
giorni_da_aggiungere="86"
if int(giorni_da_aggiungere)==120:
giorni_da_aggiungere="116"
scadenza = datetime.datetime.now().date() + datetime.timedelta(days = int(giorni_da_aggiungere))
scadenza_salvata = scadenza
scadenza = scadenza.strftime("%d/%m/%Y")
except:
response.flash="Tipo di pagamento '{0}' non esistente in anagraficaca pagamenti".format(pagamento)
return locals()
else:
if ("M.S." or "ms") in pagamento:
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
giorni_mese_successivo = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni_mese_successivo"]
if start_date.date().month==12 or start_date.date().month==1 or start_date.date().month==2:
if int(giorni_da_aggiungere)==60:
giorni_da_aggiungere="56"
if int(giorni_da_aggiungere)==90:
giorni_da_aggiungere="86"
if int(giorni_da_aggiungere)==120:
giorni_da_aggiungere="116"
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
scadenza = datetime.datetime.strptime(scadenza,"%d/%m/%Y")
scadenza = scadenza.date() + datetime.timedelta(days = int(giorni_mese_successivo))
scadenza = scadenza.strftime("%d/%m/%Y")
else:
# Fine mese senza M.S.
giorni_da_aggiungere = db(db.codici_pagamenti.descrizione_codice_pagamento == pagamento).select().first()["giorni"]
if start_date.date().month==12 or start_date.date().month==1 or start_date.date().month==2:
if int(giorni_da_aggiungere)==60:
giorni_da_aggiungere="56"
if int(giorni_da_aggiungere)==90:
giorni_da_aggiungere="86"
if int(giorni_da_aggiungere)==120:
giorni_da_aggiungere="116"
scadenza = start_date.date() + datetime.timedelta(days = int(giorni_da_aggiungere))
day_start,day_end = monthrange(scadenza.year, scadenza.month)
scadenza = str(day_end)+"/"+str(scadenza.month)+"/"+str(scadenza.year)
pass
fattura.dettaglio(str(id_cliente),dettagli_banca.descrizione,str(iban_cliente),pagamento,str(scadenza))
except Exception,e:
# print e
response.flash="Controllare il tipo di pagamento in anagrafica"
return locals()
# print "Aggiunta rig"
sconti = row.sconti
if row.sconti is None:
sconti=""
try:
if row.prezzo == "0":
row.prezzo = ""
f = float(row.prezzo)
# print "SONO QUI : PREZZO = ".format(f)
except:
msg = "Prezzo non presente " + riferimento_ddt + " Cod.Art : " + row.codice_articolo
response.flash=msg
return locals()
try:
f=float(row.quantita)
except:
msg = "Quantità non valida Cod.Art : " + row.codice_articolo + " Qta : " +row.qta
response.flash=msg
return locals()
pass
importo = saved_importo = float(row.quantita) * float(row.prezzo)
importo = Money(str(importo),"EUR")
importo = importo.format("it_IT").encode('ascii', 'ignore').decode('ascii')
prezzo = str(row.prezzo).replace(".",",")
codice_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == row.codice_iva).select().first()["codice_iva"]
# print "VALLLLE " + row.codice_iva
descrizione_codice_iva = db(db.righe_in_ordine_cliente.id == row.id_riga_ordine, db.righe_in_ordine_cliente.n_riga==row.n_riga).select().first()["codice_iva"]
codice_iva=db(db.anagrafica_codici_iva.descrizione_codice_iva == descrizione_codice_iva).select().first()["codice_iva"]
row.codice_iva=codice_iva
if "Esenzione" in descrizione_codice_iva:
scritta_esenzione = True
percentuale_iva = db(db.anagrafica_codici_iva.descrizione_codice_iva == descrizione_codice_iva).select().first()["percentuale_iva"]
importo_totale +=saved_importo
imposta_totale += return_imposta(saved_importo,int(percentuale_iva))
if not codice_iva in lista_codici_iva:
lista_codici_iva[codice_iva] = saved_importo
else:
lista_codici_iva[codice_iva] += saved_importo
else:
"""
Passo il commento ma resetto tutti i campi
"""
row.riferimento_ordine=""
row.u_m=""
row.quantita=""
prezzo=""
sconti=""
importo=""
codice_iva=""
row.codice_articolo=""
# row.descrizione=row.commento
fattura.add_row(row.codice_articolo,row.descrizione,row.riferimento_ordine,row.u_m,row.quantita,prezzo,sconti,importo,codice_iva)
r = db(db.ddt_cliente.id == ddt_id.ddt_id).select().first()
r.update_record(fattura_emessa = "T")
# print lista_codici_iva
bollo= dati_cliente.bollo
if bollo:
print "<NAME>"
codice_articolo="BOLLO"
descrizione="art. 15 DPR 633/72"
riferimento_ordine=""
quantita="1"
prezzo="2,00"
sconti=""
codice_iva="53"
u_m="Nr"
importo="2,00"
fattura.add_row(codice_articolo,descrizione,riferimento_ordine,u_m,quantita,prezzo,sconti,importo,codice_iva)
if not codice_iva in lista_codici_iva:
lista_codici_iva[codice_iva] = 2
else:
lista_codici_iva[codice_iva] +=2
if scritta_esenzione:
fattura.add_row("","","","","","","","","")
fattura.add_row("","","","","","","","","")
scritte = scritta_esenzione_cliente.split(",")
for scritta in scritte:
fattura.add_row("",scritta,"","","","","","","")
bollo_presente = False
for k,v in lista_codici_iva.iteritems():
codice_iva = k
importo_netto = v
# print "LISTA CODICI : ",codice_iva,importo_netto
dettaglio_iva = db(db.anagrafica_codici_iva.codice_iva == codice_iva).select().first()
percentuale_iva = dettaglio_iva.percentuale_iva
descrizione_iva = dettaglio_iva.descrizione_codice_iva
imposta_iva = return_imposta(v,percentuale_iva)
if dettaglio_iva.bollo_su_importi_esenti is True:
if not bollo_presente:
bollo = db(db.bolli.descrizione=="Fattura").select().first()["valore"]
bollo_presente = True
fattura.footer_2(codice_iva,"",return_currency(importo_netto),descrizione_iva,return_currency(imposta_iva),"")
| |
if (section, key) in self.sensitive_config_values:
if super().has_option(section, fallback_key):
secrets_path = super().get(section, fallback_key)
return _get_config_value_from_secret_backend(secrets_path)
return None
def get(self, section, key, **kwargs):
section = str(section).lower()
key = str(key).lower()
deprecated_section, deprecated_key, _ = self.deprecated_options.get(
(section, key), (None, None, None)
)
option = self._get_environment_variables(deprecated_key, deprecated_section, key, section)
if option is not None:
return option
option = self._get_option_from_config_file(deprecated_key, deprecated_section, key, kwargs, section)
if option is not None:
return option
option = self._get_option_from_commands(deprecated_key, deprecated_section, key, section)
if option is not None:
return option
option = self._get_option_from_secrets(deprecated_key, deprecated_section, key, section)
if option is not None:
return option
return self._get_option_from_default_config(section, key, **kwargs)
def _get_option_from_default_config(self, section, key, **kwargs):
# ...then the default config
if self.airflow_defaults.has_option(section, key) or 'fallback' in kwargs:
return expand_env_var(self.airflow_defaults.get(section, key, **kwargs))
else:
log.warning("section/key [%s/%s] not found in config", section, key)
raise AirflowConfigException(f"section/key [{section}/{key}] not found in config")
def _get_option_from_secrets(self, deprecated_key, deprecated_section, key, section):
# ...then from secret backends
option = self._get_secret_option(section, key)
if option:
return option
if deprecated_section:
option = self._get_secret_option(deprecated_section, deprecated_key)
if option:
self._warn_deprecate(section, key, deprecated_section, deprecated_key)
return option
return None
def _get_option_from_commands(self, deprecated_key, deprecated_section, key, section):
# ...then commands
option = self._get_cmd_option(section, key)
if option:
return option
if deprecated_section:
option = self._get_cmd_option(deprecated_section, deprecated_key)
if option:
self._warn_deprecate(section, key, deprecated_section, deprecated_key)
return option
return None
def _get_option_from_config_file(self, deprecated_key, deprecated_section, key, kwargs, section):
# ...then the config file
if super().has_option(section, key):
# Use the parent's methods to get the actual config here to be able to
# separate the config from default config.
return expand_env_var(super().get(section, key, **kwargs))
if deprecated_section:
if super().has_option(deprecated_section, deprecated_key):
self._warn_deprecate(section, key, deprecated_section, deprecated_key)
return expand_env_var(super().get(deprecated_section, deprecated_key, **kwargs))
return None
def _get_environment_variables(self, deprecated_key, deprecated_section, key, section):
# first check environment variables
option = self._get_env_var_option(section, key)
if option is not None:
return option
if deprecated_section:
option = self._get_env_var_option(deprecated_section, deprecated_key)
if option is not None:
self._warn_deprecate(section, key, deprecated_section, deprecated_key)
return option
return None
def getboolean(self, section, key, **kwargs):
val = str(self.get(section, key, **kwargs)).lower().strip()
if '#' in val:
val = val.split('#')[0].strip()
if val in ('t', 'true', '1'):
return True
elif val in ('f', 'false', '0'):
return False
else:
raise AirflowConfigException(
f'Failed to convert value to bool. Please check "{key}" key in "{section}" section. '
f'Current value: "{val}".'
)
def getint(self, section, key, **kwargs):
val = self.get(section, key, **kwargs)
try:
return int(val)
except ValueError:
raise AirflowConfigException(
f'Failed to convert value to int. Please check "{key}" key in "{section}" section. '
f'Current value: "{val}".'
)
def getfloat(self, section, key, **kwargs):
val = self.get(section, key, **kwargs)
try:
return float(val)
except ValueError:
raise AirflowConfigException(
f'Failed to convert value to float. Please check "{key}" key in "{section}" section. '
f'Current value: "{val}".'
)
def getimport(self, section, key, **kwargs):
"""
Reads options, imports the full qualified name, and returns the object.
In case of failure, it throws an exception with the key and section names
:return: The object or None, if the option is empty
"""
full_qualified_path = conf.get(section=section, key=key, **kwargs)
if not full_qualified_path:
return None
try:
return import_string(full_qualified_path)
except ImportError as e:
log.error(e)
raise AirflowConfigException(
f'The object could not be loaded. Please check "{key}" key in "{section}" section. '
f'Current value: "{full_qualified_path}".'
)
def getjson(self, section, key, fallback=_UNSET, **kwargs) -> Union[dict, list, str, int, float, None]:
"""
Return a config value parsed from a JSON string.
``fallback`` is *not* JSON parsed but used verbatim when no config value is given.
"""
# get always returns the fallback value as a string, so for this if
# someone gives us an object we want to keep that
default = _UNSET
if fallback is not _UNSET:
default = fallback
fallback = _UNSET
try:
data = self.get(section=section, key=key, fallback=fallback, **kwargs)
except (NoSectionError, NoOptionError):
return default
if len(data) == 0:
return default if default is not _UNSET else None
try:
return json.loads(data)
except JSONDecodeError as e:
raise AirflowConfigException(f'Unable to parse [{section}] {key!r} as valid json') from e
def read(self, filenames, encoding=None):
super().read(filenames=filenames, encoding=encoding)
def read_dict(self, dictionary, source='<dict>'):
super().read_dict(dictionary=dictionary, source=source)
def has_option(self, section, option):
try:
# Using self.get() to avoid reimplementing the priority order
# of config variables (env, config, cmd, defaults)
# UNSET to avoid logging a warning about missing values
self.get(section, option, fallback=_UNSET)
return True
except (NoOptionError, NoSectionError):
return False
def remove_option(self, section, option, remove_default=True):
"""
Remove an option if it exists in config from a file or
default config. If both of config have the same option, this removes
the option in both configs unless remove_default=False.
"""
if super().has_option(section, option):
super().remove_option(section, option)
if self.airflow_defaults.has_option(section, option) and remove_default:
self.airflow_defaults.remove_option(section, option)
    def getsection(self, section: str) -> Optional[Dict[str, Union[str, int, float, bool]]]:
        """
        Returns the section as a dict. Values are converted to int, float, bool
        as required.

        :param section: section from the config
        :rtype: dict
        """
        if not self.has_section(section) and not self.airflow_defaults.has_section(section):
            return None
        # Start from the shipped defaults (if the section exists there)...
        if self.airflow_defaults.has_section(section):
            _section = OrderedDict(self.airflow_defaults.items(section))
        else:
            _section = OrderedDict()
        # ...then overlay the user's config-file values on top.
        if self.has_section(section):
            _section.update(OrderedDict(self.items(section)))
        # Environment variables override both; matching *_CMD variables are
        # folded onto the base key before resolution.
        section_prefix = self._env_var_name(section, '')
        for env_var in sorted(os.environ.keys()):
            if env_var.startswith(section_prefix):
                key = env_var.replace(section_prefix, '')
                if key.endswith("_CMD"):
                    key = key[:-4]
                key = key.lower()
                # NOTE(review): _get_env_var_option may return None for _CMD
                # keys it refuses to resolve — confirm downstream handling.
                _section[key] = self._get_env_var_option(section, key)
        # Best-effort coercion of string values to int, then float, then bool.
        for key, val in _section.items():
            try:
                val = int(val)
            except ValueError:
                try:
                    val = float(val)
                except ValueError:
                    if val.lower() in ('t', 'true'):
                        val = True
                    elif val.lower() in ('f', 'false'):
                        val = False
            _section[key] = val
        return _section
def write(self, fp, space_around_delimiters=True):
# This is based on the configparser.RawConfigParser.write method code to add support for
# reading options from environment variables.
if space_around_delimiters:
delimiter = f" {self._delimiters[0]} "
else:
delimiter = self._delimiters[0]
if self._defaults:
self._write_section(fp, self.default_section, self._defaults.items(), delimiter)
for section in self._sections:
self._write_section(fp, section, self.getsection(section).items(), delimiter)
def as_dict(
self,
display_source=False,
display_sensitive=False,
raw=False,
include_env=True,
include_cmds=True,
include_secret=True,
) -> Dict[str, Dict[str, str]]:
"""
Returns the current configuration as an OrderedDict of OrderedDicts.
:param display_source: If False, the option value is returned. If True,
a tuple of (option_value, source) is returned. Source is either
'airflow.cfg', 'default', 'env var', or 'cmd'.
:type display_source: bool
:param display_sensitive: If True, the values of options set by env
vars and bash commands will be displayed. If False, those options
are shown as '< hidden >'
:type display_sensitive: bool
:param raw: Should the values be output as interpolated values, or the
"raw" form that can be fed back in to ConfigParser
:type raw: bool
:param include_env: Should the value of configuration from AIRFLOW__
environment variables be included or not
:type include_env: bool
:param include_cmds: Should the result of calling any *_cmd config be
set (True, default), or should the _cmd options be left as the
command to run (False)
:type include_cmds: bool
:param include_secret: Should the result of calling any *_secret config be
set (True, default), or should the _secret options be left as the
path to get the secret from (False)
:type include_secret: bool
:rtype: Dict[str, Dict[str, str]]
:return: Dictionary, where the key is the name of the section and the content is
the dictionary with the name of the parameter and its value.
"""
config_sources: Dict[str, Dict[str, str]] = {}
configs = [
('default', self.airflow_defaults),
('airflow.cfg', self),
]
self._replace_config_with_display_sources(config_sources, configs, display_source, raw)
# add env vars and overwrite because they have priority
if include_env:
self._include_envs(config_sources, display_sensitive, display_source, raw)
# add bash commands
if include_cmds:
self._include_commands(config_sources, display_sensitive, display_source, raw)
# add config from secret backends
if include_secret:
self._include_secrets(config_sources, display_sensitive, display_source, raw)
return config_sources
def _include_secrets(self, config_sources, display_sensitive, display_source, raw):
for (section, key) in self.sensitive_config_values:
opt = self._get_secret_option(section, key)
if opt:
if not display_sensitive:
opt = '< hidden >'
if display_source:
opt = (opt, 'secret')
elif raw:
opt = opt.replace('%', '%%')
config_sources.setdefault(section, OrderedDict()).update({key: opt})
del config_sources[section][key + '_secret']
def _include_commands(self, config_sources, display_sensitive, display_source, raw):
for (section, key) in self.sensitive_config_values:
opt = self._get_cmd_option(section, key)
if not opt:
continue
if not display_sensitive:
opt = '< hidden >'
if display_source:
opt = (opt, 'cmd')
elif raw:
opt = opt.replace('%', '%%')
config_sources.setdefault(section, OrderedDict()).update({key: opt})
del config_sources[section][key + '_cmd']
def _include_envs(self, config_sources, display_sensitive, display_source, raw):
for env_var in [
os_environment for os_environment in os.environ if os_environment.startswith(self.ENV_VAR_PREFIX)
]:
| |
# -*- coding: utf-8 -*-
"""
Created on Tue May 14 19:10:39 2019
@author: Sneha
"""
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 21 11:57:42 2019
@author: Sneha
"""
import random
import math
import copy
import numpy as np
import matplotlib.pyplot as plt
import time
from IPython import get_ipython
# Switch matplotlib to the interactive Qt backend (script is meant to run
# under IPython/Spyder; this fails outside an IPython session).
get_ipython().run_line_magic('matplotlib','qt')
# Global flag: when True the planner is expected to animate tree growth.
show_animation =False
class RRT():
    """
    Class for RRT Planning

    Grows a tree of Node objects from `start` toward `goal` by sampling
    random points, steering toward them by at most `expandDis`, and
    (RRT*-style) selecting/rewiring parents to reduce path cost.
    """
    def __init__(self, start, goal, obstacleList, randArea,
                 expandDis=0.1, goalSampleRate=5, maxIter=30000):
        """
        Setting Parameter

        start: Start Position [x,y]
        goal: Goal Position [x,y]
        obstacleList: obstacle data (see verify_node for the layout used)
        randArea: random sampling area [[xmin,xmax],[ymin,ymax]]
        expandDis: max steering step per iteration
        goalSampleRate: percent of samples drawn at the goal itself
        maxIter: number of sampling iterations
        """
        self.start = Node(start[0], start[1])
        self.end = Node(goal[0], goal[1])
        # x-range and y-range of the sampling area.
        self.randx = randArea[0]
        self.randy = randArea[1]
        self.expandDis = expandDis
        self.goalSampleRate = goalSampleRate
        self.maxIter = maxIter
        self.obstacleList = obstacleList
    def Planning(self,n,circles,rectangles, animation=True):
        """
        Pathplanning

        animation: flag for animation on or off
        n: number of random circular obstacles
        circles/rectangles: obstacle geometry (used for drawing only)
        Returns the path goal->start as [[x,y], ...], or None if no node
        ended up within expandDis of the goal.
        """
        self.nodeList = [self.start]
        for i in range(self.maxIter):
            # Sample (biased toward the goal by goalSampleRate percent).
            rnd = self.get_random_point()
            nind = self.GetNearestListIndex(self.nodeList, rnd)
            # Steer from the nearest node toward the sample by <= expandDis.
            newNode = self.change_course(rnd, nind)
            # print(newNode.cost)
            # if (np.abs(newNode.x - self.end.x)< (0.1) and np.abs(newNode.y - self.end.y)< (0.1)):
            #     break;
            if self.verify_node(n,newNode, self.obstacleList):
                # print('true')
                nearinds = self.find_near_nodes(newNode)
                newNode = self.select_parent(n,newNode, nearinds)
                self.nodeList.append(newNode)
                self.rewire(n,newNode, nearinds)
            # else:
            #     print('False')
            if animation and i % 1000 == 0:
                self.DrawGraph(n,circles,rectangles,rnd)
        # generate course
        lastIndex = self.get_best_last_index()
        if lastIndex is None:
            return None
        path = self.gen_final(lastIndex)
        print(len(self.nodeList))
        return path
    def select_parent(self,n, newNode, nearinds):
        # Among nearinds, pick the collision-free parent that gives newNode
        # the lowest cost; newNode is returned unchanged if nearinds is empty
        # or no near node can be connected without collision.
        if not nearinds:
            return newNode
        dlist = []
        for i in nearinds:
            dx = newNode.x - self.nodeList[i].x
            dy = newNode.y - self.nodeList[i].y
            d = math.sqrt(dx ** 2 + dy ** 2)
            theta = math.atan2(dy, dx)
            if self.check_collide(n,self.nodeList[i], theta, d):
                dlist.append(self.nodeList[i].cost + d)
            else:
                # Blocked connections get infinite cost so dlist stays
                # index-aligned with nearinds.
                dlist.append(float("inf"))
        mincost = min(dlist)
        minind = nearinds[dlist.index(mincost)]
        if mincost == float("inf"):
            print("mincost is inf")
            return newNode
        newNode.cost = mincost
        newNode.parent = minind
        return newNode
    def change_course(self, rnd, nind):
        # expand tree: steer from node nind toward rnd by at most expandDis.
        nearestNode = self.nodeList[nind]
        theta = math.atan2(rnd[1] - nearestNode.y, rnd[0] - nearestNode.x)
        newNode = Node(rnd[0], rnd[1])
        currentDistance = math.sqrt(
            (rnd[1] - nearestNode.y) ** 2 + (rnd[0] - nearestNode.x) ** 2)
        # Find a point within expandDis of nind, and closest to rnd
        if currentDistance <= self.expandDis:
            pass
        else:
            newNode.x = nearestNode.x + self.expandDis * math.cos(theta)
            newNode.y = nearestNode.y + self.expandDis * math.sin(theta)
        # Cost/parent are set later by select_parent.
        newNode.cost = float("inf")
        newNode.parent = None
        return newNode
    def get_random_point(self):
        # Uniform sample in the arena, except goalSampleRate percent of the
        # time the goal itself is returned (goal bias).
        if random.randint(0, 100) > self.goalSampleRate:
            rnd = [random.uniform(self.randx[0], self.randx[1]),
               random.uniform(self.randy[0], self.randy[1])]
        else:  # goal point sampling
            rnd = [self.end.x, self.end.y]
        return rnd
    def get_best_last_index(self):
        # Index of the cheapest node within expandDis of the goal, or None.
        disglist = [self.calc_dist_to_goal(
            node.x, node.y) for node in self.nodeList]
        goalinds = [disglist.index(i) for i in disglist if i <= self.expandDis]
        if not goalinds:
            return None
        mincost = min([self.nodeList[i].cost for i in goalinds])
        for i in goalinds:
            if self.nodeList[i].cost == mincost:
                return i
        return None
    def gen_final(self, goalind):
        # Walk parent links from goalind back to the root; the returned path
        # is ordered goal -> start.
        path = [[self.end.x, self.end.y]]
        while self.nodeList[goalind].parent is not None:
            node = self.nodeList[goalind]
            path.append([node.x, node.y])
            goalind = node.parent
        path.append([self.start.x, self.start.y])
        return path
    def calc_dist_to_goal(self, x, y):
        # Euclidean distance from (x, y) to the goal.
        return np.linalg.norm([x - self.end.x, y - self.end.y])
    def find_near_nodes(self, newNode):
        # Indices of nodes within a radius that shrinks as the tree grows
        # (standard RRT* neighbourhood schedule).
        nnode = len(self.nodeList)
        r = 50.0 * math.sqrt((math.log(nnode) / nnode))
        # r = self.expandDis * 5.0
        dlist = [(node.x - newNode.x) ** 2 +
                 (node.y - newNode.y) ** 2 for node in self.nodeList]
        # NOTE(review): dlist.index(i) returns the *first* node at distance i,
        # so two nodes at identical distance yield a duplicated index — confirm
        # this is acceptable for the rewiring step.
        nearinds = [dlist.index(i) for i in dlist if i <= r ** 2]
        return nearinds
    def rewire(self,n, newNode, nearinds):
        # Re-parent near nodes through newNode whenever that shortens their
        # path and the connecting motion is collision-free.
        nnode = len(self.nodeList)
        for i in nearinds:
            nearNode = self.nodeList[i]
            dx = newNode.x - nearNode.x
            dy = newNode.y - nearNode.y
            d = math.sqrt(dx **
                          2 + dy ** 2)
            scost = newNode.cost + d
            if nearNode.cost > scost:
                theta = math.atan2(dy, dx)
                if self.check_collide(n,nearNode, theta, d):
                    # newNode was appended last, hence index nnode - 1.
                    nearNode.parent = nnode - 1
                    nearNode.cost = scost
    def check_collide(self,n, nearNode, theta, d):
        # Walk from nearNode along heading theta in expandDis-sized steps;
        # returns False as soon as an intermediate point fails verify_node.
        tmpNode = copy.deepcopy(nearNode)
        for i in range(int(d / self.expandDis)):
            tmpNode.x += self.expandDis * math.cos(theta)
            tmpNode.y += self.expandDis * math.sin(theta)
            if not self.verify_node(n,tmpNode, self.obstacleList):
                return False
        return True
    def DrawGraph(self,n,circles,rectangles, rnd=None):
        """
        Draw Graph

        Redraws the whole tree plus obstacle geometry; rnd (if given) is the
        latest random sample, marked with a black triangle.
        """
        plt.clf()
        if rnd is not None:
            plt.plot(rnd[0], rnd[1], "^k")
        # Green segments: every tree edge (child to its parent).
        for node in self.nodeList:
            if node.parent is not None:
                plt.plot([node.x, self.nodeList[node.parent].x], [
                         node.y, self.nodeList[node.parent].y], "-g")
        # plt.axis([0,0,250,150])
        plt.xticks(np.arange(-12.5,12.5,1))
        plt.yticks(np.arange(-7.5,7.5,1))
        plt.plot(self.start.x, self.start.y, "xr")
        plt.plot(self.end.x, self.end.y, "xr")
        # for (ox, oy, size) in self.obstacleList:
        #     plt.plot(ox, oy, "ok", ms=30 * size)
        # fig, ax = plt.subplots()
        # Filled blue circles: the n random circular obstacles.
        for i in range(5):
            # print(i[0])
            # print(i[3],i[4])
            # print(i[3])
            for j in range(n):
                # pri
                plt.fill(circles[3][j],circles[4][j], color = 'b' )
            # plt.fill(i[3],i[4], color = 'r' )
        # Filled polygons: rectangles as ([vertex lists], color) pairs.
        for i in (rectangles):
            #
            for k in i[0]:
                plt.fill(k[0],k[1], color = i[1])
        # ax.legend()
        # ax.grid(color=(0,0,0), linestyle='-', linewidth=1)
        ## plt.axis([0,0,250,150])
        # plt.xticks(np.arange(0,25,2))
        # plt.yticks(np.arange(0,15,1))
        # plt.grid(True)
        plt.pause(0.01)
    def GetNearestListIndex(self, nodeList, rnd):
        # Index of the tree node closest (squared distance) to sample rnd.
        dlist = [(node.x - rnd[0]) ** 2 + (node.y - rnd[1])
                 ** 2 for node in nodeList]
        minind = dlist.index(min(dlist))
        return minind
    def verify_node(self,n, node, obstacleList):
        # True if `node` lies inside the arena and outside every obstacle,
        # all inflated by robot radius + clearance.
        # Layout assumed here: obstacleList[0] = rectangle corner lists,
        # obstacleList[1] = [radii, xs, ys, ...] of the n random circles.
        # global radius,clearance
        res=1
        radius=0.35/2
        clearance=0.1
        x=node.x
        y=node.y
        d=(radius)+(clearance)
        circ=[]
        for i in range(n):
            # print(obstacleList[1][0])
            # Positive value => point is outside the inflated circle i.
            circ.append(((x-(obstacleList[1][1][i]/res))*(x-(obstacleList[1][1][i]/res))+ (y-(obstacleList[1][2][i]/res))*(y-(obstacleList[1][2][i]/res)) - ((obstacleList[1][0][i]/res)+d)*((obstacleList[1][0][i]/res)+d)))
        # c2= ((x-(-1.17/res))*(x-(-1.17/res))+ (y-(2.31/res))*(y-(2.31/res)) - ((0.81/res)+d)*((0.81/res)+d))
        # c3= ((x-(-1.17/res))*(x-(-1.17/res))+ (y-(-2.31/res))*(y-(-2.31/res)) - ((0.81/res)+d)*((0.81/res)+d))
        # c4= ((x-(-1.65/res))*(x-(-1.65/res))+ (y-(-4.6/res))*(y-(-4.6/res))- ((0.81/res)+d)*((0.81/res)+d))
        # #Capsule
        # u=-3.2516 #x-position of the center
        # v=3.2505 #y-position of the center
        #
        # a=(3.1968-1.599)/2 #radius on the x-axis
        # b=1.599/2 #radius on the y-axis
        # r = [u-a, u+a,u+a, u-a]
        # s = [v-b, v-b, v+b,v+b]
        #
        # u1=u-a
        # u2=u+a
        # e1= ((x-(u1/res))*(x-(u1/res))+ (y-(v/res))*(y-(v/res)) - ((b/res)+d)*((b/res)+d))
        # e2= ((x-(u2/res))*(x-(u2/res))+ (y-(v/res))*(y-(v/res)) - ((b/res)+d)*((b/res)+d))
        exist=True
        # Arena bounds check (25 x 15 area centred on the origin).
        if (x>=(-12.5)+d and x<=(12.5/res)-d and y>=(-7.5/res)+d and y<=(7.5/res)-d):
            # Axis-aligned rectangle obstacles, inflated by d.
            for c in obstacleList[0]:
                if( x>=c[0][0]-d and x<=c[0][1]+d and y>=c[1][0]-d and y<=c[1][2]+d):
                    # print('1')
                    exist = False
            if(exist is True):
                # Circle obstacles: a non-positive entry means inside.
                for j in circ:
                    if(j<=0):
                        exist=False
            # if( x>=((r[0]/res)-d) and x<=((r[1]/res)+d) and y>=((s[0]/res)-d) and y<=((s[2]/res)+d)):
            #     exist = False
            #     print('2')
            # elif (e1<=0):
            #     exist=False
            #     print('3')
            # elif (e2<=0):
            #     exist=False
            # elif (c1<=0):
            #     exist=False
            # elif (c2<=0):
            #     exist=False
            # elif (c3<=0):
            #     exist=False
            # elif (c4<=0):
            #     exist=False
            # else:
            #     exist=True
        else:
            exist=False
        return exist
class Node():
    """A single vertex of the RRT search tree."""
    def __init__(self, x, y):
        # Planar position.
        self.x, self.y = x, y
        # Path cost from the start node (planner overwrites this) and the
        # index of the parent node in the planner's node list (None = root).
        self.cost, self.parent = 0.0, None
def getxs_ys(xs, ys, n):
    """Build the obstacle map: n random circles plus a fixed wall layout.

    Appends every rectangle-corner coordinate to ``xs``/``ys`` (mutated in
    place) and returns ``(rectangle_corner, circles, plot_layers)`` where

    * ``rectangle_corner`` -- per rectangle ``[x_corners, y_corners]``
    * ``circles`` -- ``[radii, centre_xs, centre_ys, outline_xs, outline_ys]``
    * ``plot_layers`` -- ``[[inflated_corner_outlines, 'b'],
      [rectangle_corner, 'r']]`` for plotting

    :param xs: list collecting obstacle x samples (extended in place)
    :param ys: list collecting obstacle y samples (extended in place)
    :param n: number of random circular obstacles to generate
    """
    # NOTE(review): in the original source this function ended with a
    # dangling ``else`` returning the message below; the matching ``if``
    # was lost (garbled extraction).  The guard is reconstructed here so
    # the visible caller (which passes lists) keeps its behaviour --
    # TODO confirm the intended condition.
    if xs is None or ys is None:
        return "Please enter both Initial and Final Points", [], []
    radius = 0                 # inflation radius for plotted outlines (0 = raw corners)
    t = np.linspace(0, 2 * np.pi, 100)   # parameter for circle outlines
    resolution = 1
    # Random circular obstacles.
    r, cx, cy, p, q = [], [], [], [], []
    for _ in range(n):
        rad = random.uniform(0.1, 0.2)
        x1 = random.uniform(-12.5, 12.5)
        y1 = random.uniform(-7.5, 7.5)
        r.append(rad)
        cx.append(x1)
        cy.append(y1)
        p.append(x1 + rad * np.cos(t))   # outline x samples
        q.append(y1 + rad * np.sin(t))   # outline y samples
    circles = [r, cx, cy, p, q]
    # Fixed wall segments as [center_x, center_y, width, height].
    rectangles = [[-11.5,6,0.1,3],[-10.5,0.5,0.1,6],[-7,4,0.1,7],
                  [-3.7,-1,1.5,0.1],[-4.5,-1,0.1,3],[0.45,5.5,4,0.1],
                  [2.5,5.5,0.1,4],[7.5,5,0.1,5],[11,4.5,3,0.1],
                  [11.25,-1.5,2.5,0.1],[9,-4.575,2,0.1],[9.95,-2.5,0.1,4],
                  [5,-6,0.1,3],[4.3,-4.45,1.5,0.1],[-3,-6.5,0.1,2],
                  [-1.55,-5.45,3,0.1],[0,-1.5,0.1,8],[1.55,-1.5,3,0.1],
                  [-1.3,2.5,2.5,0.1],[-2.5,3.55,0.1,2],[4.05,0.5,2,0.1],
                  [3,-0.45,0.1,2],[5,2.05,0.1,3],[-10.5,3.55,4,0.1],
                  [-9.5,-5.25,0.1,1.5],[-7.5,-3.5,0.1,2],[-9.5,-6.05,2,0.1],
                  [-9.55,-4.45,4,0.1],[-9.05,-2.55,3,0.1],[-7.7,0.45,1.5,0.1],[-8.54,4.5,0.1,2]]
    # Scale every rectangle parameter by the map resolution (no-op at 1).
    for i in range(len(rectangles)):
        for j in range(4):
            rectangles[i][j] = rectangles[i][j] / resolution
    # Expand each rectangle into its four corners, CCW from bottom-left,
    # and record every corner in the caller-supplied sample lists.
    rectangle_corner = []
    for rect in rectangles:
        half_w = rect[2] / 2
        half_h = rect[3] / 2
        corner_x = [rect[0] - half_w, rect[0] + half_w,
                    rect[0] + half_w, rect[0] - half_w]
        corner_y = [rect[1] - half_h, rect[1] - half_h,
                    rect[1] + half_h, rect[1] + half_h]
        xs.extend(corner_x)
        ys.extend(corner_y)
        rectangle_corner.append([corner_x, corner_y])
    # Circle outlines centred on each rectangle corner, inflated by the
    # robot radius (currently 0) -- used only for plotting.
    urecBoxes = []
    for corner in rectangle_corner:
        uboxrx = [corner[0][j] + radius * np.cos(t) for j in range(4)]
        uboxry = [corner[1][j] + radius * np.sin(t) for j in range(4)]
        urecBoxes.append([uboxrx, uboxry])
    return rectangle_corner, circles, [[urecBoxes, 'b'], [rectangle_corner, 'r']]
def main():
start = time.time()
print("Start " + __file__)
# ====Search Path with RRT====
ox, oy = [], []
n=10
rect_corners,circles,rectangles=getxs_ys(ox,oy,n)
# obstacleList = | |
is not None:
x1 = x1[index1,]
if flg1 is True:
dot_x1 = dot_x1[index1,]
else:
dot_x1 = numpy.sum(x1**2, 1)
else:
if flg1 is False:
dot_x1 = numpy.sum(x1**2, 1)
if index2 is not None:
x2 = x2[index2,]
if flg2 is True:
dot_x2 = dot_x2[index2,]
else:
dot_x2 = numpy.sum(x2**2, 1)
alpha2 = alpha2[index2,]
else:
if flg2 is False:
dot_x2 = numpy.sum(x2**2, 1)
n1 = x1.shape[0]
nb = n1 / self._blocksize
n2 = alpha2.shape[1]
if output is None:
output = numpy.zeros((n1,n2), numpy.float64)
# handle special cases.
if index2 is not None:
if len(index2) <= self._blocksize:
x1_x2 = numpy.dot(x1, numpy.transpose(x2))
self.KappaSqDis(x1_x2, x1_x2, dot_x1, dot_x2)
output = numpy.dot(x1_x2,alpha2)
return output
# blocking.
lower_limit = 0
upper_limit = 0
for i in range(nb):
upper_limit = upper_limit + self._blocksize
x1_x2 = numpy.transpose(numpy.dot(x2, numpy.transpose(x1[lower_limit:upper_limit,])))
self.KappaSqDis(x1_x2, x1_x2, dot_x1[lower_limit:upper_limit], dot_x2)
output[lower_limit:upper_limit,] = numpy.dot(x1_x2, alpha2)
lower_limit = upper_limit
if lower_limit <= n1:
x1_x2 = numpy.transpose(numpy.dot(x2, numpy.transpose(x1[lower_limit:n1,])))
self.KappaSqDis(x1_x2, x1_x2, dot_x1[lower_limit:n1], dot_x2)
output[lower_limit:n1,] = numpy.dot(x1_x2, alpha2)
return output
## Compute the kernel between the data points in x1 and those in x2,
    # then multiply the resulting kernel matrix element-wise by the
    # outer-product matrix between y1 and y2. It returns a matrix
    # with entry $(ij)$ equal to $K(x1_i,x2_j) \times (y1_i \times y2_j)$.
# Other parameters are defined similarly as those in Dot.
# @param x1 [read] The first set of data points.
# @param y1 [read] The first set of labels.
# @param x2 [read] The second set of data points.
# @param y2 [read] The second set of labels.
# @param index1 [read] The indices into the first set of data points.
# @param index2 [read] The indices into the second set of data points.
# @param output [write] The buffer where the output matrix is written into.
#
def Tensor(self, x1, y1, x2, y2, index1=None, index2=None, output=None):
assert len(x1.shape) == 2, 'Argument 1 has wrong shape'
assert len(y1.shape) == 2, 'Argument 2 has wrong shape'
assert x1.shape[0] == y1.shape[0], \
'Argument 1 and 2 has different dimensions'
assert len(x2.shape) == 2, 'Argument 3 has wrong shape'
assert len(y2.shape) == 2, 'Argument 4 has wrong shape'
assert x2.shape[0] == y2.shape[0], \
'Argument 2 and 3 has different dimensions'
# retrieve remembered data from the cache
flg1 = False
if self._cacheData.has_key(id(x1)):
dot_x1 = self._cacheData[id(x1)]
flg1 = True
flg2 = False
if self._cacheData.has_key(id(x2)):
dot_x2 = self._cacheData[id(x2)]
flg2 = True
if index1 is not None:
x1 = x1[index1,]
if flg1 is True:
dot_x1 = dot_x1[index1,]
else:
dot_x1 = numpy.sum(x1**2, 1)
y1 = y1[index1,]
else:
if flg1 is False:
dot_x1 = numpy.sum(x1**2, 1)
if index2 is not None:
x2 = x2[index2,]
if flg2 is True:
dot_x2 = dot_x2[index2,]
else:
dot_x2 = numpy.sum(x2**2, 1)
y2 = y2[index2,]
else:
if flg2 is False:
dot_x2 = numpy.sum(x2**2, 1)
n1 = x1.shape[0]
nb = n1 / self._blocksize
n2 = x2.shape[0]
if output is None:
output = numpy.zeros((n1,n2), numpy.float64)
# handle special cases:
if index2 is not None:
if len(index2) <= self._blocksize:
x1_x2 = numpy.dot(x1, numpy.transpose(x2))
self.KappaSqDis(x1_x2, x1_x2, dot_x1, dot_x2)
output = numpy.transpose(y1[:,0] * numpy.transpose(y2[:,0] * x1_x2))
return output
# blocking
lower_limit = 0
upper_limit = 0
for i in range(nb):
upper_limit = upper_limit + self._blocksize
x1_x2 = numpy.transpose(numpy.dot(x2, numpy.transpose(x1[lower_limit:upper_limit,])))
self.KappaSqDis(x1_x2, x1_x2, dot_x1[lower_limit:upper_limit], dot_x2)
output[lower_limit:upper_limit,] = numpy.transpose(y1[lower_limit:upper_limit,0] * numpy.transpose(y2[:,0] * x1_x2))
lower_limit = upper_limit
if lower_limit <= n1:
x1_x2 = numpy.transpose(numpy.dot(x2, numpy.transpose(x1[lower_limit:n1,])))
self.KappaSqDis(x1_x2, x1_x2, dot_x1[lower_limit:n1], dot_x2)
output[lower_limit:n1,] = numpy.transpose(y1[lower_limit:n1,0] * numpy.transpose(y2[:,0] * x1_x2))
return output
## Compute the kernel between the data points in x1 and those in x2,
    # then multiply the resulting kernel matrix element-wise by the
    # outer-product matrix between y1 and y2, and finally multiply
    # the resulting matrix by alpha2. It returns a matrix with entry $(ij)$
    # equal to $\sum_r K(x1_i,x2_r) \times (y1_i \times y2_r) \times alpha2_r$.
# Other parameters are defined similarly as those in Dot.
# @param x1 [read] The first set of data points.
# @param y1 [read] The first set of labels.
# @param x2 [read] The second set of data points.
# @param y2 [read] The second set of labels.
# @param index1 [read] The indices into the first set of data points.
# @param index2 [read] The indices into the second set of data points.
# @param output [write] The buffer where the output matrix is written into.
#
def TensorExpand(self, x1, y1, x2, y2, alpha2, index1=None, index2=None, output=None):
assert len(x1.shape) == 2, 'Argument 1 has wrong shape'
assert len(y1.shape) == 2, 'Argument 2 has wrong shape'
assert x1.shape[0] == y1.shape[0], \
'Argument 1 and 2 have different dimensions'
assert len(x2.shape) == 2, 'Argument 3 has wrong shape'
assert len(y2.shape) == 2, 'Argument 4 has wrong shape'
assert x2.shape[0] == y2.shape[0], \
'Argument 3 and 4 have different dimensions'
assert len(alpha2.shape) == 2, 'Argument 5 has wrong shape'
assert x2.shape[0] == alpha2.shape[0], \
'Argument 3 and 5 have different number of data points'
# retrieve remembered data from the cache
flg1 = False
if self._cacheData.has_key(id(x1)):
dot_x1 = self._cacheData[id(x1)]
flg1 = True
flg2 = False
if self._cacheData.has_key(id(x2)):
dot_x2 = self._cacheData[id(x2)]
flg2 = True
if index1 is not None:
x1 = x1[index1,]
if flg1 is True:
dot_x1 = dot_x1[index1,]
else:
dot_x1 = numpy.sum(x1**2, 1)
y1 = y1[index1,]
else:
if flg1 is False:
dot_x1 = numpy.sum(x1**2, 1)
if index2 is not None:
x2 = x2[index2,]
if flg2 is True:
dot_x2 = dot_x2[index2,]
else:
dot_x2 = numpy.sum(x2**2, 1)
y2 = y2[index2,]
alpha2 = alpha2[index2,]
else:
if flg2 is False:
dot_x2 = numpy.sum(x2**2, 1)
n1 = x1.shape[0]
nb = n1 / self._blocksize
n2 = alpha2.shape[1]
if output is None:
output = numpy.zeros((n1,n2), numpy.float64)
# handle special cases:
if index2 is not None:
if len(index2) <= self._blocksize:
x1_x2 = numpy.dot(x1, numpy.transpose(x2))
self.KappaSqDis(x1_x2, x1_x2, dot_x1, dot_x2)
output = numpy.transpose(y1[:,0] * numpy.transpose(numpy.dot(y2[:,0]*x1_x2, alpha2)))
return output
# blocking
lower_limit = 0
upper_limit = 0
for i in range(nb):
upper_limit = upper_limit + self._blocksize
x1_x2 = numpy.transpose(numpy.dot(x2, numpy.transpose(x1[lower_limit:upper_limit,])))
self.KappaSqDis(x1_x2, x1_x2, dot_x1[lower_limit:upper_limit], dot_x2)
output[lower_limit:upper_limit,] = numpy.transpose(y1[lower_limit:upper_limit,0] * numpy.transpose(numpy.dot(y2[:,0]*x1_x2, alpha2)))
lower_limit = upper_limit
if lower_limit <= n1:
x1_x2 = numpy.transpose(numpy.dot(x2, numpy.transpose(x1[lower_limit:n1,])))
self.KappaSqDis(x1_x2, x1_x2, dot_x1[lower_limit:n1], dot_x2)
output[lower_limit:n1,] = numpy.transpose(y1[lower_limit:n1,0] * numpy.transpose(numpy.dot(y2[:,0]*x1_x2, alpha2)))
return output
## Create the cache for the base part of the kernel computed for
# data x, and index them by the id of x. If x have already been
# remembered, the old stored information is simply overwritten.
# @param x [read] The data whose base part is to be cached.
#
def CreateCacheKernel(self, x):
assert len(x.shape) == 2, 'Argument 1 has wrong shape'
n = x.shape[0]
nb = n / self._blocksize
# create the cache space
if self._cacheKernel.has_key(id(x)):
self.ClearCacheKernel(x)
tmpCacheKernel = numpy.zeros((n,n), numpy.float64)
self._cacheKernel[id(x)] = tmpCacheKernel
if self._cacheData.has_key(id(x)):
dot_x = self._cacheData[id(x)]
else:
dot_x = numpy.sum(x**2,1)
# blocking
lower_limit = 0
upper_limit = 0
for i in range(nb):
upper_limit = upper_limit + self._blocksize
tmpCacheKernel[lower_limit:upper_limit,] = numpy.transpose(numpy.dot(x, numpy.transpose(x[lower_limit:upper_limit,])))
tmpCacheKernel[lower_limit:upper_limit,] = numpy.add.outer(dot_x[lower_limit:upper_limit], dot_x) \
- 2*tmpCacheKernel[lower_limit:upper_limit,]
lower_limit = upper_limit
if lower_limit <= n:
tmpCacheKernel[lower_limit:n,] = numpy.transpose(numpy.dot(x, numpy.transpose(x[lower_limit:n,])))
tmpCacheKernel[lower_limit:n,] = numpy.add.outer(dot_x[lower_limit:n], dot_x) \
- 2*tmpCacheKernel[lower_limit:n,]
return True
## Dot product of x with itself with the cached base part of the kernel.
# If param is given, the kernel matrix is computed using
# the given parameter and the current base part. Otherwise, the old
# parameters are used.
# @param x The data set.
# @param param The new parameters.
# @param output The output buffer.
#
def DotCacheKernel(self, x, param=None, output=None):
assert len(x.shape)==2, 'Argument 1 has wrong shape'
assert self._cacheKernel.has_key(id(x)) == True, \
'Argument 1 has not been cached'
n = x.shape[0]
nb = n / self._blocksize
tmpCacheKernel = self._cacheKernel[id(x)]
# set parameters.
if param is not None:
self.SetParam(param)
if output is None:
output = numpy.zeros((n,n), numpy.float64)
# blocking
lower_limit = 0
upper_limit = 0
for i in range(nb):
upper_limit = upper_limit + self._blocksize
output[lower_limit:upper_limit,] = self.Kappa(tmpCacheKernel[lower_limit:upper_limit,])
lower_limit = | |
rev_eds = _parser(_lexer(fmt))
self.assertEqual(result, _input(eds, rev_eds, inp))
    # Auto-generated regression tests for reading reals through the
    # '(E5.4E1)' edit descriptor: each case lexes/parses the format string
    # and checks the value _input() produces for the raw record in `inp`.
    # Expected values were captured from the reference compiler run named
    # in the platform attribute.
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_116(self):
        inp = '''-100.'''
        fmt = '''(E5.4E1)'''
        result = [-1.0000000000000000e+02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_117(self):
        inp = '''1000.'''
        fmt = '''(E5.4E1)'''
        result = [1.0000000000000000e+03]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_118(self):
        inp = '''-1000.'''
        fmt = '''(E5.4E1)'''
        result = [-1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_119(self):
        inp = '''10000.'''
        fmt = '''(E5.4E1)'''
        result = [1.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_120(self):
        inp = '''-10000.'''
        fmt = '''(E5.4E1)'''
        result = [-1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_121(self):
        inp = '''100000.'''
        fmt = '''(E5.4E1)'''
        result = [1.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_122(self):
        inp = '''-100000.'''
        fmt = '''(E5.4E1)'''
        result = [-1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_123(self):
        inp = '''123456789.'''
        fmt = '''(E5.4E1)'''
        result = [1.2344999999999999e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_124(self):
        inp = '''0.1'''
        fmt = '''(E5.4E1)'''
        result = [1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_125(self):
        inp = '''-0.1'''
        fmt = '''(E5.4E1)'''
        result = [-1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_126(self):
        inp = '''0.01'''
        fmt = '''(E5.4E1)'''
        result = [1.0000000000000000e-02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_127(self):
        inp = '''-0.01'''
        fmt = '''(E5.4E1)'''
        result = [-1.0000000000000000e-02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_128(self):
        inp = '''0.001'''
        fmt = '''(E5.4E1)'''
        result = [1.0000000000000000e-03]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_129(self):
        inp = '''-0.001'''
        fmt = '''(E5.4E1)'''
        result = [0.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_130(self):
        inp = '''0.0001'''
        fmt = '''(E5.4E1)'''
        result = [0.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_131(self):
        inp = '''-0.0001'''
        fmt = '''(E5.4E1)'''
        result = [0.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_132(self):
        inp = '''-1.96e-16'''
        fmt = '''(E5.4E1)'''
        result = [-1.9600000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_133(self):
        inp = '''3.14159'''
        fmt = '''(E5.4E1)'''
        result = [3.1410000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_134(self):
        inp = '''- 1.0'''
        fmt = '''(E5.4E1)'''
        result = [0.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_135(self):
        inp = '''1e12'''
        fmt = '''(E5.4E1)'''
        result = [1.0000000000000000e+08]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_136(self):
        inp = '''1E12'''
        fmt = '''(E5.4E1)'''
        result = [1.0000000000000000e+08]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_137(self):
        inp = '''-1 e12'''
        fmt = '''(E5.4E1)'''
        result = [-1.0000000000000000e-04]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_138(self):
        inp = '''.'''
        fmt = '''(E5.4E1)'''
        result = [0.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_139(self):
        inp = '''.1'''
        fmt = '''(E5.4E1)'''
        result = [1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_140(self):
        inp = '''0.1D+200'''
        fmt = '''(E5.4E1)'''
        result = [1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    # Auto-generated regression tests for the wider '(E10.4E1)' edit
    # descriptor: same inputs as the E5.4E1 group above, but the larger
    # field width lets most records be read without truncation.
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_141(self):
        inp = '''3.'''
        fmt = '''(E10.4E1)'''
        result = [3.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_142(self):
        inp = '''-3.'''
        fmt = '''(E10.4E1)'''
        result = [-3.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_143(self):
        inp = '''10.'''
        fmt = '''(E10.4E1)'''
        result = [1.0000000000000000e+01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_144(self):
        inp = '''-10.'''
        fmt = '''(E10.4E1)'''
        result = [-1.0000000000000000e+01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_145(self):
        inp = '''100.'''
        fmt = '''(E10.4E1)'''
        result = [1.0000000000000000e+02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_146(self):
        inp = '''-100.'''
        fmt = '''(E10.4E1)'''
        result = [-1.0000000000000000e+02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_147(self):
        inp = '''1000.'''
        fmt = '''(E10.4E1)'''
        result = [1.0000000000000000e+03]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_148(self):
        inp = '''-1000.'''
        fmt = '''(E10.4E1)'''
        result = [-1.0000000000000000e+03]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_149(self):
        inp = '''10000.'''
        fmt = '''(E10.4E1)'''
        result = [1.0000000000000000e+04]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_150(self):
        inp = '''-10000.'''
        fmt = '''(E10.4E1)'''
        result = [-1.0000000000000000e+04]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_151(self):
        inp = '''100000.'''
        fmt = '''(E10.4E1)'''
        result = [1.0000000000000000e+05]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_152(self):
        inp = '''-100000.'''
        fmt = '''(E10.4E1)'''
        result = [-1.0000000000000000e+05]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_153(self):
        inp = '''123456789.'''
        fmt = '''(E10.4E1)'''
        result = [1.2345678900000000e+08]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_154(self):
        inp = '''0.1'''
        fmt = '''(E10.4E1)'''
        result = [1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_155(self):
        inp = '''-0.1'''
        fmt = '''(E10.4E1)'''
        result = [-1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_156(self):
        inp = '''0.01'''
        fmt = '''(E10.4E1)'''
        result = [1.0000000000000000e-02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_157(self):
        inp = '''-0.01'''
        fmt = '''(E10.4E1)'''
        result = [-1.0000000000000000e-02]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_158(self):
        inp = '''0.001'''
        fmt = '''(E10.4E1)'''
        result = [1.0000000000000000e-03]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_159(self):
        inp = '''-0.001'''
        fmt = '''(E10.4E1)'''
        result = [-1.0000000000000000e-03]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_160(self):
        inp = '''0.0001'''
        fmt = '''(E10.4E1)'''
        result = [1.0000000000000000e-04]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_161(self):
        inp = '''-0.0001'''
        fmt = '''(E10.4E1)'''
        result = [-1.0000000000000000e-04]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_162(self):
        inp = '''-1.96e-16'''
        fmt = '''(E10.4E1)'''
        result = [-1.9600000000000000e-16]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_163(self):
        inp = '''3.14159'''
        fmt = '''(E10.4E1)'''
        result = [3.1415899999999999e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_164(self):
        inp = '''- 1.0'''
        fmt = '''(E10.4E1)'''
        result = [-1.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_165(self):
        inp = '''1e12'''
        fmt = '''(E10.4E1)'''
        result = [1.0000000000000000e+08]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_166(self):
        inp = '''1E12'''
        fmt = '''(E10.4E1)'''
        result = [1.0000000000000000e+08]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_167(self):
        inp = '''-1 e12'''
        fmt = '''(E10.4E1)'''
        result = [-1.0000000000000000e+08]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_168(self):
        inp = '''.'''
        fmt = '''(E10.4E1)'''
        result = [0.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_169(self):
        inp = '''.1'''
        fmt = '''(E10.4E1)'''
        result = [1.0000000000000001e-01]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_170(self):
        inp = '''0.1D+200'''
        fmt = '''(E10.4E1)'''
        result = [1.0000000000000001e+199]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    # Auto-generated regression tests for the '(E5.5E1)' edit descriptor
    # (decimal count equal to the field width).
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_171(self):
        inp = '''3.'''
        fmt = '''(E5.5E1)'''
        result = [3.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
    @attr(platform='9-1_linux_intel')
    @attr('input')
    @attr(ed='E')
    def test_e_ed_input_172(self):
        inp = '''-3.'''
        fmt = '''(E5.5E1)'''
        result = [-3.0000000000000000e+00]
        eds, rev_eds = _parser(_lexer(fmt))
        self.assertEqual(result, _input(eds, rev_eds, inp))
@attr(platform='9-1_linux_intel')
@attr('input')
@attr(ed='E')
def test_e_ed_input_173(self):
inp = '''10.'''
fmt = '''(E5.5E1)'''
result = [1.0000000000000000e+01]
eds, | |
| wx.CANCEL)
return result.returnedString
def doExit(self):
if self.components.document.GetModify():
save = self.saveChanges()
if save == "Cancel":
return False
elif save == "No":
return True
else:
if self.documentPath is None:
return self.on_menuFileSaveAs_select(None)
else:
self.saveFile(self.documentPath)
return True
else:
return True
def on_close(self, event):
if self.doExit():
try:
# KEA 2004-04-08
# if an exception occurs during on_initialize
# then saveConfig could fail because some windows
# might not exist, so in that situation just exit gracefully
self.saveConfig()
except:
pass
self.fileHistory = None
self.printer = None
event.skip()
def on_menuFileSave_select(self, event):
if self.documentPath is None:
# this a "new" document and needs to go through Save As...
self.on_menuFileSaveAs_select(None)
else:
self.saveFile(self.documentPath)
def on_menuFileSaveAs_select(self, event):
#wildcard = "Python scripts (*.py;*.pyw)|*.py;*.pyw|Text files (*.txt)|*.txt|All files (*.*)|*.*"
wildcard = self.resource.strings.saveAsWildcard
if self.documentPath is None:
dir = ''
filename = '*.py'
else:
dir = os.path.dirname(self.documentPath)
filename = os.path.basename(self.documentPath)
result = dialog.saveFileDialog(None, self.resource.strings.saveAs, dir, filename, wildcard)
if result.accepted:
path = result.paths[0]
self.saveFile(path)
self.fileHistory.AddFileToHistory(path)
return True
else:
return False
    def newFile(self):
        """Reset the editor to an empty, untitled document."""
        self.components.document.text = ''
        self.documentPath = None
        self.setEditorStyle()
        # mark the (empty) buffer as unmodified
        self.components.document.SetSavePoint()
        self.title = self.resource.strings.untitled + ' - ' + self.startTitle
        #self.statusBar.text = self.resource.strings.untitled
        # force the status display to refresh on the next update
        self.lastStatus = None
        # KEA 2003-07-26
        # reset EOL to match platform
        # this may not actually be what the user expects
        # so perhaps this should be an option in a dialog?!
        self.autoSetEOL()
def openFile(self, path):
try:
self.components.document.SetUndoCollection(0)
self.components.document.ClearAll()
f = open(path, 'rb')
try:
self.components.document.text = f.read()
finally:
f.close()
self.documentPath = path
os.chdir(os.path.dirname(self.documentPath))
self.components.document.EmptyUndoBuffer()
self.components.document.SetUndoCollection(1)
self.components.document.SetSavePoint()
self.title = os.path.split(path)[-1] + ' - ' + self.startTitle
#self.statusBar.text = path
self.lastStatus = None
self.fileHistory.AddFileToHistory(path)
# KEA 2002-06-29
# just as a test, let's see how the XML and/or HTML styles
# look
self.setEditorStyle()
self.autoSetEOL()
except:
pass
def saveFile(self, path):
try:
f = open(path, 'wb')
try:
f.write(self.components.document.text)
finally:
f.close()
self.documentPath = path
os.chdir(os.path.dirname(self.documentPath))
self.components.document.SetSavePoint()
self.title = os.path.split(path)[-1] + ' - ' + self.startTitle
#self.statusBar.text = path
self.lastStatus = None
self.setEditorStyle()
except:
pass
# KEA 2003-07-26
def autoSetEOL(self):
"""
when opening an existing file
automatically set the EOL mode to
match the current line endings for the file
if the document is empty then set EOL to
the original EOL state
"""
doc = self.components.document
if doc.GetLength():
line = doc.GetLine(0)
else:
line = os.linesep
if line.endswith('\r\n'):
doc.SetEOLMode(stc.STC_EOL_CRLF)
elif line.endswith('\n'):
doc.SetEOLMode(stc.STC_EOL_LF)
elif line.endswith('\r'):
doc.SetEOLMode(stc.STC_EOL_CR)
# File menu
# KEA 2002-05-04
# need to decide on UI for multiple windows
# New Window, Open in New Window, New, Open, etc.
# since we aren't doing MDI
# we could have child windows, but what would the organization be?!
def on_menuFileNewWindow_select(self, event):
app = os.path.split(sys.argv[0])[-1]
filename = os.path.join(self.application.applicationDirectory, app)
python = sys.executable
if ' ' in python:
pythonQuoted = '"' + python + '"'
else:
pythonQuoted = python
os.spawnv(os.P_NOWAIT, python, [pythonQuoted, filename])
# for this to work, all the windows need to share a common list of windows
# a File->Exit would iterate through each?
"""
path = os.path.join(self.application.applicationDirectory, 'codeEditor')
rsrc = resource.ResourceFile(model.internationalResourceName(path)).getResource()
self.childWindow = CodeEditor(self, rsrc.application.backgrounds[0])
"""
def on_menuFileNew_select(self, event):
if self.components.document.GetModify():
save = self.saveChanges()
if save == "Cancel":
# don't do anything, just go back to editing
pass
elif save == "No":
# any changes will be lost
self.newFile()
else:
if self.documentPath is None:
if self.on_menuFileSaveAs_select(None):
self.newFile()
else:
self.saveFile(self.documentPath)
self.newFile()
else:
# don't need to save
self.newFile()
def on_menuFileOpen_select(self, event):
if self.components.document.GetModify():
save = self.saveChanges()
if save == "Cancel":
# don't do anything, just go back to editing
return
elif save == "No":
# any changes will be lost
pass
else:
if self.documentPath is None:
# if the user cancels out of the Save As then go back to editing
if not self.on_menuFileSaveAs_select(None):
return
else:
self.saveFile(self.documentPath)
# split this method into several pieces to make it more flexible
#wildcard = "Python scripts (*.py;*.pyw)|*.py;*.pyw|Text files (*.txt)|*.txt|All files (*.*)|*.*"
wildcard = self.resource.strings.saveAsWildcard
result = dialog.openFileDialog(None, self.resource.strings.openFile, '', '', wildcard)
if result.accepted:
path = result.paths[0]
# an error will probably occur here if the text is too large
# to fit in the wxTextCtrl (TextArea) or the file is actually
# binary. Not sure what happens with CR/LF versus CR versus LF
# line endings either
self.openFile(path)
def on_menuFilePrint_select(self, event):
source = textToHtml(self.components.document.text)
self.printer.PrintText(source)
def on_menuFilePrintPreview_select(self, event):
source = textToHtml(self.components.document.text)
self.printer.PreviewText(source)
    def on_menuFilePageSetup_select(self, event):
        """Show the printer page-setup dialog."""
        self.printer.PageSetup()
# Edit menu
def on_menuEditUndo_select(self, event):
widget = self.findFocus()
if hasattr(widget, 'editable') and widget.CanUndo():
widget.Undo()
def on_menuEditRedo_select(self, event):
widget = self.findFocus()
if hasattr(widget, 'editable') and widget.CanRedo():
widget.Redo()
def on_menuEditCut_select(self, event):
widget = self.findFocus()
# KEA 2002-05-03
# no CanCut() method?
if hasattr(widget, 'editable'):
widget.Cut()
def on_menuEditCopy_select(self, event):
widget = self.findFocus()
# KEA 2002-05-03
# no CanCopy() method?
if hasattr(widget, 'editable'):
widget.Copy()
def on_menuEditPaste_select(self, event):
widget = self.findFocus()
if hasattr(widget, 'editable') and widget.CanPaste():
widget.Paste()
def on_menuEditClear_select(self, event):
widget = self.findFocus()
if hasattr(widget, 'editable'):
widget.ClearSelection()
def on_menuEditSelectAll_select(self, event):
widget = self.findFocus()
if hasattr(widget, 'editable'):
widget.SelectAll()
def findNext(self, searchText, wholeWordsOnly, caseSensitive):
if searchText == '':
return -1
doc = self.components.document
current = doc.GetCurrentPos()
last = doc.GetLength()
if wx.VERSION >= (2, 3, 3):
flags = 0
if caseSensitive:
flags = flags + stc.STC_FIND_MATCHCASE
if wholeWordsOnly:
flags = flags + stc.STC_FIND_WHOLEWORD
result = doc.FindText(current, last, searchText, flags)
else:
result = doc.FindText(current, last, searchText,
caseSensitive,
wholeWordsOnly)
if result != -1:
# update the selection, which also changes the cursor position
n = len(searchText)
doc.SetSelection(result, result + n)
else:
# should we beep or flash the screen or present an error dialog?
pass
return result
def on_doEditFindReplace_command(self, event):
data = wx.FindReplaceData()
flags = data.GetFlags()
data.SetFindString(self.lastFind['searchText'])
data.SetReplaceString(self.lastFind['replaceText'])
if self.lastFind['wholeWordsOnly']:
flags = flags | wx.FR_WHOLEWORD
if self.lastFind['caseSensitive']:
flags = flags | wx.FR_MATCHCASE
data.SetFlags(flags)
dlg = wx.FindReplaceDialog(self, data, "Find & Replace", wx.FR_REPLACEDIALOG)
dlg.data = data # save a reference to it...
# KEA 2004-04-18
# can't use visible attribute
# probably need to create a wrapper for FindReplaceDialog
# to make it more like PythonCard
#dlg.visible = True
dlg.Show()
    def on_doEditReplaceTabs_command(self, event):
        """Replace tabs in the document (delegates to replaceTabs)."""
        self.replaceTabs()
def on_doEditFind_command(self, event):
# keep track of the last find and preload
# the search text and radio buttons
lastFind = self.lastFind
result = dialog.findDialog(self, lastFind['searchText'],
lastFind['wholeWordsOnly'],
lastFind['caseSensitive'])
if result.accepted:
lastFind['searchText'] = result.searchText
lastFind['wholeWordsOnly'] = result.wholeWordsOnly
lastFind['caseSensitive'] = result.caseSensitive
self.findNext(lastFind['searchText'],
lastFind['wholeWordsOnly'],
lastFind['caseSensitive'])
    def on_doEditFindNext_command(self, event):
        """Repeat the previous search with the same options."""
        self.findNext(self.lastFind['searchText'],
                      self.lastFind['wholeWordsOnly'],
                      self.lastFind['caseSensitive'])
def gotoLine(self, lineNumber):
try:
# GotoLine is zero based, but we ask the user
# for a line number starting at 1
self.components.document.GotoLine(lineNumber - 1)
except:
pass
def on_doEditGoTo_command(self, event):
result = dialog.textEntryDialog(self, self.resource.strings.gotoLineNumber, self.resource.strings.gotoLine, '')
# this version doesn't alert the user if the line number is out-of-range
# it just fails quietly
if result.accepted:
try:
self.gotoLine(int(result.text))
except:
pass
    def on_indentRegion_command(self, event):
        """Indent the selected lines one level (scintilla Tab command)."""
        self.components.document.CmdKeyExecute(stc.STC_CMD_TAB)
    def on_dedentRegion_command(self, event):
        """Dedent the selected lines one level (scintilla Back-Tab command)."""
        self.components.document.CmdKeyExecute(stc.STC_CMD_BACKTAB)
def on_commentRegion_command(self, event):
# need to do the equivelant of the IDLE
# comment_region_event in AutoIndent.py
doc = self.components.document
sel = doc.GetSelection()
start = doc.LineFromPosition(sel[0])
end = doc.LineFromPosition(sel[1])
if end > start and doc.GetColumn(sel[1]) == 0:
end = end - 1
doc.BeginUndoAction()
for lineNumber in range(start, end + 1):
firstChar = doc.PositionFromLine(lineNumber)
doc.InsertText(firstChar, '##')
doc.SetCurrentPos(doc.PositionFromLine(start))
doc.SetAnchor(doc.GetLineEndPosition(end))
doc.EndUndoAction()
def on_uncommentRegion_command(self, event):
# need to do the equivelant of the IDLE
# uncomment_region_event in AutoIndent.py
doc = self.components.document
sel = doc.GetSelection()
start = doc.LineFromPosition(sel[0])
end = doc.LineFromPosition(sel[1])
if end > start and doc.GetColumn(sel[1]) == 0:
end = end - 1
doc.BeginUndoAction()
for lineNumber in range(start, end + 1):
firstChar = doc.PositionFromLine(lineNumber)
if chr(doc.GetCharAt(firstChar)) == '#':
if chr(doc.GetCharAt(firstChar + 1)) == '#':
# line starts with ##
doc.SetCurrentPos(firstChar + 2)
else:
# line starts with #
doc.SetCurrentPos(firstChar + 1)
doc.DelLineLeft()
doc.SetCurrentPos(doc.PositionFromLine(start))
doc.SetAnchor(doc.GetLineEndPosition(end))
doc.EndUndoAction()
# View menu
    def on_menuViewWhitespace_select(self, event):
        """Toggle visible whitespace markers in the document."""
        self.components.document.SetViewWhiteSpace(event.IsChecked())
    def on_menuViewIndentationGuides_select(self, event):
        """Toggle vertical indentation guide lines."""
        self.components.document.SetIndentationGuides(event.IsChecked())
    def on_menuViewRightEdgeIndicator_select(self, event):
        """Toggle the right-margin edge indicator line."""
        if event.IsChecked():
            self.components.document.SetEdgeMode(stc.STC_EDGE_LINE)
            #self.components.document.SetEdgeMode(stc.STC_EDGE_BACKGROUND)
        else:
            self.components.document.SetEdgeMode(stc.STC_EDGE_NONE)
    def on_menuViewEndOfLineMarkers_select(self, event):
        """Toggle visible end-of-line markers."""
        self.components.document.SetViewEOL(event.IsChecked())
    def on_menuViewFixedFont_select(self, event):
        """Fixed-font toggle: not implemented (menu item is a no-op)."""
        pass
    def on_menuViewLineNumbers_select(self, event):
        """Toggle the line-number margin."""
        self.components.document.lineNumbersVisible = event.IsChecked()
    def on_menuViewCodeFolding_select(self, event):
        """Toggle the code-folding margin."""
        self.components.document.codeFoldingVisible = event.IsChecked()
# Format menu
def on_doSetStyles_command(self, event):
config = configuration.getStyleConfigPath()
if config is None:
return
cwd = os.curdir
os.chdir(os.path.dirname(config))
dlg = STCStyleEditor.STCStyleEditDlg(self,
'Python', 'python',
#'HTML', 'html',
| |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import asyncio
import logging
import math
from typing import Any, DefaultDict, Dict, List, Optional
from fbpcp.entity.mpc_instance import MPCInstance, MPCInstanceStatus, MPCParty
from fbpcp.service.mpc import MPCService
from fbpcp.service.onedocker import OneDockerService
from fbpcp.service.storage import StorageService
from fbpmp.data_processing.attribution_id_combiner.attribution_id_spine_combiner_cpp import (
CppAttributionIdSpineCombinerService,
)
from fbpmp.data_processing.sharding.sharding import ShardType
from fbpmp.data_processing.sharding.sharding_cpp import CppShardingService
from fbpmp.onedocker_binary_config import OneDockerBinaryConfig
from fbpmp.onedocker_binary_names import OneDockerBinaryNames
from fbpmp.pid.entity.pid_instance import PIDInstance, PIDInstanceStatus
from fbpmp.pid.entity.pid_instance import PIDProtocol, PIDRole
from fbpmp.pid.service.pid_service.pid import PIDService
from fbpmp.pid.service.pid_service.pid_stage import PIDStage
from fbpmp.private_attribution.entity.private_attribution_instance import (
PrivateAttributionInstance,
PrivateAttributionInstanceStatus,
PrivateAttributionRole,
UnionedPAInstance,
UnionedPAInstanceStatus,
)
from fbpmp.private_attribution.repository.private_attribution_instance import (
PrivateAttributionInstanceRepository,
)
"""
43200 s = 12 hrs
We want to be conservative on this timeout just in case:
1) partner side is not able to connect in time. This is possible because it's a manual process
to run partner containers and humans can be slow;
2) during development, we add logic or complexity to the binaries running inside the containers
so that they take more than a few hours to run.
"""
DEFAULT_CONTAINER_TIMEOUT_IN_SEC = 43200
class PrivateAttributionService:
MAX_ROWS_PER_PID_CONTAINER = 10000000
TARGET_ROWS_PER_MPC_SHARD = 50000
APPROX_BYTES_PER_PUBLISHER_ROW = 38
    def __init__(
        self,
        instance_repository: PrivateAttributionInstanceRepository,
        mpc_svc: MPCService,
        pid_svc: PIDService,
        onedocker_svc: OneDockerService,
        onedocker_binary_config_map: DefaultDict[str, OneDockerBinaryConfig],
        storage_svc: StorageService,
    ) -> None:
        """Constructor of PrivateAttributionService.

        instance_repository -- repository to CRUD PrivateAttributionInstance
        mpc_svc -- runs the MPC compute/aggregation stages
        pid_svc -- runs the PID id-matching stage
        onedocker_svc -- launches one-off binaries (combiner, sharder)
        onedocker_binary_config_map -- per-binary version/tmp-dir config
        storage_svc -- storage backend for stage inputs/outputs
        """
        self.instance_repository = instance_repository
        self.storage_svc = storage_svc
        self.mpc_svc = mpc_svc
        self.pid_svc = pid_svc
        self.onedocker_svc = onedocker_svc
        self.onedocker_binary_config_map = onedocker_binary_config_map
        self.logger: logging.Logger = logging.getLogger(__name__)
    def create_instance(
        self,
        instance_id: str,
        role: PrivateAttributionRole,
        input_path: str,
        output_dir: str,
        hmac_key: str,
        num_pid_containers: int,
        num_mpc_containers: int,
        num_files_per_mpc_container: int,
        padding_size: int,
        logger: logging.Logger,
        concurrency: int = 1,
        k_anonymity_threshold: int = 0,
    ) -> PrivateAttributionInstance:
        """Build a PrivateAttributionInstance in CREATED state, persist it via
        the instance repository, and return it.

        Note: the ``logger`` parameter is currently unused; kept for
        interface compatibility with existing callers.
        """
        self.logger.info(f"Creating instance: {instance_id}")
        instance = PrivateAttributionInstance(
            instance_id=instance_id,
            role=role,
            instances=[],
            status=PrivateAttributionInstanceStatus.CREATED,
            input_path=input_path,
            output_dir=output_dir,
            hmac_key=hmac_key,
            num_pid_containers=num_pid_containers,
            num_mpc_containers=num_mpc_containers,
            num_files_per_mpc_container=num_files_per_mpc_container,
            padding_size=padding_size,
            concurrency=concurrency,
            k_anonymity_threshold=k_anonymity_threshold,
        )
        self.instance_repository.create(instance)
        return instance
def update_instance(self, instance_id: str) -> PrivateAttributionInstance:
pa_instance = self.instance_repository.read(instance_id)
self.logger.info(f"Updating instance: {instance_id}")
if pa_instance.instances:
# Only need to update the last stage/instance
last_instance = pa_instance.instances[-1]
if isinstance(last_instance, PIDInstance):
# PID service has to call update_instance to get the newest containers
# information in case they are still running
pa_instance.instances[-1] = self.pid_svc.update_instance(
last_instance.instance_id
)
elif isinstance(last_instance, MPCInstance):
# MPC service has to call update_instance to get the newest containers
# information in case they are still running
pa_instance.instances[-1] = self.mpc_svc.update_instance(
last_instance.instance_id
)
else:
raise ValueError("Unknow type of instance")
pa_instance.status = (
self._get_status_from_stage(pa_instance.instances[-1])
or pa_instance.status
)
self.instance_repository.update(pa_instance)
self.logger.info(f"Finished updating instance: {instance_id}")
return pa_instance
# PID stage
    def id_match(
        self,
        instance_id: str,
        protocol: PIDProtocol,
        pid_config: Dict[str, Any],
        server_ips: Optional[List[str]] = None,
        dry_run: Optional[bool] = False,
    ) -> PrivateAttributionInstance:
        """Synchronous wrapper around id_match_async; blocks until the PID
        stage finishes."""
        return asyncio.run(
            self.id_match_async(
                instance_id=instance_id,
                protocol=protocol,
                pid_config=pid_config,
                server_ips=server_ips,
                dry_run=dry_run,
            )
        )
# Make an async version of id_match() so that it can be called by Thrift
    async def id_match_async(
        self,
        instance_id: str,
        protocol: PIDProtocol,
        pid_config: Dict[str, Any],
        server_ips: Optional[List[str]] = None,
        dry_run: Optional[bool] = False,
    ) -> PrivateAttributionInstance:
        """Run the PID id-matching stage for the given PA instance.

        Validates the instance status (CREATED for a first run,
        ID_MATCHING_FAILED for a retry, anything non-running if dry_run),
        creates and starts a PID instance, and blocks until it completes.
        Returns the refreshed PA instance.

        NOTE: the ``protocol`` parameter is currently unused; the PID
        instance is always created with PIDProtocol.UNION_PID -- TODO confirm
        whether that is intentional.

        Raises:
            ValueError: if a Partner call is missing server_ips, or the
                instance status does not allow id matching.
        """
        # Get the updated instance
        pa_instance = self.update_instance(instance_id)
        if pa_instance.role is PrivateAttributionRole.PARTNER and not server_ips:
            raise ValueError("Missing server_ips for Partner")
        # default to be an empty string
        retry_counter_str = ""
        # Validate status of the instance
        if pa_instance.status is PrivateAttributionInstanceStatus.CREATED:
            pa_instance.retry_counter = 0
        elif pa_instance.status is PrivateAttributionInstanceStatus.ID_MATCHING_FAILED:
            # retries get a numeric suffix so the PID instance id is unique
            pa_instance.retry_counter += 1
            retry_counter_str = str(pa_instance.retry_counter)
        elif pa_instance.status in [
            PrivateAttributionInstanceStatus.ID_MATCHING_STARTED,
            PrivateAttributionInstanceStatus.COMPUTATION_STARTED,
            PrivateAttributionInstanceStatus.AGGREGATION_STARTED,
        ]:
            # Whether this is a normal run or a test run with dry_run=True, we would like to make sure that
            # the instance is no longer in a running state before starting a new operation
            raise ValueError(
                f"Cannot start a new operation when instance {instance_id} has status {pa_instance.status}."
            )
        elif not dry_run:
            raise ValueError(
                f"Instance {instance_id} has status {pa_instance.status}. Not ready for id matching."
            )
        # Create a new pid instance
        pid_instance_id = instance_id + "_id_match" + retry_counter_str
        pid_instance = self.pid_svc.create_instance(
            instance_id=pid_instance_id,
            protocol=PIDProtocol.UNION_PID,
            pid_role=self._map_pa_role_to_pid_role(pa_instance.role),
            num_shards=pa_instance.num_pid_containers,
            input_path=pa_instance.input_path,
            output_path=pa_instance.pid_stage_output_base_path,
            hmac_key=pa_instance.hmac_key,
        )
        # Push PID instance to PrivateAttributionInstance.instances and update PA Instance status
        pid_instance.status = PIDInstanceStatus.STARTED
        pa_instance.instances.append(pid_instance)
        pid_instance.spine_path = pa_instance.spine_path
        pid_instance.data_path = pa_instance.pid_stage_out_data_path
        pa_instance.status = PrivateAttributionInstanceStatus.ID_MATCHING_STARTED
        self.instance_repository.update(pa_instance)
        pa_instance = self.update_instance(instance_id)
        # Run pid
        # With the current design, it won't return until everything is done
        await self.pid_svc.run_instance(
            instance_id=pid_instance_id,
            pid_config=pid_config,
            server_ips=server_ips,
        )
        pa_instance = self.update_instance(instance_id)
        return pa_instance
    def prepare_data(
        self,
        instance_id: str,
        dry_run: Optional[bool] = None,
        log_cost_to_s3: bool = False,
    ) -> List[str]:
        """Synchronous wrapper around prepare_data_async; blocks until the
        combine + reshard data-prep stage finishes."""
        return asyncio.run(
            self.prepare_data_async(
                instance_id=instance_id,
                dry_run=dry_run,
                log_cost_to_s3=log_cost_to_s3,
            )
        )
async def prepare_data_async(
self,
instance_id: str,
dry_run: Optional[bool] = None,
log_cost_to_s3: bool = False,
) -> List[str]:
self.logger.info(f"[{self}] Starting CppAttributionIdSpineCombinerService")
# Get the updated instance
pa_instance = self.update_instance(instance_id)
# Validate status of the instance
if not dry_run and (
pa_instance.status
is not PrivateAttributionInstanceStatus.ID_MATCHING_COMPLETED
):
raise ValueError(
f"Instance {instance_id} has status {pa_instance.status}. Not ready for data prep stage."
)
output_path = pa_instance.data_processing_output_path
combine_output_path = output_path + "_combine"
# execute combiner step
combiner_service = CppAttributionIdSpineCombinerService()
binary_config = self.onedocker_binary_config_map[
OneDockerBinaryNames.ATTRIBUTION_ID_SPINE_COMBINER.value
]
await combiner_service.combine_on_container_async(
spine_path=pa_instance.spine_path,
data_path=pa_instance.pid_stage_out_data_path,
output_path=combine_output_path,
num_shards=pa_instance.num_pid_containers,
run_name=pa_instance.instance_id if log_cost_to_s3 else "",
onedocker_svc=self.onedocker_svc,
tmp_directory=binary_config.tmp_directory,
padding_size=pa_instance.padding_size,
binary_version=binary_config.binary_version,
)
logging.info("Finished running CombinerService, starting to reshard")
# reshard each file into x shards
# note we need each file to be sharded into the same # of files
# because we want to keep the data of each existing file to run
# on the same container
sharder = CppShardingService()
logging.info("Instantiated sharder")
all_output_paths = []
coros = []
for shard_index in range(pa_instance.num_pid_containers):
path_to_shard = PIDStage.get_sharded_filepath(
combine_output_path, shard_index
)
shards_per_file = math.ceil(
(pa_instance.num_mpc_containers / pa_instance.num_pid_containers)
* pa_instance.num_files_per_mpc_container
)
logging.info(f"Input path to sharder: {path_to_shard}")
shard_index_offset = shard_index * shards_per_file
logging.info(
f"Output base path to sharder: {output_path}, {shard_index_offset=}"
)
binary_config = self.onedocker_binary_config_map[
OneDockerBinaryNames.SHARDER.value
]
coro = sharder.shard_on_container_async(
shard_type=ShardType.ROUND_ROBIN,
filepath=path_to_shard,
output_base_path=output_path,
file_start_index=shard_index_offset,
num_output_files=shards_per_file,
onedocker_svc=self.onedocker_svc,
binary_version=binary_config.binary_version,
tmp_directory=binary_config.tmp_directory,
)
coros.append(coro)
# Wait for all coroutines to finish
await asyncio.gather(*coros)
logging.info("All sharding coroutines finished")
return all_output_paths
def _validate_compute_attribute_inputs(
self,
pa_instance: PrivateAttributionInstance,
server_ips: Optional[List[str]],
dry_run: Optional[bool],
) -> str:
if pa_instance.role is PrivateAttributionRole.PARTNER and not server_ips:
raise ValueError("Missing server_ips")
# default to be an empty string
retry_counter_str = ""
# Validate status of the instance
if pa_instance.status is PrivateAttributionInstanceStatus.ID_MATCHING_COMPLETED:
pa_instance.retry_counter = 0
elif pa_instance.status in [
PrivateAttributionInstanceStatus.COMPUTATION_FAILED,
PrivateAttributionInstanceStatus.COMPUTATION_STARTED,
PrivateAttributionInstanceStatus.COMPUTATION_COMPLETED,
]:
pa_instance.retry_counter += 1
retry_counter_str = str(pa_instance.retry_counter)
elif not dry_run:
raise ValueError(
f"Instance {pa_instance.instance_id} has status {pa_instance.status}. Not ready for computing metrics."
)
return retry_counter_str
    async def _create_and_start_mpc_instance(
        self,
        instance_id: str,
        game_name: str,
        mpc_party: MPCParty,
        num_containers: int,
        binary_version: str,
        server_ips: Optional[List[str]] = None,
        game_args: Optional[List[Dict[str, Any]]] = None,
        container_timeout: Optional[int] = None,
    ) -> MPCInstance:
        """Create an MPC instance and start it, returning the started instance.

        Falls back to DEFAULT_CONTAINER_TIMEOUT_IN_SEC (12h) when no
        container_timeout is supplied.
        """
        timeout = container_timeout or DEFAULT_CONTAINER_TIMEOUT_IN_SEC
        self.mpc_svc.create_instance(
            instance_id=instance_id,
            game_name=game_name,
            mpc_party=mpc_party,
            num_workers=num_containers,
            game_args=game_args,
        )
        return await self.mpc_svc.start_instance_async(
            instance_id=instance_id,
            server_ips=server_ips,
            version=binary_version,
            timeout=timeout,
        )
    def compute_attribute(
        self,
        instance_id: str,
        game_name: str,
        attribution_rule: str,
        aggregation_type: str,
        server_ips: Optional[List[str]] = None,
        dry_run: Optional[bool] = None,
        log_cost_to_s3: bool = False,
        container_timeout: Optional[int] = None,
    ) -> PrivateAttributionInstance:
        """Synchronous wrapper around compute_attribute_async; blocks until the
        MPC compute stage has been started."""
        return asyncio.run(
            self.compute_attribute_async(
                instance_id=instance_id,
                game_name=game_name,
                attribution_rule=attribution_rule,
                aggregation_type=aggregation_type,
                server_ips=server_ips,
                dry_run=dry_run,
                log_cost_to_s3=log_cost_to_s3,
                container_timeout=container_timeout,
            )
        )
    async def compute_attribute_async(
        self,
        instance_id: str,
        game_name: str,
        attribution_rule: str,
        aggregation_type: str,
        server_ips: Optional[List[str]] = None,
        dry_run: Optional[bool] = None,
        log_cost_to_s3: bool = False,
        container_timeout: Optional[int] = None,
    ) -> PrivateAttributionInstance:
        """Run the MPC compute stage: validate status, build per-container game
        args, create/start the MPC instance, and record it on the PA instance.

        Returns the PA instance with status COMPUTATION_STARTED.
        """
        # Get the updated instance
        pa_instance = self.update_instance(instance_id)
        retry_counter_str = self._validate_compute_attribute_inputs(
            pa_instance, server_ips, dry_run
        )
        logging.info("Starting to run MPC instance.")
        # Create and start MPC instance.
        # One game-args dict per MPC container; file_start_index partitions the
        # sharded input files contiguously across containers.
        game_args = [
            {
                "aggregators": aggregation_type,
                "input_base_path": pa_instance.data_processing_output_path,
                "output_base_path": pa_instance.compute_stage_output_base_path,
                "attribution_rules": attribution_rule,
                "concurrency": pa_instance.concurrency,
                "num_files": pa_instance.num_files_per_mpc_container,
                "file_start_index": i * pa_instance.num_files_per_mpc_container,
                "use_xor_encryption": True,
                "run_name": pa_instance.instance_id if log_cost_to_s3 else "",
                "max_num_touchpoints": pa_instance.padding_size,
                "max_num_conversions": pa_instance.padding_size,
            }
            for i in range(pa_instance.num_mpc_containers)
        ]
        binary_config = self.onedocker_binary_config_map[
            OneDockerBinaryNames.ATTRIBUTION_COMPUTE.value
        ]
        mpc_instance = await self._create_and_start_mpc_instance(
            instance_id=instance_id + "_compute_metrics" + retry_counter_str,
            game_name=game_name,
            mpc_party=self._map_pa_role_to_mpc_party(pa_instance.role),
            num_containers=pa_instance.num_mpc_containers,
            binary_version=binary_config.binary_version,
            server_ips=server_ips,
            game_args=game_args,
            container_timeout=container_timeout,
        )
        logging.info("Finished running MPC instance.")
        # Push MPC instance to PrivateAttributionInstance.instances and update PA Instance status
        pa_instance.instances.append(mpc_instance)
        pa_instance.status = PrivateAttributionInstanceStatus.COMPUTATION_STARTED
        self.instance_repository.update(pa_instance)
        return pa_instance
def _validate_aggregate_shards_inputs(
self,
pa_instance: PrivateAttributionInstance,
server_ips: Optional[List[str]],
dry_run: Optional[bool],
) -> str:
if pa_instance.role is PrivateAttributionRole.PARTNER and not server_ips:
raise ValueError("Missing server_ips")
# default to be an empty string
retry_counter_str = ""
# Validate status of the instance
if pa_instance.status is PrivateAttributionInstanceStatus.COMPUTATION_COMPLETED:
pa_instance.retry_counter = 0
elif pa_instance.status is PrivateAttributionInstanceStatus.AGGREGATION_FAILED:
pa_instance.retry_counter += 1
retry_counter_str = str(pa_instance.retry_counter)
elif pa_instance.status in [
PrivateAttributionInstanceStatus.ID_MATCHING_STARTED,
PrivateAttributionInstanceStatus.COMPUTATION_STARTED,
PrivateAttributionInstanceStatus.AGGREGATION_STARTED,
]:
# Whether this is | |
# src/python/pants/engine/target_test.py
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from collections import namedtuple
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple
import pytest
from pants.engine.addresses import Address
from pants.engine.fs import GlobExpansionConjunction, GlobMatchErrorBehavior, Paths
from pants.engine.target import (
AsyncFieldMixin,
BoolField,
Dependencies,
DictStringToStringField,
DictStringToStringSequenceField,
ExplicitlyProvidedDependencies,
Field,
FieldSet,
GeneratedTargets,
GenerateSourcesRequest,
IntField,
InvalidFieldChoiceException,
InvalidFieldException,
InvalidFieldTypeException,
InvalidGeneratedTargetException,
InvalidTargetException,
MultipleSourcesField,
NestedDictStringToStringField,
OverridesField,
RequiredFieldMissingException,
ScalarField,
SequenceField,
SingleSourceField,
StringField,
StringSequenceField,
Tags,
Target,
generate_file_level_targets,
targets_with_sources_types,
)
from pants.engine.unions import UnionMembership
from pants.option.global_options import FilesNotFoundBehavior
from pants.testutil.pytest_util import no_exception
from pants.util.frozendict import FrozenDict
from pants.util.meta import FrozenInstanceError
from pants.util.ordered_set import FrozenOrderedSet, OrderedSet
# -----------------------------------------------------------------------------------------------
# Test core Field and Target abstractions
# -----------------------------------------------------------------------------------------------
class FortranExtensions(Field):
    """Test field whose hydrated values must all be prefixed with `Fortran`."""
    alias = "fortran_extensions"
    value: Tuple[str, ...]
    default = ()
    @classmethod
    def compute_value(cls, raw_value: Optional[Iterable[str]], address: Address) -> Tuple[str, ...]:
        computed = super().compute_value(raw_value, address)
        # Arbitrary validation so the tests can exercise the eager
        # hydration/validation path.
        invalid = [extension for extension in computed if not extension.startswith("Fortran")]
        if invalid:
            raise InvalidFieldException(
                f"The {repr(cls.alias)} field in target {address} expects all elements to be "
                f"prefixed by `Fortran`. Received {invalid}.",
            )
        return tuple(computed)
class FortranVersion(StringField):
    # Plain string field registered on FortranTarget below.
    alias = "version"
class UnrelatedField(BoolField):
    # Field deliberately NOT registered on FortranTarget, used to test
    # lookups of unregistered fields.
    alias = "unrelated"
    default = False
class FortranTarget(Target):
    # Minimal target type used throughout these tests.
    alias = "fortran"
    core_fields = (FortranExtensions, FortranVersion)
    def validate(self) -> None:
        # Arbitrary target-level validation to exercise InvalidTargetException.
        if self[FortranVersion].value == "bad":
            raise InvalidTargetException("Bad!")
def test_field_and_target_eq() -> None:
    """Fields compare by value (address ignored); Targets compare by value AND
    address; both are frozen; subclasses never compare equal to their parents.
    """
    addr = Address("", target_name="tgt")
    field = FortranVersion("dev0", addr)
    assert field.value == "dev0"
    other = FortranVersion("dev0", addr)
    assert field == other
    assert hash(field) == hash(other)
    other = FortranVersion("dev1", addr)
    assert field != other
    assert hash(field) != hash(other)
    # NB: because normal `Field`s throw away the address, these are equivalent.
    other = FortranVersion("dev0", Address("", target_name="other"))
    assert field == other
    assert hash(field) == hash(other)
    # Ensure the field is frozen.
    with pytest.raises(FrozenInstanceError):
        field.y = "foo"  # type: ignore[attr-defined]
    tgt = FortranTarget({"version": "dev0"}, addr)
    assert tgt.address == addr
    other_tgt = FortranTarget({"version": "dev0"}, addr)
    assert tgt == other_tgt
    assert hash(tgt) == hash(other_tgt)
    other_tgt = FortranTarget({"version": "dev1"}, addr)
    assert tgt != other_tgt
    assert hash(tgt) != hash(other_tgt)
    # Unlike fields, targets DO include the address in equality.
    other_tgt = FortranTarget({"version": "dev0"}, Address("", target_name="other"))
    assert tgt != other_tgt
    assert hash(tgt) != hash(other_tgt)
    # Ensure the target is frozen.
    with pytest.raises(FrozenInstanceError):
        tgt.y = "foo"  # type: ignore[attr-defined]
    # Ensure that subclasses are not equal.
    class SubclassField(FortranVersion):
        pass
    subclass_field = SubclassField("dev0", addr)
    assert field != subclass_field
    assert hash(field) != hash(subclass_field)
    class SubclassTarget(FortranTarget):
        pass
    subclass_tgt = SubclassTarget({"version": "dev0"}, addr)
    assert tgt != subclass_tgt
    assert hash(tgt) != hash(subclass_tgt)
def test_invalid_fields_rejected() -> None:
    """Constructing a target with an unregistered field raises, and the error
    names both the field and the target address."""
    with pytest.raises(InvalidFieldException) as exc:
        FortranTarget({"invalid_field": True}, Address("", target_name="lib"))
    assert "Unrecognized field `invalid_field=True`" in str(exc)
    assert "//:lib" in str(exc)
def test_get_field() -> None:
    """tgt[Field] and tgt.get(Field) return hydrated values; defaults apply
    when the BUILD file omits the field; unregistered fields raise KeyError
    from `[]` but honor `get()` defaults.
    """
    extensions = ("FortranExt1",)
    tgt = FortranTarget({FortranExtensions.alias: extensions}, Address("", target_name="lib"))
    assert tgt[FortranExtensions].value == extensions
    assert tgt.get(FortranExtensions).value == extensions
    assert tgt.get(FortranExtensions, default_raw_value=["FortranExt2"]).value == extensions
    # Default field value. This happens when the field is registered on the target type, but the
    # user does not explicitly set the field in the BUILD file.
    default_field_tgt = FortranTarget({}, Address("", target_name="default"))
    assert default_field_tgt[FortranExtensions].value == ()
    assert default_field_tgt.get(FortranExtensions).value == ()
    assert default_field_tgt.get(FortranExtensions, default_raw_value=["FortranExt2"]).value == ()
    # Example of a call site applying its own default value instead of the field's default value.
    # BUG FIX: the original `value or 123 == 123` parsed as `value or (123 == 123)`,
    # which is always truthy, making the assertion vacuous.
    assert (default_field_tgt[FortranExtensions].value or 123) == 123
    assert (
        FortranTarget.class_get_field(FortranExtensions, union_membership=UnionMembership({}))
        is FortranExtensions
    )
    # Field is not registered on the target.
    with pytest.raises(KeyError) as exc:
        default_field_tgt[UnrelatedField]
    assert UnrelatedField.__name__ in str(exc)
    with pytest.raises(KeyError) as exc:
        FortranTarget.class_get_field(UnrelatedField, union_membership=UnionMembership({}))
    assert UnrelatedField.__name__ in str(exc)
    assert default_field_tgt.get(UnrelatedField).value == UnrelatedField.default
    assert default_field_tgt.get(
        UnrelatedField, default_raw_value=not UnrelatedField.default
    ).value == (not UnrelatedField.default)
def test_field_hydration_is_eager() -> None:
    """Field validation runs at target construction time, not on first access,
    and the error names both the bad value and the target address."""
    with pytest.raises(InvalidFieldException) as exc:
        FortranTarget(
            {FortranExtensions.alias: ["FortranExt1", "DoesNotStartWithFortran"]},
            Address("", target_name="bad_extension"),
        )
    assert "DoesNotStartWithFortran" in str(exc)
    assert "//:bad_extension" in str(exc)
def test_has_fields() -> None:
    """has_field(s) / class_has_field(s) report registered fields correctly,
    including the all-of semantics when multiple fields are queried."""
    empty_union_membership = UnionMembership({})
    tgt = FortranTarget({}, Address("", target_name="lib"))
    assert tgt.field_types == (FortranExtensions, FortranVersion)
    assert FortranTarget.class_field_types(union_membership=empty_union_membership) == (
        FortranExtensions,
        FortranVersion,
    )
    assert tgt.has_fields([]) is True
    assert FortranTarget.class_has_fields([], union_membership=empty_union_membership) is True
    assert tgt.has_fields([FortranExtensions]) is True
    assert tgt.has_field(FortranExtensions) is True
    assert (
        FortranTarget.class_has_fields([FortranExtensions], union_membership=empty_union_membership)
        is True
    )
    assert (
        FortranTarget.class_has_field(FortranExtensions, union_membership=empty_union_membership)
        is True
    )
    assert tgt.has_fields([UnrelatedField]) is False
    assert tgt.has_field(UnrelatedField) is False
    assert (
        FortranTarget.class_has_fields([UnrelatedField], union_membership=empty_union_membership)
        is False
    )
    assert (
        FortranTarget.class_has_field(UnrelatedField, union_membership=empty_union_membership)
        is False
    )
    # has_fields requires ALL queried fields to be registered.
    assert tgt.has_fields([FortranExtensions, UnrelatedField]) is False
    assert (
        FortranTarget.class_has_fields(
            [FortranExtensions, UnrelatedField], union_membership=empty_union_membership
        )
        is False
    )
def test_add_custom_fields() -> None:
    """A plugin field registered via a union rule extends exactly one target type."""

    class CustomField(BoolField):
        alias = "custom_field"
        default = False

    union_membership = UnionMembership.from_rules(
        [FortranTarget.register_plugin_field(CustomField)]
    )
    tgt = FortranTarget(
        {CustomField.alias: True},
        Address("", target_name="lib"),
        union_membership=union_membership,
    )

    # The plugin field is appended after the core fields.
    assert tgt.field_types == (FortranExtensions, FortranVersion, CustomField)
    assert tgt.core_fields == (FortranExtensions, FortranVersion)
    assert tgt.plugin_fields == (CustomField,)
    assert tgt.has_field(CustomField) is True
    assert FortranTarget.class_field_types(union_membership=union_membership) == (
        FortranExtensions,
        FortranVersion,
        CustomField,
    )
    assert FortranTarget.class_has_field(CustomField, union_membership=union_membership) is True
    assert (
        FortranTarget.class_get_field(CustomField, union_membership=union_membership) is CustomField
    )
    assert tgt[CustomField].value is True

    # When the field is omitted, its declared default applies.
    default_tgt = FortranTarget(
        {}, Address("", target_name="default"), union_membership=union_membership
    )
    assert default_tgt[CustomField].value is False

    # Ensure that the `PluginField` is not being registered on other target types.
    class OtherTarget(Target):
        alias = "other_target"
        core_fields = ()

    other_tgt = OtherTarget({}, Address("", target_name="other"))
    assert other_tgt.plugin_fields == ()
    assert other_tgt.has_field(CustomField) is False
def test_override_preexisting_field_via_new_target() -> None:
    # To change the behavior of a pre-existing field, you must create a new target as it would not
    # be safe to allow plugin authors to change the behavior of core target types.
    #
    # Because the Target API does not care about the actual target type and we only check that the
    # target has the required fields via Target.has_fields(), it is safe to create a new target
    # that still works where the original target was expected.
    #
    # However, this means that we must ensure `Target.get()` and `Target.has_fields()` will work
    # with subclasses of the original `Field`s.

    class CustomFortranExtensions(FortranExtensions):
        banned_extensions = ("FortranBannedExt",)
        default_extensions = ("FortranCustomExt",)

        @classmethod
        def compute_value(
            cls, raw_value: Optional[Iterable[str]], address: Address
        ) -> Tuple[str, ...]:
            # Ensure that we avoid certain problematic extensions and always use some defaults.
            specified_extensions = super().compute_value(raw_value, address)
            banned = [
                extension
                for extension in specified_extensions
                if extension in cls.banned_extensions
            ]
            if banned:
                raise InvalidFieldException(
                    f"The {repr(cls.alias)} field in target {address} is using banned "
                    f"extensions: {banned}"
                )
            return (*specified_extensions, *cls.default_extensions)

    class CustomFortranTarget(Target):
        alias = "custom_fortran"
        core_fields = tuple(
            {*FortranTarget.core_fields, CustomFortranExtensions} - {FortranExtensions}
        )

    custom_tgt = CustomFortranTarget(
        {FortranExtensions.alias: ["FortranExt1"]}, Address("", target_name="custom")
    )

    assert custom_tgt.has_field(FortranExtensions) is True
    assert custom_tgt.has_field(CustomFortranExtensions) is True
    assert custom_tgt.has_fields([FortranExtensions, CustomFortranExtensions]) is True
    assert (
        CustomFortranTarget.class_get_field(FortranExtensions, union_membership=UnionMembership({}))
        is CustomFortranExtensions
    )

    # Ensure that subclasses not defined on a target are not accepted. This allows us to, for
    # example, filter every target with `PythonSources` (or a subclass) and to ignore targets with
    # only `SourcesField`.
    normal_tgt = FortranTarget({}, Address("", target_name="normal"))
    assert normal_tgt.has_field(FortranExtensions) is True
    assert normal_tgt.has_field(CustomFortranExtensions) is False

    assert custom_tgt[FortranExtensions] == custom_tgt[CustomFortranExtensions]
    assert custom_tgt[FortranExtensions].value == (
        "FortranExt1",
        *CustomFortranExtensions.default_extensions,
    )

    # Check custom default value
    assert (
        CustomFortranTarget({}, Address("", target_name="default"))[FortranExtensions].value
        == CustomFortranExtensions.default_extensions
    )

    # Custom validation
    with pytest.raises(InvalidFieldException) as exc:
        CustomFortranTarget(
            {FortranExtensions.alias: CustomFortranExtensions.banned_extensions},
            Address("", target_name="invalid"),
        )
    # Assert against the raised exception (`exc.value`), not the ExceptionInfo
    # wrapper, so the substring check targets the actual error message.
    assert str(list(CustomFortranExtensions.banned_extensions)) in str(exc.value)
    assert "//:invalid" in str(exc.value)
def test_required_field() -> None:
    """Omitting a field declared `required = True` raises RequiredFieldMissingException."""

    class RequiredField(StringField):
        alias = "field"
        required = True

    class RequiredTarget(Target):
        alias = "required_target"
        core_fields = (RequiredField,)

    address = Address("", target_name="lib")

    # Supplying the field constructs the target without error.
    RequiredTarget({"field": "present"}, address)

    with pytest.raises(RequiredFieldMissingException) as exc:
        RequiredTarget({}, address)
    message = str(exc.value)
    assert str(address) in message
    assert "field" in message
def test_async_field_mixin() -> None:
    """AsyncFieldMixin folds the owning address into equality and hashing."""

    class ExampleField(IntField, AsyncFieldMixin):
        alias = "field"
        default = 10

    addr = Address("", target_name="tgt")
    field = ExampleField(None, addr)
    assert field.value == 10
    assert field.address == addr
    ExampleField.mro()  # Regression test that the mro is resolvable.

    # Same value and same address: equal, with matching hashes.
    same = ExampleField(None, addr)
    assert field == same
    assert hash(field) == hash(same)

    # A different value breaks equality (and the hash).
    different_value = ExampleField(25, addr)
    assert field != different_value
    assert hash(field) != hash(different_value)

    # Whereas normally the address is not considered, it is considered for async fields.
    different_addr = ExampleField(None, Address("", target_name="other"))
    assert field != different_addr
    assert hash(field) != hash(different_addr)

    # Ensure it's still frozen.
    with pytest.raises(FrozenInstanceError):
        field.y = "foo"  # type: ignore[attr-defined]

    # Ensure that subclasses are not equal.
    class Subclass(ExampleField):
        pass

    subclass = Subclass(None, addr)
    assert field != subclass
    assert hash(field) != hash(subclass)
def test_target_validate() -> None:
    """A field value that fails validation aborts target construction."""
    address = Address("", target_name="t")
    with pytest.raises(InvalidTargetException):
        FortranTarget({FortranVersion.alias: "bad"}, address)
def test_target_residence_dir() -> None:
assert FortranTarget({}, Address("some_dir/subdir")).residence_dir == "some_dir/subdir"
assert (
FortranTarget({}, Address("some_dir/subdir"), residence_dir="another_dir").residence_dir
| |
None:
# Shape should match data
if bad_map.ndim == 2 and bad_map.shape != data[0].shape:
raise ValueError(
f"2D bad_map should have the same shape as a frame ({data[0].shape}),"
f" but has shape {bad_map.shape}"
)
elif bad_map.ndim == 3 and bad_map.shape != data.shape:
raise ValueError(
f"3D bad_map should have the same shape as data cube ({data.shape}),"
f" but has shape {bad_map.shape}"
)
elif bad_map.ndim == 2:
bad_map = np.repeat(bad_map[np.newaxis, :], n_im, axis=0)
return bad_map, add_bad
def show_clean_params(
    filename,
    isz,
    r1=None,
    dr=None,
    bad_map=None,
    add_bad=None,
    edge=0,
    remove_bad=True,
    nframe=0,
    ihdu=0,
    f_kernel=3,
    offx=0,
    offy=0,
    apod=False,
    window=None,
    *,
    mask=None,
):
    """Display the input parameters for the cleaning.

    Parameters:
    -----------
    `filename` {str}: filename containing the datacube,\n
    `isz` {int}: Size of the cropped image (default: 256)\n
    `r1` {int}: Radius of the rings to compute background sky (default: 100)\n
    `dr` {int}: Outer radius to compute sky (default: 10)\n
    `bad_map` {array}: Bad pixel map with 0 and 1 where 1 set for a bad pixel (default: None),\n
    `add_bad` {list}: List of 2d coordinates of bad pixels/cosmic rays (default: []),\n
    `edge` {int}: Number of pixel to be removed on the edge of the image (SPHERE),\n
    `remove_bad` {bool}: If True, the bad pixels are removed using a gaussian interpolation,\n
    `nframe` {int}: Frame number to be shown (default: 0),\n
    `ihdu` {int}: Hdu number of the fits file. Normally 1 for NIRISS and 0 for SPHERE (default: 0),\n
    `f_kernel` {float}: Kernel size of the median filter used to find the center,\n
    `offx`, `offy` {int}: Offsets in pixels applied to the centering position,\n
    `apod` {bool}: If True, overlay the apodisation window (requires `window`),\n
    `window` {float}: FWHM of the super-gaussian apodisation window,\n
    `mask` {array}: Alternative background mask (1 = pixel used for sky estimation).

    Returns:
    --------
    `fig`: matplotlib figure summarizing the cleaning parameters.
    """
    with fits.open(filename) as fd:
        data = fd[ihdu].data
    img0 = data[nframe]
    dims = img0.shape

    if isz is None:
        print(
            "Warning: isz not found (None by default). isz is set to the original image size (%i)"
            % (dims[0]),
            file=sys.stderr,
        )
        isz = dims[0]

    bad_map, add_bad = _get_3d_bad_pixels(bad_map, add_bad, data)
    bmap0 = bad_map[nframe]
    ab0 = add_bad[nframe]

    # Zero the detector border (SPHERE artifact). Mirrors _apply_edge_correction,
    # including the `-edge:-1` slices that leave the very last row/column untouched.
    if edge != 0:
        img0[:, 0:edge] = 0
        img0[:, -edge:-1] = 0
        img0[0:edge, :] = 0
        img0[-edge:-1, :] = 0

    # Fix: use logical `and` instead of bitwise `&` for the boolean condition.
    if (bad_map is not None) and remove_bad:
        img1 = fix_bad_pixels(img0, bmap0, add_bad=ab0)
    else:
        img1 = img0.copy()

    cropped_infos = crop_max(img1, isz, offx=offx, offy=offy, f=f_kernel)
    pos = cropped_infos[1]

    # Collect the bad-pixel positions (including manually added ones) for display.
    noBadPixel = False
    bad_pix_x, bad_pix_y = [], []
    if np.any(bmap0):
        if len(ab0) != 0:
            for j in range(len(ab0)):
                bmap0[ab0[j][1], ab0[j][0]] = 1
        bad_pix = np.where(bmap0 == 1)
        bad_pix_x = bad_pix[0]
        bad_pix_y = bad_pix[1]
    else:
        noBadPixel = True

    theta = np.linspace(0, 2 * np.pi, 100)
    x0 = pos[0]
    y0 = pos[1]

    # Determine which sky-subtraction region to draw. Fix: `sky_method` used to
    # be left unbound when both `r1` and `mask` were None, which raised an
    # UnboundLocalError at the plotting stage below; default to None instead.
    sky_method = None
    if r1 is not None:
        x1 = r1 * np.cos(theta) + x0
        y1 = r1 * np.sin(theta) + y0
        if dr is not None:
            r2 = r1 + dr
            x2 = r2 * np.cos(theta) + x0
            y2 = r2 * np.sin(theta) + y0
        sky_method = "ring"
    elif mask is not None:
        bg_coords = np.where(mask == 1)
        bg_x = bg_coords[0]
        bg_y = bg_coords[1]
        sky_method = "mask"

    if window is not None:
        r3 = window
        x3 = r3 * np.cos(theta) + x0
        y3 = r3 * np.sin(theta) + y0

    # Corners of the final cropped (resized) image, centered on (x0, y0).
    xs1, ys1 = x0 + isz // 2, y0 + isz // 2
    xs2, ys2 = x0 - isz // 2, y0 + isz // 2
    xs3, ys3 = x0 - isz // 2, y0 - isz // 2
    xs4, ys4 = x0 + isz // 2, y0 - isz // 2

    max_val = img1[y0, x0]
    fig = plt.figure(figsize=(5, 5))
    plt.title("--- CLEANING PARAMETERS ---")
    plt.imshow(img1, norm=PowerNorm(0.5, vmin=0, vmax=max_val), cmap="afmhot")
    if sky_method == "ring":
        if dr is not None:
            plt.plot(x1, y1, label="Inner radius for sky subtraction")
            plt.plot(x2, y2, label="Outer radius for sky subtraction")
        else:
            plt.plot(x1, y1, label="Boundary for sky subtraction")
    elif sky_method == "mask":
        plt.scatter(
            bg_y,
            bg_x,
            color="None",
            marker="s",
            edgecolors="C0",
            s=20,
            label="Pixels used for sky subtraction",
        )
    if apod and window is not None:
        plt.plot(x3, y3, "--", label="Super-gaussian windowing")
    plt.plot(x0, y0, "+", color="c", ms=10, label="Centering position")
    plt.plot(
        [xs1, xs2, xs3, xs4, xs1],
        [ys1, ys2, ys3, ys4, ys1],
        "w--",
        label="Resized image",
    )
    plt.xlim((0, dims[0] - 1))
    plt.ylim((0, dims[1] - 1))
    if not noBadPixel:
        label = "Fixed hot/bad pixels" if remove_bad else "Hot/bad pixels"
        plt.scatter(
            bad_pix_y,
            bad_pix_x,
            color="None",
            marker="s",
            edgecolors="r",
            facecolors="None",
            s=20,
            label=label,
        )
    plt.xlabel("X [pix]")
    plt.ylabel("Y [pix]")
    plt.legend(fontsize=8, loc=1)
    plt.tight_layout()
    return fig
def _apply_edge_correction(img0, edge=0):
    """Zero out a border of `edge` pixels on each side of the frame, in place.

    Suppresses the bright edges (set to 0) observed for some detectors (SPHERE).
    NOTE(review): the `-edge:-1` slices leave the very last row and column
    untouched — looks like an off-by-one, but preserved here; confirm intent.
    """
    if edge == 0:
        return img0
    img0[:, :edge] = 0
    img0[:, -edge:-1] = 0
    img0[:edge, :] = 0
    img0[-edge:-1, :] = 0
    return img0
def _remove_dark(img1, darkfile=None, ihdu=0, verbose=False):
    """Subtract the master dark (mean of the dark cube) from `img1`, in place.

    If `darkfile` is None, the image is returned unchanged.
    """
    if darkfile is None:
        return img1
    with fits.open(darkfile) as hdu:
        dark = hdu[ihdu].data
    if verbose:
        print("Dark cube shape is:", dark.shape)
    img1 -= np.mean(dark, axis=0)
    return img1
def clean_data(
    data,
    isz=None,
    r1=None,
    dr=None,
    edge=0,
    bad_map=None,
    add_bad=None,
    apod=True,
    offx=0,
    offy=0,
    sky=True,
    window=None,
    darkfile=None,
    f_kernel=3,
    verbose=False,
    *,
    mask=None,
):
    """Clean data.

    For each frame: patch the detector edges, interpolate bad pixels,
    subtract the dark, subtract the sky background, clip negative pixels,
    recenter/crop, and optionally apodise. Frames whose recentered crop
    does not come out square with the expected size are dropped.

    Parameters:
    -----------
    `data` {np.array} -- datacube containing the NRM data\n
    `isz` {int} -- Size of the cropped image (default: {None})\n
    `r1` {int} -- Radius of the rings to compute background sky (default: {None})\n
    `dr` {int} -- Outer radius to compute sky (default: {None})\n
    `edge` {int} -- Patch the edges of the image (VLT/SPHERE artifact, default: {0}),\n
    `bad_map` {np.array} -- Bad pixel map (2d frame or 3d cube; default: {None}),\n
    `add_bad` {list} -- Extra bad pixel/cosmic-ray coordinates (default: {None}),\n
    `apod` {bool} -- If True and `window` is set, apply super-gaussian windowing,\n
    `sky` {bool} -- If True, subtract the sky (requires `r1` or `mask`),\n
    `window` {float} -- FWHM of the apodisation window,\n
    `darkfile` {str} -- Optional fits file whose averaged dark is subtracted,\n
    `f_kernel` {float} -- Median-filter kernel size used to find the center,\n
    `mask` {np.array} -- Alternative sky mask (1 = background pixel).

    Returns:
    --------
    `cube` {np.array} -- Cleaned datacube.
    """
    n_im = data.shape[0]
    cube_cleaned = []  # np.zeros([n_im, isz, isz])
    l_bad_frame = []

    # Normalize bad_map/add_bad to one entry per frame.
    bad_map, add_bad = _get_3d_bad_pixels(bad_map, add_bad, data)

    for i in tqdm(range(n_im), ncols=100, desc="Cleaning", leave=False):
        img0 = data[i]
        # NOTE(review): _apply_edge_correction zeroes the border in place, so
        # `data` itself is mutated when edge != 0 — confirm this is intended.
        img0 = _apply_edge_correction(img0, edge=edge)
        if bad_map is not None:
            img1 = fix_bad_pixels(img0, bad_map[i], add_bad=add_bad[i])
        else:
            img1 = img0.copy()
        img1 = _remove_dark(img1, darkfile=darkfile, verbose=verbose)
        if isz is not None:
            # Get expected center for sky correction
            filtmed = f_kernel is not None
            center = find_max(img1, filtmed=filtmed, f=f_kernel)
        else:
            center = None
        if sky and (r1 is not None or mask is not None):
            img_biased = sky_correction(
                img1, r1=r1, dr=dr, verbose=verbose, center=center, mask=mask
            )[0]
        elif sky:
            # Sky requested but neither an annulus radius nor a mask was given.
            warnings.warn(
                "sky is set to True, but r1 and mask are set to None. Skipping sky correction",
                RuntimeWarning,
            )
            img_biased = img1.copy()
        else:
            img_biased = img1.copy()
        img_biased[img_biased < 0] = 0  # Remove negative pixels
        if isz is not None:
            # Get expected center for sky correction
            filtmed = f_kernel is not None
            im_rec_max = crop_max(
                img_biased, isz, offx=offx, offy=offy, filtmed=filtmed, f=f_kernel
            )[0]
        else:
            im_rec_max = img_biased.copy()
        # Reject frames whose crop is not square or not of the expected size
        # (happens when the detected maximum is too close to the image edge).
        if (
            (im_rec_max.shape[0] != im_rec_max.shape[1])
            or (isz is not None and im_rec_max.shape[0] != isz)
            or (isz is None and im_rec_max.shape[0] != img0.shape[0])
        ):
            l_bad_frame.append(i)
        else:
            if apod and window is not None:
                img = apply_windowing(im_rec_max, window=window)
            elif apod:
                warnings.warn(
                    "apod is set to True, but window is None. Skipping apodisation",
                    RuntimeWarning,
                )
                img = im_rec_max.copy()
            else:
                img = im_rec_max.copy()
            cube_cleaned.append(img)

    if verbose:
        print("Bad centering frame number:", l_bad_frame)
    cube_cleaned = np.array(cube_cleaned)
    return cube_cleaned
def select_clean_data(
filename,
isz=256,
r1=None,
dr=None,
edge=0,
clip=True,
bad_map=None,
add_bad=None,
offx=0,
offy=0,
clip_fact=0.5,
apod=True,
sky=True,
window=None,
darkfile=None,
f_kernel=3,
verbose=False,
ihdu=0,
display=False,
*,
remove_bad=True,
nframe=0,
mask=None,
):
"""Clean and select good datacube (sigma-clipping using fluxes variations).
Parameters:
-----------
`filename` {str}: filename containing the datacube,\n
`isz` {int}: Size of the cropped image (default: {256})\n
`r1` {int}: Radius of the rings to compute background sky (default: {100})\n
`dr` {int}: Outer radius to compute sky (default: {10})\n
`edge` {int}: Patch the edges of the image (VLT/SPHERE artifact, default: {0}),\n
`clip` {bool}: If True, sigma-clipping is used to reject frames with low integrated flux,\n
`clip_fact` {float}: Relative sigma if rejecting frames by sigma-clipping,\n
`apod` {bool}: If True, apodisation is performed in the image plan using a super-gaussian
function (known as windowing). The gaussian FWHM is set by the parameter `window`,\n
`window` {float}: FWHM of the super-gaussian to apodise the image (smoothly go to zero
on the edges),\n
`sky` {bool}: If True, the sky is remove using the annulus technique (computed between `r1`
and `r1` + `dr`),
`darkfile` {str}: If specified (default: None), the input dark (master_dark averaged if
multiple integrations) is substracted from the raw image,\n
image,\n
`f_kernel` {float}: kernel size used in the applied median filter (to find the center).
`remove_bad` {bool}: If True, the bad pixels are removed in the cleaning parameter
plots using a gaussian interpolation | |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class DatabaseCluster(pulumi.CustomResource):
database: pulumi.Output[str]
"""
Name of the cluster's default database.
"""
engine: pulumi.Output[str]
"""
Database engine used by the cluster (ex. `pg` for PostreSQL, `mysql` for MySQL, or `redis` for Redis).
"""
eviction_policy: pulumi.Output[str]
"""
A string specifying the eviction policy for a Redis cluster. Valid values are: `noeviction`, `allkeys_lru`, `allkeys_random`, `volatile_lru`, `volatile_random`, or `volatile_ttl`.
"""
host: pulumi.Output[str]
"""
Database cluster's hostname.
"""
maintenance_windows: pulumi.Output[list]
"""
Defines when the automatic maintenance should be performed for the database cluster.
* `day` (`str`) - The day of the week on which to apply maintenance updates.
* `hour` (`str`) - The hour in UTC at which maintenance updates will be applied in 24 hour format.
"""
name: pulumi.Output[str]
"""
The name of the database cluster.
"""
node_count: pulumi.Output[float]
"""
Number of nodes that will be included in the cluster.
"""
password: pulumi.Output[str]
"""
Password for the cluster's default user.
"""
port: pulumi.Output[float]
"""
Network port that the database cluster is listening on.
"""
private_host: pulumi.Output[str]
"""
Same as `host`, but only accessible from resources within the account and in the same region.
"""
private_network_uuid: pulumi.Output[str]
"""
The ID of the VPC where the database cluster will be located.
"""
private_uri: pulumi.Output[str]
"""
Same as `uri`, but only accessible from resources within the account and in the same region.
"""
region: pulumi.Output[str]
"""
DigitalOcean region where the cluster will reside.
"""
size: pulumi.Output[str]
"""
Database Droplet size associated with the cluster (ex. `db-s-1vcpu-1gb`).
"""
sql_mode: pulumi.Output[str]
"""
A comma separated string specifying the SQL modes for a MySQL cluster.
"""
tags: pulumi.Output[list]
"""
A list of tag names to be applied to the database cluster.
"""
uri: pulumi.Output[str]
"""
The full URI for connecting to the database cluster.
"""
urn: pulumi.Output[str]
"""
The uniform resource name of the database cluster.
"""
user: pulumi.Output[str]
"""
Username for the cluster's default user.
"""
version: pulumi.Output[str]
"""
Engine version used by the cluster (ex. `11` for PostgreSQL 11).
"""
    def __init__(__self__, resource_name, opts=None, engine=None, eviction_policy=None, maintenance_windows=None, name=None, node_count=None, private_network_uuid=None, region=None, size=None, sql_mode=None, tags=None, version=None, __props__=None, __name__=None, __opts__=None):
        """
        Provides a DigitalOcean database cluster resource.

        ## Example Usage
        ### Create a new PostgreSQL database cluster

        ```python
        import pulumi
        import pulumi_digitalocean as digitalocean

        postgres_example = digitalocean.DatabaseCluster("postgres-example",
            engine="pg",
            node_count=1,
            region="nyc1",
            size="db-s-1vcpu-1gb",
            version="11")
        ```
        ### Create a new MySQL database cluster

        ```python
        import pulumi
        import pulumi_digitalocean as digitalocean

        mysql_example = digitalocean.DatabaseCluster("mysql-example",
            engine="mysql",
            node_count=1,
            region="nyc1",
            size="db-s-1vcpu-1gb",
            version="8")
        ```
        ### Create a new Redis database cluster

        ```python
        import pulumi
        import pulumi_digitalocean as digitalocean

        redis_example = digitalocean.DatabaseCluster("redis-example",
            engine="redis",
            node_count=1,
            region="nyc1",
            size="db-s-1vcpu-1gb",
            version="5")
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] engine: Database engine used by the cluster (ex. `pg` for PostreSQL, `mysql` for MySQL, or `redis` for Redis).
        :param pulumi.Input[str] eviction_policy: A string specifying the eviction policy for a Redis cluster. Valid values are: `noeviction`, `allkeys_lru`, `allkeys_random`, `volatile_lru`, `volatile_random`, or `volatile_ttl`.
        :param pulumi.Input[list] maintenance_windows: Defines when the automatic maintenance should be performed for the database cluster.
        :param pulumi.Input[str] name: The name of the database cluster.
        :param pulumi.Input[float] node_count: Number of nodes that will be included in the cluster.
        :param pulumi.Input[str] private_network_uuid: The ID of the VPC where the database cluster will be located.
        :param pulumi.Input[str] region: DigitalOcean region where the cluster will reside.
        :param pulumi.Input[str] size: Database Droplet size associated with the cluster (ex. `db-s-1vcpu-1gb`).
        :param pulumi.Input[str] sql_mode: A comma separated string specifying the SQL modes for a MySQL cluster.
        :param pulumi.Input[list] tags: A list of tag names to be applied to the database cluster.
        :param pulumi.Input[str] version: Engine version used by the cluster (ex. `11` for PostgreSQL 11).

        The **maintenance_windows** object supports the following:

          * `day` (`pulumi.Input[str]`) - The day of the week on which to apply maintenance updates.
          * `hour` (`pulumi.Input[str]`) - The hour in UTC at which maintenance updates will be applied in 24 hour format.
        """
        # Back-compat shims for the deprecated __name__/__opts__ call styles.
        if __name__ is not None:
            warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
            resource_name = __name__
        if __opts__ is not None:
            warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
            opts = __opts__
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = utilities.get_version()
        if opts.id is None:
            # Creating a new resource: __props__ may only be supplied when
            # adopting an existing resource through opts.id (see get()).
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = dict()

            # engine, node_count, region and size are required inputs.
            if engine is None:
                raise TypeError("Missing required property 'engine'")
            __props__['engine'] = engine
            __props__['eviction_policy'] = eviction_policy
            __props__['maintenance_windows'] = maintenance_windows
            __props__['name'] = name
            if node_count is None:
                raise TypeError("Missing required property 'node_count'")
            __props__['node_count'] = node_count
            __props__['private_network_uuid'] = private_network_uuid
            if region is None:
                raise TypeError("Missing required property 'region'")
            __props__['region'] = region
            if size is None:
                raise TypeError("Missing required property 'size'")
            __props__['size'] = size
            __props__['sql_mode'] = sql_mode
            __props__['tags'] = tags
            __props__['version'] = version
            # Pure outputs start as None and are filled in by the provider.
            __props__['database'] = None
            __props__['host'] = None
            __props__['password'] = None
            __props__['port'] = None
            __props__['private_host'] = None
            __props__['private_uri'] = None
            __props__['uri'] = None
            __props__['urn'] = None
            __props__['user'] = None
        super(DatabaseCluster, __self__).__init__(
            'digitalocean:index/databaseCluster:DatabaseCluster',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name, id, opts=None, database=None, engine=None, eviction_policy=None, host=None, maintenance_windows=None, name=None, node_count=None, password=<PASSWORD>, port=None, private_host=None, private_network_uuid=None, private_uri=None, region=None, size=None, sql_mode=None, tags=None, uri=None, urn=None, user=None, version=None):
"""
Get an existing DatabaseCluster resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] database: Name of the cluster's default database.
:param pulumi.Input[str] engine: Database engine used by the cluster (ex. `pg` for PostreSQL, `mysql` for MySQL, or `redis` for Redis).
:param pulumi.Input[str] eviction_policy: A string specifying the eviction policy for a Redis cluster. Valid values are: `noeviction`, `allkeys_lru`, `allkeys_random`, `volatile_lru`, `volatile_random`, or `volatile_ttl`.
:param pulumi.Input[str] host: Database cluster's hostname.
:param pulumi.Input[list] maintenance_windows: Defines when the automatic maintenance should be performed for the database cluster.
:param pulumi.Input[str] name: The name of the database cluster.
:param pulumi.Input[float] node_count: Number of nodes that will be included in the cluster.
:param pulumi.Input[str] password: <PASSWORD> cluster'<PASSWORD>.
:param pulumi.Input[float] port: Network port that the database cluster is listening on.
:param pulumi.Input[str] private_host: Same as `host`, but only accessible from resources within the account and in the same region.
:param pulumi.Input[str] private_network_uuid: The ID of the VPC where the database cluster will be located.
:param pulumi.Input[str] private_uri: Same as `uri`, but only accessible from resources within the account and in the same region.
:param pulumi.Input[str] region: DigitalOcean region where the cluster will reside.
:param pulumi.Input[str] size: Database Droplet size associated with the cluster (ex. `db-s-1vcpu-1gb`).
:param pulumi.Input[str] sql_mode: A comma separated string specifying the SQL modes for a MySQL cluster.
:param pulumi.Input[list] tags: A list of tag names to be applied to the database cluster.
:param pulumi.Input[str] uri: The full URI for connecting to the database cluster.
:param pulumi.Input[str] urn: The uniform resource name of the database cluster.
:param pulumi.Input[str] user: Username for the cluster's default user.
:param pulumi.Input[str] version: Engine version used by the cluster (ex. `11` for PostgreSQL 11).
The **maintenance_windows** object supports the following:
* `day` (`pulumi.Input[str]`) - The day of the week on which to apply maintenance updates.
* `hour` (`pulumi.Input[str]`) - The hour in UTC at which maintenance updates will be applied in 24 hour format.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["database"] = database
__props__["engine"] = engine
__props__["eviction_policy"] = eviction_policy
__props__["host"] = host
__props__["maintenance_windows"] = maintenance_windows
__props__["name"] = name
__props__["node_count"] = node_count
__props__["password"] = | |
# shaDow/utils.py
import os
import torch
import glob
import numpy as np
import scipy.sparse as sp
import yaml
from sklearn.preprocessing import StandardScaler
from shaDow.globals import git_rev, timestamp, Logger
from torch_scatter import scatter
from copy import deepcopy
from typing import List, Union
from shaDow import TRAIN, VALID, TEST
from shaDow.data_converter import convert2shaDow, to_undirected
def load_data(prefix, dataset, config_data, os_='linux'):
    """Load the graph, features, labels and train/valid/test split for `dataset`.

    Args:
        prefix: dict with a 'local' key giving the root data directory.
        dataset: name of the dataset subdirectory under the root.
        config_data: data config dict; reads 'to_undirected', 'transductive'
            and 'norm_feat'.
        os_: unused here; kept for interface compatibility.

    Returns:
        dict with 'adj_full' / 'adj_train' (sparse adjacency), 'feat_full' /
        'label_full' (torch tensors), 'node_set' (per-split int64 index
        arrays) and 'bin_adj_files' (validated paths to pre-converted binary
        CSR files, or None).
    """
    Logger.printf("Loading training data..")
    prefix_l = prefix['local']
    fs_shadow = ['adj_full_raw.np[yz]', 'adj_train_raw.np[yz]', 'label_full.npy', 'feat_full.npy', 'split.npy']
    # Convert the raw dataset into the shaDow layout if any file is missing.
    if not all(glob.glob(f"{prefix_l}/{dataset}/{f}") for f in fs_shadow):
        convert2shaDow(dataset, prefix_l)
    role = np.load(f"./{prefix_l}/{dataset}/split.npy", allow_pickle=True)
    if type(role) == np.ndarray:
        role = role[()]
    else:
        assert type(role) == dict
    # role is used as index, which is required to be int64 (node_set won't take much mem anyways)
    node_set = {TRAIN: np.asarray(role[TRAIN], dtype=np.int64),
                VALID: np.asarray(role[VALID], dtype=np.int64),
                TEST : np.asarray(role[TEST], dtype=np.int64)}
    # load adj. If we want to convert to_undirected, and the undirected adj has been stored as external file,
    # then we skip the conversion in the program and directly load the undirected adj.
    bin_adj_files = {TRAIN: {'indptr': None, 'indices': None, 'data': None},
                     VALID: {'indptr': None, 'indices': None, 'data': None},
                     TEST : {'indptr': None, 'indices': None, 'data': None}}

    def fill_bin_adj_dict(mode_, split_, type_):
        # Record the expected paths of the pre-converted C++ binary CSR files.
        for d in ['indptr', 'indices', 'data']:
            bin_adj_files[mode_][d] = f"{prefix_l}/{dataset}/cpp/adj_{split_}_{type_}_{d}.bin"

    if config_data['to_undirected']:
        if (adj_full := load_adj(prefix_l, dataset, 'undirected', 'full')) is None:
            adj_full = load_adj(prefix_l, dataset, 'raw', 'full')
            adj_full = to_undirected(adj_full)
        fill_bin_adj_dict(VALID, 'full', 'undirected')
        fill_bin_adj_dict(TEST, 'full', 'undirected')
        if config_data['transductive']:
            adj_train = adj_full
            fill_bin_adj_dict(TRAIN, 'full', 'undirected')
        elif (adj_train := load_adj(prefix_l, dataset, 'undirected', 'train')) is None:
            adj_train = load_adj(prefix_l, dataset, 'raw', 'train')
            adj_train = to_undirected(adj_train)
            fill_bin_adj_dict(TRAIN, 'train', 'undirected')
            assert set(adj_train.nonzero()[0]).issubset(set(node_set[TRAIN]))
    else:
        adj_full = load_adj(prefix_l, dataset, 'raw', 'full')
        fill_bin_adj_dict(VALID, 'full', 'raw')
        fill_bin_adj_dict(TEST, 'full', 'raw')
        if config_data['transductive']:
            adj_train = adj_full
            fill_bin_adj_dict(TRAIN, 'full', 'raw')
        else:
            # Fix: pass the local path string (prefix_l) rather than the full
            # prefix dict, consistent with every other load_adj call above.
            adj_train = load_adj(prefix_l, dataset, 'raw', 'train')
            assert set(adj_train.nonzero()[0]).issubset(set(node_set[TRAIN]))
            fill_bin_adj_dict(TRAIN, 'train', 'raw')
    bin_adj_files = validate_bin_file(bin_adj_files)

    Logger.printf(f"SETTING TO {'TRANS' if config_data['transductive'] else 'IN'}DUCTIVE LEARNING", style="red")
    label_full = np.load(f"./{prefix_l}/{dataset}/label_full.npy")
    label_full = torch.from_numpy(label_full)

    # ======= deal with feats =======
    mode_norm = 'all' if config_data['transductive'] else 'train'
    if config_data['norm_feat'] and os.path.isfile(f"./{prefix_l}/{dataset}/feat_full_norm_{mode_norm}.npy"):
        # Use the cached normalized features when available.
        feats = np.load(f"./{prefix_l}/{dataset}/feat_full_norm_{mode_norm}.npy")
        Logger.printf(f"Loading '{mode_norm}'-normalized features", style='yellow')
    else:
        feats = np.load(f"./{prefix_l}/{dataset}/feat_full.npy")
        if config_data['norm_feat']:
            # In the inductive setting, fit the scaler on training nodes only
            # to avoid leaking statistics from valid/test nodes.
            feats_fit = feats if config_data['transductive'] else feats[node_set[TRAIN]]
            scaler = StandardScaler()
            scaler.fit(feats_fit)
            feats = scaler.transform(feats)
            Logger.printf(f"Normalizing node features (mode = {mode_norm})", style="yellow")
        else:
            Logger.printf("Not normalizing node features", style="yellow")
    feats = torch.from_numpy(feats.astype(np.float32, copy=False))
    Logger.printf("Done loading training data..")
    return {'adj_full' : adj_full,
            'adj_train' : adj_train,
            'feat_full' : feats,
            'label_full': label_full,
            'node_set' : node_set,
            'bin_adj_files': bin_adj_files}
def parse_n_prepare(task, args, name_graph, dir_log, os_='linux'):
    """Resolve the yaml training config for `task` ('train'/'inference'/'postproc'),
    fill in defaults for the data / architecture / hyperparameter / sampler
    sections, and create the logging directory.

    Returns (params_train, config_sampler_preproc, config_sampler_train,
    config_data, arch_gnn, dir_log_full).
    """
    # [config]
    # Either take the config file given on the command line, or recover the
    # yaml stored alongside a previous run's outputs (inference / postproc).
    if args.configs is not None:
        config_train = args.configs
    else:
        assert task in ['inference', 'postproc']
        if task == 'inference':
            if args.inference_configs is None:
                assert not args.compute_complexity_only
                dir_candy = args.inference_dir
            else:
                assert args.inference_dir is None and args.compute_complexity_only
                dir_candy = None
                config_train = args.inference_configs
        else:
            if args.postproc_dir is not None:
                dir_candy = args.postproc_dir
            else:
                with open(args.postproc_configs) as f:
                    config_temp = yaml.load(f, Loader=yaml.FullLoader)
                if 'dir_pred_mat' in config_temp:       # all such dirs MUST contain the same yaml
                    dir_candy = config_temp['dir_pred_mat'][0]
                elif 'dir_emb_mat' in config_temp:      # all ens models should have the same arch (only differs in sampler)
                    dir_candy = next(iter(config_temp['dir_emb_mat'].values()))[0]
                else:
                    raise NotImplementedError
        if dir_candy is not None:
            assert os.path.isdir(dir_candy)
            # The run directory must contain exactly one yaml file.
            f_yml = [f for f in os.listdir(dir_candy) if f.split('.')[-1] in ['yml', 'yaml']]
            assert len(f_yml) == 1
            config_train = f"{dir_candy}/{f_yml[0]}"
    with open(config_train) as f_config_train:
        config_train = yaml.load(f_config_train, Loader=yaml.FullLoader)
    config_train_copy = deepcopy(config_train)
    # [data]
    config_data = {"to_undirected" : False,
                   "transductive" : False,
                   "norm_feat" : True}
    config_data.update(config_train['data'])
    # [arch]
    arch_gnn = {        # default values
        "dim" : -1,
        "aggr" : "sage",
        "residue" : "none",
        "pooling" : "center",
        "loss" : "softmax",
        "num_layers" : -1,
        "act" : "I",
        "heads" : -1,
        "feature_augment" : "hops",
        "feature_smoothen" : "none",
        "label_smoothen" : "none",      # label_smoothen is only considered if use_label != none
        "ensemble_act" : "leakyrelu",
        "branch_sharing" : False,
        "use_label" : "none"
    }
    arch_gnn.update(config_train["architecture"])
    # Validate the architecture options against the supported values.
    assert arch_gnn['aggr'] in ['sage', 'gat', 'gatscat', 'gcn', 'mlp', 'gin', 'sgc', 'sign']
    assert arch_gnn['use_label'].lower() in ['all', 'none', 'no_valid']
    assert arch_gnn['pooling'].lower().split('-')[0] in ['mean', 'max', 'sum', 'center', 'sort']
    assert arch_gnn['residue'].lower() in ['sum', 'concat', 'max', 'none']
    assert arch_gnn['feature_augment'].lower() in ['hops', 'ppr', 'none']
    # Parse the '-'-separated augmentation string into a set of names.
    if arch_gnn["feature_augment"] and arch_gnn["feature_augment"].lower() != "none":
        arch_gnn["feature_augment"] = set(k for k in arch_gnn["feature_augment"].split("-"))
    else:
        arch_gnn['feature_augment'] = set()
    # [params]
    params_train = {
        "lr" : 0.01,
        "dropedge" : 0.0,
        "ensemble_dropout" : "none"
    }
    params_train.update(config_train["hyperparameter"])
    params_train["lr"] = float(params_train["lr"])
    # [sampler]
    # Split sampler configs by the phase they apply to.
    sampler_preproc, sampler_train = [], []
    for s in config_train['sampler']:
        phase = s.pop('phase')
        if phase == 'preprocess':
            sampler_preproc.append(s)
        elif phase == 'train':
            sampler_train.append(s)
        else:
            raise NotImplementedError
    batch_size = config_train["hyperparameter"]["batch_size"]
    config_sampler_preproc = {"batch_size": batch_size, "configs": sampler_preproc}
    config_sampler_train = {"batch_size": batch_size, "configs": sampler_train}
    # add self-edges for certain arch. e.g., for GAT, will be divide-by-0 error in grad without self-edges
    if arch_gnn["aggr"] in ["gcn", "gat", "gatscat"]:
        for sc in config_sampler_train["configs"]:
            num_ens = [len(v) for k, v in sc.items() if k != 'method']
            assert max(num_ens) == min(num_ens)
            sc["add_self_edge"] = [True] * num_ens[0]
    # [copy yml]
    name_key = f"{arch_gnn['aggr']}_{arch_gnn['num_layers']}"
    dir_log_full = log_dir(task, config_train_copy, name_key, dir_log, name_graph, git_rev, timestamp)
    return params_train, config_sampler_preproc, config_sampler_train, config_data, arch_gnn, dir_log_full
def parse_n_prepare_postproc(dir_load, f_config, name_graph, dir_log, arch_gnn, logger):
    """Load and prepare the post-processing config (C&S or subgraph ensemble).

    Args:
        dir_load: directory of a previous run to pull cached prediction
            matrices from (C&S method only); may be None.
        f_config: path to the post-processing YAML config file.
            NOTE(review): if None, ``config_postproc`` is never bound and the
            'check_record' lookup below raises NameError -- confirm callers
            always pass a path.
        name_graph: graph/dataset name, used to build the log directory.
        dir_log: root logging directory.
        arch_gnn: parsed architecture dict (reads 'aggr' and 'num_layers').
        logger: object providing ``decode_csv`` to read accuracy records.

    Returns:
        (config_postproc, acc_record, skip_instantiate) where
        ``skip_instantiate`` lists pipeline stages ('data' / 'model') that can
        be skipped because all cached matrices were found on disk.
    """
    if f_config is not None:
        with open(f_config) as f:
            config_postproc = yaml.load(f, Loader=yaml.FullLoader)
        name_key = f"postproc-{arch_gnn['aggr']}_{arch_gnn['num_layers']}"
        # Snapshot the post-processing config next to the run logs.
        log_dir('postproc', config_postproc, name_key, dir_log, name_graph, git_rev, timestamp)
    skip_instantiate = []
    # Whether to decode per-run accuracy CSVs alongside the cached matrices.
    if 'check_record' in config_postproc:
        load_acc_record = config_postproc['check_record']
    else:
        load_acc_record = True
    if config_postproc['method'] == 'cs':  # C&S (Correct & Smooth)
        acc_record = [] if load_acc_record else None
        # Merge dir_load into the prediction-matrix dirs, dedup by realpath.
        if dir_load is not None:
            if 'dir_pred_mat' not in config_postproc:
                config_postproc['dir_pred_mat'] = [dir_load]
            elif os.path.realpath(dir_load) not in [os.path.realpath(pc) for pc in config_postproc['dir_pred_mat']]:
                config_postproc['dir_pred_mat'].append(dir_load)
        config_postproc['pred_mat'] = [None] * len(config_postproc['dir_pred_mat'])
        for i, di in enumerate(config_postproc['dir_pred_mat']):
            if load_acc_record:
                acc_record.append(logger.decode_csv('final', di))
            # Load the first cached prediction matrix ('pred_mat*.cs') found.
            for f in os.listdir(di):
                if 'cs' == f.split('.')[-1] and f.startswith('pred_mat'):
                    config_postproc['pred_mat'][i] = torch.load(f"{di}/{f}")
                    break
        if all(m is not None for m in config_postproc['pred_mat']):
            skip_instantiate = ['data', 'model']
    elif config_postproc['method'] == 'ensemble':  # Variant of subgraph ensemble as postproc
        acc_record = {s: [] for s in config_postproc['dir_emb_mat']} if load_acc_record else None
        assert dir_load is None
        config_postproc['emb_mat'] = {k: [None] * len(v) for k, v in config_postproc['dir_emb_mat'].items()}
        for sname, dirs_l in config_postproc['dir_emb_mat'].items():
            for i, di in enumerate(dirs_l):
                if load_acc_record:
                    acc_record[sname].append(logger.decode_csv('final', di))
                # Load the first cached embedding matrix ('emb_mat*.ens') found.
                for f in os.listdir(di):
                    if 'ens' == f.split('.')[-1] and f.startswith('emb_mat'):
                        config_postproc['emb_mat'][sname][i] = torch.load(f"{di}/{f}")
                        break
        if all(m is not None for s, mat_l in config_postproc['emb_mat'].items() for m in mat_l):
            skip_instantiate = ['model']  # you have to load data (role, labels) anyways
    # NOTE(review): if method is neither 'cs' nor 'ensemble', acc_record is
    # unbound and this return raises NameError -- confirm upstream validation.
    return config_postproc, acc_record, skip_instantiate
def log_dir(task, config_new, yml_name_key, dir_log, name_graph, git_rev, timestamp):
    """Create the logging directory for a run and snapshot its config as YAML.

    Args:
        task: one of 'train', 'inference', 'postproc'; selects the directory
            prefix ('running', 'INF', 'POST' respectively).
        config_new: config object (dict-like) dumped into the YAML snapshot.
        yml_name_key: basename (without extension) of the YAML snapshot.
        dir_log: root logging directory.
        name_graph: dataset / graph name, used as a subdirectory.
        git_rev: git revision string (stripped before use).
        timestamp: run timestamp used to disambiguate runs.

    Returns:
        The created log directory path (with trailing '/').

    Raises:
        NotImplementedError: if ``task`` is not a recognized value.
    """
    # Dispatch table instead of an if/elif chain; unknown tasks still raise.
    prefix_by_task = {'train': 'running', 'inference': 'INF', 'postproc': 'POST'}
    if task not in prefix_by_task:
        raise NotImplementedError
    prefix = prefix_by_task[task]
    # Renamed local (was ``log_dir``) so it no longer shadows this function.
    dir_out = f"{dir_log}/{name_graph}/{prefix}/{timestamp}-{git_rev.strip():s}/"
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(dir_out, exist_ok=True)
    yml_file = f"{dir_out}/{yml_name_key}.yml"
    with open(yml_file, 'w') as f:
        yaml.dump(config_new, f, default_flow_style=False, sort_keys=False)
    return dir_out
# =============== #
# ADJ UTILS #
# =============== #
def get_deg_torch_sparse(adj):
    """Return the (weighted) out-degree of each node of a torch sparse adj.

    Sums the stored values of ``adj`` grouped by their row index.
    """
    row_index = adj._indices()[0]
    edge_vals = adj._values()
    return scatter(edge_vals, row_index, reduce="sum")
def adj_norm_rw(adj, deg=None, dropedge=0., sort_indices=True):
    """Random-walk normalize an adjacency matrix: ``A_norm = D^{-1} A``.

    Note that sym norm is used in the original GCN paper (Kipf), while rw
    norm is used in GraphSAGE and some other variants.
    # Procedure:
    #     1. adj add self-connection --> adj'
    #     2. D' deg matrix from adj'
    #     3. norm by D^{-1} x adj'

    Two input types are supported:
      * ``torch.Tensor`` (sparse): normalized IN PLACE (the caller's tensor
        is mutated). All stored values must be 1 (asserted) and ``deg`` must
        be None. ``dropedge`` zeroes a random fraction of edge values first.
      * ``scipy.sparse`` matrix: returns ``D^{-1} @ adj``; ``dropedge`` is
        not supported here.

    If ``sort_indices`` is True, re-sort the indices of the returned scipy
    matrix: after ``dot`` the indices of a row can be in descending rather
    than ascending order.

    Args:
        adj: square adjacency matrix (torch sparse tensor or scipy sparse).
        deg: optional precomputed degree vector (scipy branch only).
        dropedge: fraction of edges to randomly zero (torch branch only).
        sort_indices: re-sort scipy indices after the product.

    Returns:
        The rw-normalized adjacency (same object as ``adj`` for torch input).
    """
    # isinstance (not ``type(...) ==``) also accepts torch.Tensor subclasses.
    if isinstance(adj, torch.Tensor):
        assert deg is None
        # Unweighted graphs only: every stored value must equal 1.
        assert torch.sum(adj._values()).cpu().long().item() == adj._values().size()[0]
        _deg_orig = get_deg_torch_sparse(adj)
        if dropedge > 0:
            # Sample (with replacement) ~dropedge fraction of edge slots and zero them.
            masked_indices = torch.floor(torch.rand(int(adj._values().size()[0] * dropedge)) * adj._values().size()[0]).long()
            adj._values()[masked_indices] = 0
            _deg_dropped = get_deg_torch_sparse(adj)
        else:
            _deg_dropped = _deg_orig
        # Broadcast each node's (post-drop) degree to all of its edges.
        _deg = torch.repeat_interleave(_deg_dropped, _deg_orig.long())
        _deg = torch.clamp(_deg, min=1)  # guard divide-by-zero for isolated nodes
        _val = adj._values()
        _val /= _deg  # in-place: mutates the caller's tensor
        adj_norm = adj
    else:
        assert dropedge == 0., "not supporting dropedge for scipy csr matrices"
        assert adj.shape[0] == adj.shape[1]
        diag_shape = (adj.shape[0], adj.shape[1])
        D = adj.sum(1).flatten() if deg is None else deg
        D = np.clip(D, 1, None)  # if deg_v == 0, it doesn't matter what value we clip it to.
        norm_diag = sp.dia_matrix((1 / D, 0), shape=diag_shape)
        adj_norm = norm_diag.dot(adj)
        if sort_indices:
            adj_norm.sort_indices()
    return adj_norm
def adj_norm_sym(adj, sort_indices=True, add_self_edge=False, dropedge=0.):
assert adj.shape[0] == adj.shape[1]
| |
of each star
starsT = np.empty(nStars)
for j in range(nStars):
color_separation = (J_Hobs[j]-jhMod)**2+(H_Kobs[j]-hkMod)**2
min_separation_ind = np.argmin(color_separation)
starsT[j] = teffMod[min_separation_ind]
radeg = 180/np.pi
sweetSpot = dict(x=xval, y=yval, RA=allRA[targetIndex],
DEC=allDEC[targetIndex], jmag=Jmag[targetIndex])
# Offset between all stars and target
dRA = (allRA - sweetSpot['RA'])*np.cos(sweetSpot['DEC']/radeg)*3600
dDEC = (allDEC - sweetSpot['DEC'])*3600
# Put field stars positions and magnitudes in structured array
_ = dict(RA=allRA, DEC=allDEC, dRA=dRA, dDEC=dDEC, jmag=Jmag, T=starsT,
x=np.empty(nStars), y=np.empty(nStars), dx=np.empty(nStars),
dy=np.empty(nStars))
stars = np.empty(nStars,
dtype=[(key, val.dtype) for key, val in _.items()])
for key, val in _.items():
stars[key] = val
# Initialize final fits cube that contains the modelled traces
# with contamination
PAmin = 0 # instrument PA, degrees
PAmax = 360
dPA = 1 # degrees
# Set of IPA values to cover
PAtab = np.arange(PAmin, PAmax, dPA) # degrees
nPA = len(PAtab)
# Cube of trace simulation at every degree of field rotation,
# +target at O1 and O2
simuCube = np.zeros([nPA+1, dimY+1, dimX+1])
fitsFiles = glob.glob(os.path.join(TRACES_PATH, 'NIRCam_{}'.format(filter), 'o1*.0.fits'))
fitsFiles = np.sort(fitsFiles)
# Big loop to generate a simulation at each instrument PA
for kPA in range(PAtab.size):
APA = PAtab[kPA] # Aperture Position Angle (PA of instrument)
V3PA = APA+add_to_apa # from APT
sindx = np.sin(np.pi/2+APA/radeg)*stars['dDEC']
cosdx = np.cos(np.pi/2+APA/radeg)*stars['dDEC']
ps = pixel_scale
stars['dx'] = (np.cos(np.pi/2+APA/radeg)*stars['dRA']-sindx)/ps
stars['dy'] = (np.sin(np.pi/2+APA/radeg)*stars['dRA']+cosdx)/ps
stars['x'] = stars['dx']+sweetSpot['x']
stars['y'] = stars['dy']+sweetSpot['y']
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~NOTE~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Retain stars that are within the Direct Image NIRISS POM FOV
# This extends the subarray edges to the detector edges.
# It keeps the stars that fall out of the subarray but still
# fall into the detector.
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ind, = np.where((stars['x'] >= -8000) & (stars['x'] <= dimY+8000) &
(stars['y'] >= -8000) & (stars['y'] <= dimY+8000))
starsInFOV = stars[ind]
for i in range(len(ind)):
intx = round(starsInFOV['dx'][i])
inty = round(starsInFOV['dy'][i])
# This indexing assumes that teffMod is
# sorted the same way fitsFiles was sorted
k = np.where(teffMod == starsInFOV['T'][i])[0][0]
fluxscale = 10.0**(-0.4*(starsInFOV['jmag'][i]-sweetSpot['jmag']))
# deal with subection sizes
modelPadX = 0
modelPadY = 0
mx0 = int(modelPadX-intx)
mx1 = int(modelPadX-intx+dimX)
my0 = int(modelPadY-inty)
my1 = int(modelPadY-inty+dimY)
if (mx0 > dimX) or (my0 > dimY):
continue
if (mx1 < 0) or (my1 < 0):
continue
x0 = (mx0 < 0)*(-mx0)
y0 = (my0 < 0)*(-my0)
mx0 *= (mx0 >= 0)
mx1 = dimX if mx1 > dimX else mx1
my0 *= (my0 >= 0)
my1 = dimY if my1 > dimY else my1
# Fleshing out index 0 of the simulation cube (trace of target)
if (intx == 0) & (inty == 0) & (kPA == 0):
fNameModO12 = fitsFiles[k]
modelO1 = fits.getdata(fNameModO12, 1)
ord1 = modelO1[0, my0:my1, mx0:mx1]*fluxscale
simuCube[0, y0:y0+my1-my0, x0:x0+mx1-mx0] = ord1
# Fleshing out indexes 1-361 of the simulation cube
# (trace of neighboring stars at every position angle)
if (intx != 0) or (inty != 0):
fNameModO12 = fitsFiles[k]
modelO12 = fits.getdata(fNameModO12)
simuCube[kPA+1, y0:y0+my1-my0, x0:x0+mx1-mx0] += modelO12[0, my0:my1, mx0:mx1]*fluxscale
return simuCube
def lrsFieldSim(ra, dec, binComp=''):
    """ Produce a MIRI LRS field simulation for a target.

    Parameters
    ----------
    ra : float
        The RA of the target.
    dec : float
        The Dec of the target.
    binComp : sequence
        The parameters of a binary companion:
        (delta-RA [arcsec], delta-DEC [arcsec], J, H, K magnitudes).

    Returns
    -------
    simuCube : np.ndarray
        The simulated data cube. Index 0 (axis=0) shows the trace of
        the target. Indexes 1-361 show the trace of the field stars at
        every position angle (PA) of the instrument.
    """
    # Subarray dimensions and instrument constants
    dimX = 55
    dimY = 427
    rad = 2.5  # cone-search radius around the target, arcmin
    pixel_scale = 0.11  # arsec
    xval, yval = 38.5, 829.0  # target ("sweet spot") position on the detector
    add_to_apa = 4.83425324  # APA -> V3PA offset from APT, degrees
    # stars in large field around target
    targetcrd = crd.SkyCoord(ra=ra, dec=dec, unit=(u.hour, u.deg))
    targetRA = targetcrd.ra.value
    targetDEC = targetcrd.dec.value
    info = Irsa.query_region(targetcrd, catalog='fp_psc', spatial='Cone',
                             radius=rad*u.arcmin)
    # Coordinates of all the stars in FOV, including target
    allRA = info['ra'].data.data
    allDEC = info['dec'].data.data
    Jmag = info['j_m'].data.data
    Hmag = info['h_m'].data.data
    Kmag = info['k_m'].data.data
    J_Hobs = Jmag-Hmag
    H_Kobs = Hmag-Kmag
    # Coordinates of target
    # NOTE(review): targetDEC is in degrees but np.cos expects radians; this
    # only selects the nearest catalog entry, matching the sibling *FieldSim
    # implementations -- confirm intent before changing.
    aa = ((targetRA-allRA)*np.cos(targetDEC))
    distance = np.sqrt(aa**2 + (targetDEC-allDEC)**2)
    targetIndex = np.argmin(distance)  # the target
    # Add any missing companion
    if binComp != '':
        deg2rad = np.pi/180
        bb = binComp[0]/3600/np.cos(allDEC[targetIndex]*deg2rad)
        allRA = np.append(allRA, (allRA[targetIndex] + bb))
        allDEC = np.append(allDEC, (allDEC[targetIndex] + binComp[1]/3600))
        Jmag = np.append(Jmag, binComp[2])
        # BUGFIX: was ``np.append(Kmag, binComp[3])`` -- the companion's H
        # magnitude was appended to a copy of the K array, corrupting Hmag
        # (and the J-H / H-K colors) for all stars.
        Hmag = np.append(Hmag, binComp[3])
        Kmag = np.append(Kmag, binComp[4])
        J_Hobs = Jmag-Hmag
        H_Kobs = Hmag-Kmag
    # Number of stars
    nStars = allRA.size
    # Restoring model parameters
    modelParam = readsav(os.path.join(TRACES_PATH, 'NIRISS', 'modelsInfo.sav'),
                         verbose=False)
    models = modelParam['models']
    modelPadX = modelParam['modelpadx']
    modelPadY = modelParam['modelpady']
    dimXmod = modelParam['dimxmod']
    dimYmod = modelParam['dimymod']
    jhMod = modelParam['jhmod']
    hkMod = modelParam['hkmod']
    # teffMod = modelParam['teffmod']
    teffMod = np.linspace(2000, 6000, 41)
    # Find/assign Teff of each star: nearest model in (J-H, H-K) color space
    starsT = np.empty(nStars)
    for j in range(nStars):
        color_separation = (J_Hobs[j]-jhMod)**2+(H_Kobs[j]-hkMod)**2
        min_separation_ind = np.argmin(color_separation)
        starsT[j] = teffMod[min_separation_ind]
    radeg = 180/np.pi
    sweetSpot = dict(x=xval, y=yval, RA=allRA[targetIndex],
                     DEC=allDEC[targetIndex], jmag=Jmag[targetIndex])
    # Offset between all stars and target (arcsec)
    dRA = (allRA - sweetSpot['RA'])*np.cos(sweetSpot['DEC']/radeg)*3600
    dDEC = (allDEC - sweetSpot['DEC'])*3600
    # Put field stars positions and magnitudes in structured array
    _ = dict(RA=allRA, DEC=allDEC, dRA=dRA, dDEC=dDEC, jmag=Jmag, T=starsT,
             x=np.empty(nStars), y=np.empty(nStars), dx=np.empty(nStars),
             dy=np.empty(nStars))
    stars = np.empty(nStars,
                     dtype=[(key, val.dtype) for key, val in _.items()])
    for key, val in _.items():
        stars[key] = val
    # Initialize final fits cube that contains the modelled traces
    # with contamination
    PAmin = 0  # instrument PA, degrees
    PAmax = 360
    dPA = 1  # degrees
    # Set of IPA values to cover
    PAtab = np.arange(PAmin, PAmax, dPA)  # degrees
    nPA = len(PAtab)
    # Cube of trace simulation at every degree of field rotation,
    # +target at index 0
    simuCube = np.zeros([nPA+1, dimY+1, dimX+1])
    fitsFiles = glob.glob(os.path.join(TRACES_PATH, 'MIRI', '_*.fits'))
    fitsFiles = np.sort(fitsFiles)
    # Big loop to generate a simulation at each instrument PA
    for kPA in range(PAtab.size):
        APA = PAtab[kPA]  # Aperture Position Angle (PA of instrument)
        V3PA = APA+add_to_apa  # from APT
        # Rotate sky offsets into detector pixel offsets at this PA
        sindx = np.sin(np.pi/2+APA/radeg)*stars['dDEC']
        cosdx = np.cos(np.pi/2+APA/radeg)*stars['dDEC']
        ps = pixel_scale
        stars['dx'] = (np.cos(np.pi/2+APA/radeg)*stars['dRA']-sindx)/ps
        stars['dy'] = (np.sin(np.pi/2+APA/radeg)*stars['dRA']+cosdx)/ps
        stars['x'] = stars['dx']+sweetSpot['x']
        stars['y'] = stars['dy']+sweetSpot['y']
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~NOTE~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Retain stars that are within the Direct Image POM FOV.
        # This extends the subarray edges to the detector edges.
        # It keeps the stars that fall out of the subarray but still
        # fall into the detector.
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        ind, = np.where((stars['x'] >= -8000) & (stars['x'] <= dimY+8000) &
                        (stars['y'] >= -8000) & (stars['y'] <= dimY+8000))
        starsInFOV = stars[ind]
        for i in range(len(ind)):
            intx = round(starsInFOV['dx'][i])
            inty = round(starsInFOV['dy'][i])
            # This indexing assumes that teffMod is
            # sorted the same way fitsFiles was sorted
            k = np.where(teffMod == starsInFOV['T'][i])[0][0]
            # Flux relative to the target from the J-magnitude difference
            fluxscale = 10.0**(-0.4*(starsInFOV['jmag'][i]-sweetSpot['jmag']))
            # deal with subsection sizes: clip the model window to the subarray
            modelPadX = 0
            modelPadY = 0
            mx0 = int(modelPadX-intx)
            mx1 = int(modelPadX-intx+dimX)
            my0 = int(modelPadY-inty)
            my1 = int(modelPadY-inty+dimY)
            if (mx0 > dimX) or (my0 > dimY):
                continue
            if (mx1 < 0) or (my1 < 0):
                continue
            x0 = (mx0 < 0)*(-mx0)
            y0 = (my0 < 0)*(-my0)
            mx0 *= (mx0 >= 0)
            mx1 = dimX if mx1 > dimX else mx1
            my0 *= (my0 >= 0)
            my1 = dimY if my1 > dimY else my1
            # Fleshing out index 0 of the simulation cube (trace of target)
            if (intx == 0) & (inty == 0) & (kPA == 0):
                fNameModO12 = fitsFiles[k]
                modelO1 = fits.getdata(fNameModO12, 1)
                ord1 = modelO1[0, my0:my1, mx0:mx1]*fluxscale
                simuCube[0, y0:y0+my1-my0, x0:x0+mx1-mx0] = ord1
            # Fleshing out indexes 1-361 of the simulation cube
            # (trace of neighboring stars at every position angle)
            if (intx != 0) or (inty != 0):
                fNameModO12 = fitsFiles[k]
                modelO12 = fits.getdata(fNameModO12)
                simuCube[kPA+1, y0:y0+my1-my0, x0:x0+mx1-mx0] += modelO12[0, my0:my1, mx0:mx1]*fluxscale
    return simuCube
def fieldSim(ra, dec, instrument, binComp=''):
""" Wraps ``sossFieldSim``, ``gtsFieldSim``, and ``lrsFieldSim`` together.
Produces a field simulation for a target using any instrument (NIRISS,
NIRCam, or MIRI).
Parameters
----------
ra : float
The RA of the target.
dec : float
The Dec of the target.
instrument : str
The instrument the contamination is being calculated for.
Can either be (case-sensitive):
'NIRISS', 'NIRCam | |
== 3:
if ftype == TType.I64:
self.timestamp = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I32:
self.consistency_level = iprot.readI32();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
    """Serialize this remove_args struct to the given output protocol."""
    # Fast path: C-accelerated encode when the accelerated binary protocol
    # and the fastbinary extension are both available.
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
        oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
        return
    # Slow path: field-by-field serialization. Field ids and types must
    # match thrift_spec exactly -- this order defines the wire format.
    oprot.writeStructBegin('remove_args')
    if self.key != None:
        oprot.writeFieldBegin('key', TType.STRING, 1)
        oprot.writeString(self.key)
        oprot.writeFieldEnd()
    if self.column_path != None:
        oprot.writeFieldBegin('column_path', TType.STRUCT, 2)
        self.column_path.write(oprot)
        oprot.writeFieldEnd()
    if self.timestamp != None:
        oprot.writeFieldBegin('timestamp', TType.I64, 3)
        oprot.writeI64(self.timestamp)
        oprot.writeFieldEnd()
    if self.consistency_level != None:
        oprot.writeFieldBegin('consistency_level', TType.I32, 4)
        oprot.writeI32(self.consistency_level)
        oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()
def validate(self):
    """Raise TProtocolException if any required field is still unset."""
    # key, column_path and timestamp are all required by the IDL.
    for field_name in ('key', 'column_path', 'timestamp'):
        if getattr(self, field_name) is None:
            raise TProtocol.TProtocolException(message='Required field %s is unset!' % field_name)
    return
def __repr__(self):
    # Debug representation: ClassName(attr=value, ...).
    # Python 2 only: relies on dict.iteritems().
    L = ['%s=%r' % (key, value)
         for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
    # Structural equality: same class and identical attribute dicts.
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
    # Python 2 does not derive __ne__ from __eq__, so define it explicitly.
    return not (self == other)
class remove_result:
    """
    Attributes:
     - ire
     - ue
     - te
    """

    # Generated Thrift result struct for the ``remove`` RPC: at most one
    # exception field is set by the server; all-None means success (void).
    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ),  # 1
        (2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ),  # 2
        (3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ),  # 3
    )

    def __init__(self, ire=None, ue=None, te=None,):
        self.ire = ire
        self.ue = ue
        self.te = te

    def read(self, iprot):
        """Deserialize this struct from the given input protocol."""
        # Fast path: C-accelerated decode via fastbinary.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: field-by-field decode; unknown fields are skipped for
        # forward compatibility.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.ire = InvalidRequestException()
                    self.ire.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.ue = UnavailableException()
                    self.ue.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.te = TimedOutException()
                    self.te.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the given output protocol."""
        # Fast path: C-accelerated encode via fastbinary.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('remove_result')
        if self.ire != None:
            oprot.writeFieldBegin('ire', TType.STRUCT, 1)
            self.ire.write(oprot)
            oprot.writeFieldEnd()
        if self.ue != None:
            oprot.writeFieldBegin('ue', TType.STRUCT, 2)
            self.ue.write(oprot)
            oprot.writeFieldEnd()
        if self.te != None:
            oprot.writeFieldBegin('te', TType.STRUCT, 3)
            self.te.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields on a result struct.
        return

    def __repr__(self):
        # Python 2 only: relies on dict.iteritems().
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class batch_mutate_args:
    """
    Attributes:
     - mutation_map
     - consistency_level
    """

    # Generated Thrift argument struct for the ``batch_mutate`` RPC.
    # mutation_map wire type: map<string, map<string, list<Mutation>>>.
    thrift_spec = (
        None,  # 0
        (1, TType.MAP, 'mutation_map', (TType.STRING,None,TType.MAP,(TType.STRING,None,TType.LIST,(TType.STRUCT,(Mutation, Mutation.thrift_spec)))), None, ),  # 1
        (2, TType.I32, 'consistency_level', None, 1, ),  # 2
    )

    def __init__(self, mutation_map=None, consistency_level=thrift_spec[2][4],):
        self.mutation_map = mutation_map
        # Default pulled from thrift_spec (the IDL-declared default value).
        self.consistency_level = consistency_level

    def read(self, iprot):
        """Deserialize this struct from the given input protocol."""
        # Fast path: C-accelerated decode via fastbinary.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.MAP:
                    # Decode the nested map<key, map<cf, list<Mutation>>>.
                    # The _ktypeNNN/_valNNN temporaries are generator-emitted.
                    self.mutation_map = {}
                    (_ktype128, _vtype129, _size127 ) = iprot.readMapBegin()
                    for _i131 in xrange(_size127):
                        _key132 = iprot.readString();
                        _val133 = {}
                        (_ktype135, _vtype136, _size134 ) = iprot.readMapBegin()
                        for _i138 in xrange(_size134):
                            _key139 = iprot.readString();
                            _val140 = []
                            (_etype144, _size141) = iprot.readListBegin()
                            for _i145 in xrange(_size141):
                                _elem146 = Mutation()
                                _elem146.read(iprot)
                                _val140.append(_elem146)
                            iprot.readListEnd()
                            _val133[_key139] = _val140
                        iprot.readMapEnd()
                        self.mutation_map[_key132] = _val133
                    iprot.readMapEnd()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.I32:
                    self.consistency_level = iprot.readI32();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the given output protocol."""
        # Fast path: C-accelerated encode via fastbinary.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('batch_mutate_args')
        if self.mutation_map != None:
            oprot.writeFieldBegin('mutation_map', TType.MAP, 1)
            # Encode the nested map<key, map<cf, list<Mutation>>>.
            oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.mutation_map))
            for kiter147,viter148 in self.mutation_map.items():
                oprot.writeString(kiter147)
                oprot.writeMapBegin(TType.STRING, TType.LIST, len(viter148))
                for kiter149,viter150 in viter148.items():
                    oprot.writeString(kiter149)
                    oprot.writeListBegin(TType.STRUCT, len(viter150))
                    for iter151 in viter150:
                        iter151.write(oprot)
                    oprot.writeListEnd()
                oprot.writeMapEnd()
            oprot.writeMapEnd()
            oprot.writeFieldEnd()
        if self.consistency_level != None:
            oprot.writeFieldBegin('consistency_level', TType.I32, 2)
            oprot.writeI32(self.consistency_level)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # Both fields are required by the IDL.
        if self.mutation_map is None:
            raise TProtocol.TProtocolException(message='Required field mutation_map is unset!')
        if self.consistency_level is None:
            raise TProtocol.TProtocolException(message='Required field consistency_level is unset!')
        return

    def __repr__(self):
        # Python 2 only: relies on dict.iteritems().
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class batch_mutate_result:
    """
    Attributes:
     - ire
     - ue
     - te
    """

    # Generated Thrift result struct for the ``batch_mutate`` RPC: at most
    # one exception field is set; all-None means success (void).
    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ),  # 1
        (2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ),  # 2
        (3, TType.STRUCT, 'te', (TimedOutException, TimedOutException.thrift_spec), None, ),  # 3
    )

    def __init__(self, ire=None, ue=None, te=None,):
        self.ire = ire
        self.ue = ue
        self.te = te

    def read(self, iprot):
        """Deserialize this struct from the given input protocol."""
        # Fast path: C-accelerated decode via fastbinary.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: unknown fields are skipped for forward compatibility.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.ire = InvalidRequestException()
                    self.ire.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.ue = UnavailableException()
                    self.ue.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRUCT:
                    self.te = TimedOutException()
                    self.te.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the given output protocol."""
        # Fast path: C-accelerated encode via fastbinary.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('batch_mutate_result')
        if self.ire != None:
            oprot.writeFieldBegin('ire', TType.STRUCT, 1)
            self.ire.write(oprot)
            oprot.writeFieldEnd()
        if self.ue != None:
            oprot.writeFieldBegin('ue', TType.STRUCT, 2)
            self.ue.write(oprot)
            oprot.writeFieldEnd()
        if self.te != None:
            oprot.writeFieldBegin('te', TType.STRUCT, 3)
            self.te.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields on a result struct.
        return

    def __repr__(self):
        # Python 2 only: relies on dict.iteritems().
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class truncate_args:
    """
    Attributes:
     - cfname
    """

    # Generated Thrift argument struct for the ``truncate`` RPC.
    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'cfname', None, None, ),  # 1
    )

    def __init__(self, cfname=None,):
        # cfname: name of the column family to truncate (required).
        self.cfname = cfname

    def read(self, iprot):
        """Deserialize this struct from the given input protocol."""
        # Fast path: C-accelerated decode via fastbinary.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: unknown fields are skipped for forward compatibility.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.cfname = iprot.readString();
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the given output protocol."""
        # Fast path: C-accelerated encode via fastbinary.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('truncate_args')
        if self.cfname != None:
            oprot.writeFieldBegin('cfname', TType.STRING, 1)
            oprot.writeString(self.cfname)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # cfname is required by the IDL.
        if self.cfname is None:
            raise TProtocol.TProtocolException(message='Required field cfname is unset!')
        return

    def __repr__(self):
        # Python 2 only: relies on dict.iteritems().
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class truncate_result:
    """
    Attributes:
     - ire
     - ue
    """

    # Generated Thrift result struct for the ``truncate`` RPC: at most one
    # exception field is set; all-None means success (void).
    thrift_spec = (
        None,  # 0
        (1, TType.STRUCT, 'ire', (InvalidRequestException, InvalidRequestException.thrift_spec), None, ),  # 1
        (2, TType.STRUCT, 'ue', (UnavailableException, UnavailableException.thrift_spec), None, ),  # 2
    )

    def __init__(self, ire=None, ue=None,):
        self.ire = ire
        self.ue = ue

    def read(self, iprot):
        """Deserialize this struct from the given input protocol."""
        # Fast path: C-accelerated decode via fastbinary.
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        # Slow path: unknown fields are skipped for forward compatibility.
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRUCT:
                    self.ire = InvalidRequestException()
                    self.ire.read(iprot)
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRUCT:
                    self.ue = UnavailableException()
                    self.ue.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to the given output protocol."""
        # Fast path: C-accelerated encode via fastbinary.
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('truncate_result')
        if self.ire != None:
            oprot.writeFieldBegin('ire', TType.STRUCT, 1)
            self.ire.write(oprot)
            oprot.writeFieldEnd()
        if self.ue != None:
            oprot.writeFieldBegin('ue', TType.STRUCT, 2)
            self.ue.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        # No required fields on a result struct.
        return

    def __repr__(self):
        # Python 2 only: relies on dict.iteritems().
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.iteritems()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class describe_schema_versions_args:
thrift_spec = | |
"threshold": 1e-2},
}
vec5 = a.create(H2O)
self.assertTrue(not np.allclose(vec4, vec5))
def test_flatten(self):
    """Tests that flattened, and non-flattened output works correctly."""
    system = H2O
    n_grid = 10
    num_species = len(set(system.get_atomic_numbers()))
    k1_setup = {
        "grid": {"n": n_grid, "min": 1, "max": 8, "sigma": 0.1},
        "geometry": {"function": "atomic_number"},
    }
    descriptor = MBTR(
        species=[1, 8],
        k1=k1_setup,
        periodic=False,
        flatten=False,
        sparse=False,
    )
    # Unflattened K1: one row of grid values per species.
    unflat = descriptor.create(system)["k1"]
    self.assertEqual(unflat.shape, (num_species, n_grid))
    # Flattened K1: a single 1D feature vector.
    descriptor.flatten = True
    flat = descriptor.create(system)
    self.assertEqual(flat.shape, (num_species * n_grid,))
def test_parallel_dense(self):
    """Tests creating dense output parallelly.

    Checks that serial (n_jobs=1) and parallel (n_jobs=2) batch creation
    agree with per-system creation, for flattened and unflattened output.
    """
    samples = [molecule("CO"), molecule("N2O")]
    desc = copy.deepcopy(default_desc_k2)
    desc.species = ["C", "O", "N"]
    n_features = desc.get_number_of_features()
    # Determining number of jobs based on the amount of CPUs
    # (smoke test only: no assertion on the outputs here).
    desc.create(system=samples, n_jobs=-1, only_physical_cores=False)
    desc.create(system=samples, n_jobs=-1, only_physical_cores=True)
    # Multiple systems, serial job
    output = desc.create(
        system=samples,
        n_jobs=1,
    )
    assumed = np.empty((2, n_features))
    assumed[0, :] = desc.create(samples[0])
    assumed[1, :] = desc.create(samples[1])
    self.assertTrue(np.allclose(output, assumed))
    # Multiple systems, parallel job
    output = desc.create(
        system=samples,
        n_jobs=2,
    )
    assumed = np.empty((2, n_features))
    assumed[0, :] = desc.create(samples[0])
    assumed[1, :] = desc.create(samples[1])
    self.assertTrue(np.allclose(output, assumed))
    # Non-flattened output: per-system dicts of tensors per k-term.
    # NOTE(review): pokes the private attribute ``_flatten`` rather than the
    # public ``flatten`` used elsewhere -- confirm this is intentional.
    desc._flatten = False
    output = desc.create(
        system=samples,
        n_jobs=2,
    )
    assumed = []
    assumed.append(desc.create(samples[0]))
    assumed.append(desc.create(samples[1]))
    for i, val in enumerate(output):
        for key in val.keys():
            i_tensor = val[key]
            j_tensor = assumed[i][key]
            self.assertTrue(np.allclose(i_tensor, j_tensor))
def test_parallel_sparse(self):
    """Tests creating sparse output parallelly.

    Same serial-vs-parallel agreement check as the dense test, but with
    sparse output densified before comparison.
    """
    # Test indices
    samples = [molecule("CO"), molecule("N2O")]
    desc = copy.deepcopy(default_desc_k2)
    desc.species = ["C", "O", "N"]
    desc.sparse = True
    n_features = desc.get_number_of_features()
    # Multiple systems, serial job
    output = desc.create(
        system=samples,
        n_jobs=1,
    ).todense()
    assumed = np.empty((2, n_features))
    assumed[0, :] = desc.create(samples[0]).todense()
    assumed[1, :] = desc.create(samples[1]).todense()
    self.assertTrue(np.allclose(output, assumed))
    # Multiple systems, parallel job
    output = desc.create(
        system=samples,
        n_jobs=2,
    ).todense()
    assumed = np.empty((2, n_features))
    assumed[0, :] = desc.create(samples[0]).todense()
    assumed[1, :] = desc.create(samples[1]).todense()
    self.assertTrue(np.allclose(output, assumed))
def test_periodic_supercell_similarity(self):
    """Tests that the output spectrum of various supercells of the same
    crystal is identical after it is normalized.
    """
    decay = 1
    desc = MBTR(
        species=["H"],
        periodic=True,
        k1={
            "geometry": {"function": "atomic_number"},
            "grid": {"min": 0, "max": 2, "sigma": 0.1, "n": 100},
        },
        k2={
            "geometry": {"function": "inverse_distance"},
            "grid": {"min": 0, "max": 1.0, "sigma": 0.02, "n": 200},
            "weighting": {
                "function": "exp",
                "scale": decay,
                "threshold": 1e-3,
            },
        },
        k3={
            "geometry": {"function": "cosine"},
            "grid": {"min": -1.0, "max": 1.0, "sigma": 0.02, "n": 200},
            "weighting": {
                "function": "exp",
                "scale": decay,
                "threshold": 1e-3,
            },
        },
        flatten=True,
        sparse=False,
        # l2_each makes the spectrum size-invariant across supercells.
        normalization="l2_each",
    )
    # Create various supercells for the FCC structure
    a1 = bulk("H", "fcc", a=2.0)  # Primitive
    a2 = a1 * [2, 2, 2]  # Supercell
    a3 = bulk("H", "fcc", a=2.0, orthorhombic=True)  # Orthorhombic
    a4 = bulk("H", "fcc", a=2.0, cubic=True)  # Conventional cubic
    output = desc.create([a1, a2, a3, a4])
    # Test for equality
    # NOTE(review): the first assertion compares output[0] with itself and
    # is vacuously true -- presumably intended as output[0] vs output[1..3]
    # only; confirm and drop if redundant.
    self.assertTrue(np.allclose(output[0, :], output[0, :], atol=1e-5, rtol=0))
    self.assertTrue(np.allclose(output[0, :], output[1, :], atol=1e-5, rtol=0))
    self.assertTrue(np.allclose(output[0, :], output[2, :], atol=1e-5, rtol=0))
    self.assertTrue(np.allclose(output[0, :], output[3, :], atol=1e-5, rtol=0))
def test_normalization(self):
    """Tests that each normalization method works correctly.

    Covers "none", "l2_each", "n_atoms" and "valle_oganov" for dense and
    sparse, flat and non-flat output, using the module-level H2O system.
    """
    # Grid size per term; assumed to match default_desc_k1_k2_k3's
    # grids -- TODO confirm against the shared fixture.
    n = 100
    desc = copy.deepcopy(default_desc_k1_k2_k3)
    desc.species = ("H", "O")
    desc.normalization = "none"
    desc.flatten = False
    desc.sparse = False

    # Calculate the norms of the unnormalized per-term outputs.
    feat1 = desc.create(H2O)
    k1 = feat1["k1"]
    k2 = feat1["k2"]
    k3 = feat1["k3"]
    k1_norm = np.linalg.norm(k1.ravel())
    k2_norm = np.linalg.norm(k2.ravel())
    k3_norm = np.linalg.norm(k3.ravel())

    # Test normalization of non-flat dense output with l2_each:
    # each term should simply be divided by its own L2 norm.
    desc.normalization = "l2_each"
    feat2 = desc.create(H2O)
    k1_each = feat2["k1"]
    k2_each = feat2["k2"]
    k3_each = feat2["k3"]
    self.assertTrue(np.array_equal(k1 / k1_norm, k1_each))
    self.assertTrue(np.array_equal(k2 / k2_norm, k2_each))
    self.assertTrue(np.array_equal(k3 / k3_norm, k3_each))

    # Flattened dense output without normalization, used as the
    # reference for the flat comparisons below.
    desc.flatten = True
    desc.normalization = "none"
    feat_flat = desc.create(H2O)

    # Test normalization of flat dense output with l2_each
    desc.sparse = False
    desc.normalization = "l2_each"
    n_elem = len(desc.species)
    feat = desc.create(H2O)
    # Slice boundaries in the flat vector: k1 occupies n_elem grids,
    # k2 one grid per unordered element pair; k3 takes the remainder.
    n1 = int(n * n_elem)
    n2 = int((n_elem * (n_elem + 1) / 2) * n)
    a1 = feat_flat[0:n1] / k1_norm
    a2 = feat_flat[n1 : n1 + n2] / k2_norm
    a3 = feat_flat[n1 + n2 :] / k3_norm
    feat_flat_manual_norm_each = np.hstack((a1, a2, a3))
    self.assertTrue(
        np.allclose(feat[:], feat_flat_manual_norm_each, atol=1e-7, rtol=0)
    )

    # Test normalization of flat sparse output with l2_each
    desc.sparse = True
    desc.normalization = "l2_each"
    feat = desc.create(H2O).todense()
    self.assertTrue(
        np.allclose(feat, feat_flat_manual_norm_each, atol=1e-7, rtol=0)
    )

    # Test normalization of flat dense output with n_atoms:
    # the whole flat vector is divided by the atom count.
    desc.sparse = False
    desc.normalization = "n_atoms"
    n_atoms = len(H2O)
    n_elem = len(desc.species)
    feat = desc.create(H2O)
    self.assertTrue(np.allclose(feat, feat_flat / n_atoms, atol=1e-7, rtol=0))

    # Test normalization of flat sparse output with n_atoms
    desc.sparse = True
    desc.normalization = "n_atoms"
    feat = desc.create(H2O).todense()
    self.assertTrue(np.allclose(feat, feat_flat / n_atoms, atol=1e-7, rtol=0))

    # Test normalization with valle_oganov
    # For k2 term, test for one pair of different atoms and one pair
    # of the same atom (since the normalization differs)
    desc = copy.deepcopy(default_desc_k2)
    desc.species = ("H", "O")
    desc.normalization = "none"
    desc.periodic = True
    desc.flatten = False
    desc.sparse = False
    # Calculate normalized output by hand. The scaling factors below
    # are assumed to be the Valle-Oganov weights for the (H,H) and
    # (H,O) pairs in this cell -- TODO confirm against the MBTR docs.
    feat = desc.create(H2O)
    V = H2O.cell.volume
    feat["k2"][0, 0, :] = feat["k2"][0, 0, :] * V / (2 * 2 * 2 * np.pi)
    feat["k2"][0, 1, :] = feat["k2"][0, 1, :] * V / (2 * 1 * 4 * np.pi)
    # Create normalized output
    desc.normalization = "valle_oganov"
    feat2 = desc.create(H2O)
    self.assertTrue(np.array_equal(feat["k2"][0, 0, :], feat2["k2"][0, 0, :]))
    self.assertTrue(np.array_equal(feat["k2"][0, 1, :], feat2["k2"][0, 1, :]))

    # Test again for k3 term, here one triplet is enough
    desc = copy.deepcopy(default_desc_k3)
    desc.species = ("H", "O")
    desc.normalization = "none"
    desc.periodic = True
    desc.flatten = False
    desc.sparse = False
    # Calculate normalized output
    feat = desc.create(H2O)
    V = H2O.cell.volume
    feat["k3"][0, 0, 1, :] = feat["k3"][0, 0, 1, :] * V / (2 * 2 * 1)
    # Create normalized output
    desc.normalization = "valle_oganov"
    feat2 = desc.create(H2O)
    self.assertTrue(np.array_equal(feat["k3"][0, 0, 1, :], feat2["k3"][0, 0, 1, :]))
def test_k1_peaks_finite(self):
    """Tests the correct peak locations and intensities are found for the
    k=1 term.

    For H2O the atomic-number spectrum must show a peak at Z=1 with
    intensity 2 (two hydrogens) and one at Z=8 with intensity 1 (one
    oxygen), and be zero everywhere else.
    """
    desc = MBTR(
        species=[1, 8],
        k1={
            "geometry": {"function": "atomic_number"},
            "grid": {"min": 0, "max": 9, "sigma": 0.5, "n": 1000},
        },
        normalize_gaussians=False,
        periodic=False,
        flatten=True,
        sparse=False,
    )
    features = desc.create(H2O)
    axis = desc.get_k1_axis()

    # Expected (element, peak locations, peak intensities) triples.
    for symbol, expected_locs, expected_ints in (
        ("H", [1], [2]),
        ("O", [8], [1]),
    ):
        spectrum = features[desc.get_location((symbol))]
        peak_idx = find_peaks(spectrum, prominence=1)[0]
        self.assertTrue(
            np.allclose(axis[peak_idx], expected_locs, rtol=0, atol=1e-2)
        )
        self.assertTrue(
            np.allclose(spectrum[peak_idx], expected_ints, rtol=0, atol=1e-2)
        )

    # After zeroing both element spectra, nothing may remain.
    features[desc.get_location(("H"))] = 0
    features[desc.get_location(("O"))] = 0
    self.assertEqual(features.sum(), 0)
def test_k2_peaks_finite(self):
    """Tests the correct peak locations and intensities are found for the
    k=2 term in finite systems.

    The distance spectrum of H2O must show one H-H peak (intensity 1)
    and one H-O peak (intensity 2, one per hydrogen), and nothing else.
    """
    desc = MBTR(
        species=[1, 8],
        k2={
            "geometry": {"function": "distance"},
            "grid": {"min": -1, "max": 3, "sigma": 0.5, "n": 1000},
            "weighting": {"function": "unity"},
        },
        normalize_gaussians=False,
        periodic=False,
        flatten=True,
        sparse=False,
    )
    features = desc.create(H2O)
    pos = H2O.get_positions()
    axis = desc.get_k2_axis()

    # H-H: one peak at the H-H distance with unit intensity.
    hh_spectrum = features[desc.get_location(("H", "H"))]
    hh_idx = find_peaks(hh_spectrum, prominence=0.5)[0]
    self.assertTrue(
        np.allclose(
            axis[hh_idx], [np.linalg.norm(pos[0] - pos[2])], rtol=0, atol=1e-2
        )
    )
    self.assertTrue(np.allclose(hh_spectrum[hh_idx], [1], rtol=0, atol=1e-2))

    # H-O: one peak at the O-H bond length, intensity 2.
    ho_spectrum = features[desc.get_location(("H", "O"))]
    ho_idx = find_peaks(ho_spectrum, prominence=0.5)[0]
    self.assertTrue(
        np.allclose(
            axis[ho_idx], np.linalg.norm(pos[0] - pos[1]), rtol=0, atol=1e-2
        )
    )
    self.assertTrue(np.allclose(ho_spectrum[ho_idx], [2], rtol=0, atol=1e-2))

    # After zeroing both pair spectra, nothing may remain.
    features[desc.get_location(("H", "H"))] = 0
    features[desc.get_location(("H", "O"))] = 0
    self.assertEqual(features.sum(), 0)
def test_k2_peaks_periodic(self):
"""Tests the correct peak locations and intensities are found for the
k=2 term in periodic systems.
"""
atoms = Atoms(
cell=[
[10, 0, 0],
[10, 10, 0],
[10, 0, 10],
],
symbols=["H", "C"],
scaled_positions=[
[0.1, 0.5, 0.5],
[0.9, 0.5, 0.5],
],
pbc=True,
)
desc = MBTR(
species=["H", "C"],
k2={
"geometry": {"function": "distance"},
"grid": {"min": 0, "max": 10, "sigma": 0.5, "n": 1000},
"weighting": {"function": "exp", "scale": 0.8, "threshold": 1e-3},
},
normalize_gaussians=False,
periodic=True,
flatten=True,
sparse=False,
)
features = desc.create(atoms)
x = desc.get_k2_axis()
# Calculate assumed locations and intensities.
assumed_locs = np.array([2, 8])
assumed_ints = np.exp(-0.8 * np.array([2, 8]))
assumed_ints[0] *= 2 # There are two periodic distances at 2Å
assumed_ints[
0
] /= (
2 # The periodic distances ar halved because they belong to different cells
)
# Check the H-C peaks
hc_feat = features[desc.get_location(("H", "C"))]
hc_peak_indices = find_peaks(hc_feat, prominence=0.001)[0]
hc_peak_locs = x[hc_peak_indices]
hc_peak_ints = hc_feat[hc_peak_indices]
self.assertTrue(np.allclose(hc_peak_locs, assumed_locs, | |
<filename>main.py
import json
import webapp2
import random, string
import os
import cgi
from google.appengine.ext import ndb
from google.appengine.api import urlfetch
import urllib
from cStringIO import StringIO
#Logging for....logging?
import logging
# RESTful permission levels. Declared for a permission scheme, but the
# handlers below do not currently enforce them -- TODO confirm intent.
PERMISSION_ANYONE = 'anyone'
PERMISSION_LOGGED_IN_USER = 'logged_in_user'
PERMISSION_OWNER_USER = 'owner_user'
PERMISSION_ADMIN = 'admin'
# Module-level scratch lists. Only client_states is referenced by the
# visible handlers; the others appear unused here -- verify before removal.
customer_keys = []
book_keys = []
objects = []
# OAuth2 "state" values handed out to clients; validated on the callback.
client_states = []
# Google OAuth2 / Google+ endpoints.
google_get_url = "https://accounts.google.com/o/oauth2/v2/auth"
google_post_url = "https://www.googleapis.com/oauth2/v4/token"
google_plus_url = "https://www.googleapis.com/plus/v1/people/me"
# NOTE(security): the OAuth client secret is hard-coded in source; it
# should live in configuration outside version control.
client_id = "620609018385-j0o29rkh4uke0abka57v75k538el685n.apps.googleusercontent.com"
client_secret = "uGAU8L-zTywTh6Pry1cse57B"
redirect_uri = "https://lasthope-155502.appspot.com/oauth"
# Module-level state string. WelcomeHandler.post assigns a *local* named
# state, so this global stays empty -- TODO confirm that is intended.
state = ''
"""
Create Database Models
"""
class bookModel(ndb.Model):
    """Datastore entity describing a library book."""
    # Numeric id mirrored from the entity key (assigned after the first put()).
    id = ndb.IntegerProperty()
    title = ndb.StringProperty()
    isbn = ndb.StringProperty()
    # A book may belong to several genres.
    genre = ndb.StringProperty(repeated=True)
    author = ndb.StringProperty()
    # True while the book is on the shelf, False while checked out.
    checkedIn = ndb.BooleanProperty()
class customerModel(ndb.Model):
    """Datastore entity describing a library customer."""
    # Numeric id mirrored from the entity key (assigned after the first put()).
    id = ndb.IntegerProperty()
    name = ndb.StringProperty()
    # Stored as free text; no arithmetic is performed on it in this module.
    balance = ndb.StringProperty()
    # Resource paths of checked-out books, e.g. "/books/<id>".
    checked_out = ndb.StringProperty(repeated=True)
class WelcomeHandler(webapp2.RequestHandler):
    """Landing page: shows a button that starts the Google OAuth2 flow.

    DELETE on this resource wipes every book and customer entity.
    """

    def get(self):
        # Minimal HTML form with a single sign-in button.
        self.response.write('<form action="" method="post"><button name="auth" value="signIn"> Let\'s try OAuth2.0! </button> </form>')

    def post(self):
        x = self.request.get("auth")
        if x == "signIn":
            # Generate a pseudo random state for this request (anti-CSRF
            # token that the OAuth callback must echo back).
            state = ''.join(random.SystemRandom().choice(string.ascii_uppercase + string.digits) for _ in range(20))
            # Create our form data
            form_fields = {
                'response_type': 'code',
                'client_id': client_id,
                'redirect_uri': redirect_uri,
                'scope': 'email',
                'state': state
            }
            param_data = urllib.urlencode(form_fields)
            headers = {'Content-Type': 'application/x-www-form-urlencoded'}
            try:
                result = urlfetch.fetch(
                    url=google_get_url + "?" + param_data,
                    payload=None,
                    method=urlfetch.GET,
                    headers=headers
                )
                # Remember the state so OAuthHandler can validate it later.
                client_states.append(state)
                self.response.write(result.content)
            except urlfetch.Error as e:
                # BUG FIX: "Error! " + e raised TypeError (str + exception);
                # convert the exception to text before concatenating.
                self.response.write("Error! " + str(e))

    def delete(self):
        # Wipe the entire datastore contents for both models.
        b = bookModel.query()
        c = customerModel.query()
        for book in b:
            book.key.delete()
        for customer in c:
            customer.key.delete()
        self.response.write("Successfully deleted all books and customers.")
"""
Customer Handler
Provides a REST api for creating, updating, and deleting book objects
"""
class BookListHandler(webapp2.RequestHandler):
    """REST resource for /books.

    Routes (args is the URL remainder after /books):
      GET    /books[?checkedIn=true|false]  -> list (optionally filtered) books
      GET    /books/<id>                    -> one book
      POST   /books                         -> create a book
      PUT    /books/<id>                    -> full replacement
      PATCH  /books/<id>                    -> partial update
      DELETE /books[/<id>]                  -> delete one or all books
    """
    # An ndb Query object is an immutable description that is re-executed
    # each time it is iterated, so sharing one at class level is safe.
    b = bookModel.query()

    def get(self, args):
        """RESTful GET @ /books/ and /books/<id>; responds with JSON."""
        path_info = parse_url(args)
        path_len = path_info[0]
        path = path_info[1]
        output = []
        # /books
        if args == "" or args == "/":
            self.response.headers['Content-Type'] = 'application/json'
            if self.request.get("checkedIn"):
                # Optional filter on the checked-in flag.
                flag = self.request.get("checkedIn").capitalize()
                if flag == "True":
                    books = self.b.filter(bookModel.checkedIn == True)
                else:
                    books = self.b.filter(bookModel.checkedIn == False)
                for book in books:
                    output.append(json.dumps(book.to_dict()))
            else:
                for book in self.b:
                    output.append(json.dumps(book.to_dict()))
        # /books/<id>
        elif path_len == 2 and path[1].isdigit():
            desired_book = self.b.filter(bookModel.id == int(path[1])).get()
            self.response.headers['Content-Type'] = 'application/json'
            # Guard against an unknown id (previously raised AttributeError
            # and produced an HTTP 500); now answers with an empty list.
            if desired_book is not None:
                output.append(json.dumps(desired_book.to_dict()))
        self.response.write(",".join(output).join(("[", "]")))

    def post(self, args):
        """Create a new book from form fields; echoes the entity as JSON."""
        # BUG FIX: output was previously assigned only inside the if-branch
        # but used in the else-branch as well (NameError).
        output = []
        if args == "" or args == "/":
            try:
                # BUG FIX: the original compared against ("True" or "true"),
                # which evaluates to just "True"; accept the flag
                # case-insensitively instead.
                checked_flag = self.request.get('checkedIn').lower() == 'true'
                new_book = bookModel(
                    id=0,
                    title=self.request.get('title'),
                    isbn=self.request.get('isbn'),
                    genre=self.request.get_all('genre'),
                    author=self.request.get('author'),
                    checkedIn=checked_flag
                )
                # The first put() allocates the key; mirror its numeric id
                # into the entity and persist again.
                book_key = new_book.put()
                new_book.id = book_key.id()
                new_book.put()
                self.response.status = 201
                self.response.write(json.dumps(new_book.to_dict()))
            except Exception:
                # Log instead of silently swallowing the failure.
                logging.exception('failed to create book')
                self.response.write(output)
        else:
            self.response.write(output)

    def delete(self, args):
        """Delete one book by id, or every book when no id is given."""
        path_info = parse_url(args)
        path_len = path_info[0]
        path = path_info[1]
        # /books/<id>
        if path_len == 2 and path[1].isdigit():
            book_by_id = self.b.filter(bookModel.id == int(path[1]))
            for x in book_by_id:
                x.key.delete()
        # /books -> delete everything
        elif args == "" or args == "/":
            for book in self.b:
                book.key.delete()
        self.response.write("")

    def put(self, args):
        """Full replacement of /books/<id>; absent fields are blanked."""
        output = []
        path_info = parse_url(args)
        path_len = path_info[0]
        path = path_info[1]
        if path_len == 2 and path[1].isdigit():
            book_by_id = self.b.filter(bookModel.id == int(path[1]))
            for x in book_by_id:
                # Replace the book's contents; missing fields are reset.
                x.title = self.request.get('title') or ""
                x.author = self.request.get('author') or ""
                x.isbn = self.request.get('isbn') or ""
                if self.request.get('genre'):
                    x.genre = self.request.get_all('genre')
                else:
                    x.genre = []
                if self.request.get('id'):
                    x.id = int(self.request.get('id'))
                if self.request.get('checkedIn'):
                    # BUG FIX: bool("false") is True; parse the text instead.
                    x.checkedIn = self.request.get('checkedIn').lower() == 'true'
                else:
                    x.checkedIn = True
                x.put()
                self.response.status = 201
                output.append(json.dumps(x.to_dict()))
        self.response.write(",".join(output).join(("[", "]")))

    def patch(self, args):
        """Partial update of /books/<id> from a urlencoded PATCH body."""
        output = []
        path_info = parse_url(args)
        path_len = path_info[0]
        path = path_info[1]
        # webapp2 does not parse PATCH bodies, so parse manually via cgi.
        filepointer = StringIO(self.request.body)
        form = cgi.FieldStorage(
            fp=filepointer,
            headers=self.request.headers,
            environ={'REQUEST_METHOD': 'PATCH',
                     'CONTENT_TYPE': self.request.headers['content-type']
                     }
        )
        title = form.getfirst("title", "")
        author = form.getfirst("author", "")
        isbn = form.getfirst('isbn', "")
        genres = form.getlist('genre')
        bookId = form.getfirst('id', "")
        checkFlag = form.getfirst('checkedIn', "")
        if path_len == 2 and path[1].isdigit():
            book = self.b.filter(bookModel.id == int(path[1])).get()
            # Only the fields present in the request body are replaced.
            if title:
                book.title = title
            if author:
                book.author = author
            if isbn:
                book.isbn = isbn
            if genres:
                book.genre = genres
            if bookId:
                # BUG FIX: id is an IntegerProperty but the form value is a
                # string; convert before assignment.
                book.id = int(bookId)
            if checkFlag:
                # BUG FIX: bool("false") is True; parse the text instead.
                book.checkedIn = checkFlag.lower() == 'true'
            book.put()
            self.response.status = 201
            output.append(json.dumps(book.to_dict()))
        self.response.write(",".join(output).join(("[", "]")))
"""
Customer Handler
Provides a REST api for creating, updating, and deleting Customer objects
"""
class CustomerListHandler(webapp2.RequestHandler):
    """REST resource for /customers, including book check-in/check-out.

    Routes (args is the URL remainder after /customers):
      GET    /customers[/<id>]               -> list / fetch customers
      GET    /customers/<id>/books[/<bid>]   -> books checked out by <id>
      POST   /customers                      -> create a customer
      PUT    /customers/<id>                 -> full replacement
      PUT    /customers/<id>/books/<bid>     -> check a book out
      PATCH  /customers/<id>                 -> partial update
      DELETE /customers[/<id>]               -> delete one or all customers
      DELETE /customers/<id>/books/<bid>     -> check a book back in
    """
    # An ndb Query object is an immutable description that is re-executed
    # each time it is iterated, so sharing one at class level is safe.
    c = customerModel.query()

    def get(self, args):
        """RESTful GET for customers and their checked-out books."""
        path_info = parse_url(args)
        path_len = path_info[0]
        path = path_info[1]
        output = []
        # /customers -> all customers
        if args == "" or args == "/":
            for customer in self.c:
                output.append(json.dumps(customer.to_dict()))
            self.response.headers['Content-Type'] = 'application/json'
            self.response.write(",".join(output).join(("[", "]")))
        # /customers/<id>
        elif path_len == 2 and path[1].isdigit():
            cust_by_id = self.c.filter(customerModel.id == int(path[1])).get()
            self.response.headers['Content-Type'] = 'application/json'
            # Guard against an unknown id (previously raised AttributeError
            # and produced an HTTP 500); now answers with an empty list.
            if cust_by_id is not None:
                output.append(json.dumps(cust_by_id.to_dict()))
            self.response.write(",".join(output).join(("[", "]")))
        # /customers/<id>/books/<bid> -> one specific checked-out book
        elif path_len == 4 and path[1].isdigit() and path[2] == "books":
            cust_by_id = self.c.filter(customerModel.id == int(path[1])).get()
            for x in cust_by_id.checked_out:
                if x:
                    # Entries look like "/books/<id>"; index 2 is the id.
                    b_id = x.split("/")
                    if int(b_id[2]) == int(path[3]):
                        self.response.headers['Content-Type'] = 'application/json'
                        book = bookModel.query(bookModel.id == int(path[3])).get()
                        self.response.write(json.dumps(book.to_dict()))
        # /customers/<id>/books -> all checked-out books
        elif path_len == 3 and path[1].isdigit() and path[2] == "books":
            cust_by_id = self.c.filter(customerModel.id == int(path[1])).get()
            for x in cust_by_id.checked_out:
                if x:
                    b_id = x.split("/")
                    if b_id[2].isdigit():
                        self.response.headers['Content-Type'] = 'application/json'
                        book = bookModel.query(bookModel.id == int(b_id[2])).get()
                        output.append(json.dumps(book.to_dict()))
            self.response.write(",".join(output).join(("[", "]")))

    def post(self, args):
        """Create a new customer from form fields; echoes the entity."""
        output = []
        if args == "" or args == "/":
            try:
                new_customer = customerModel(
                    id=0,
                    name=self.request.get('name'),
                    balance=self.request.get('balance'),
                    checked_out=self.request.get_all('checked_out')
                )
                # The first put() allocates the key; mirror its numeric id
                # into the entity and persist again.
                customer_key = new_customer.put()
                new_customer.id = customer_key.integer_id()
                new_customer.put()
                self.response.headers['Content-Type'] = 'application/json'
                self.response.status = 201
                self.response.write(json.dumps(new_customer.to_dict()))
            except Exception:
                # Log instead of silently swallowing the failure.
                logging.exception('failed to create customer')
                self.response.write(output)
        else:
            self.response.write(output)

    def delete(self, args):
        """Delete customers, or check a book back in."""
        path_info = parse_url(args)
        path_len = path_info[0]
        path = path_info[1]
        # /customers/<id>
        if path_len == 2 and path[1].isdigit():
            cust_by_id = self.c.filter(customerModel.id == int(path[1]))
            for x in cust_by_id:
                x.key.delete()
        # /customers/<id>/books/<bid> -> check the book back in
        if path_len == 4 and path[1].isdigit() and path[3].isdigit() and path[2] == "books":
            c_id = path[1]
            b_id = path[3]
            cust_by_id = self.c.filter(customerModel.id == int(c_id)).get()
            book_by_id = bookModel.query(bookModel.id == int(b_id)).get()
            entry = "/books/" + b_id
            # BUG FIX: remove() on a missing entry raised ValueError
            # (HTTP 500); only remove when actually checked out.
            if entry in cust_by_id.checked_out:
                cust_by_id.checked_out.remove(entry)
            book_by_id.checkedIn = True
            book_by_id.put()
            cust_by_id.put()
        # /customers -> delete everything
        if args == "" or args == "/":
            for cust in self.c:
                cust.key.delete()

    def put(self, args):
        """Check a book out (/<id>/books/<bid>) or replace a customer."""
        output = []
        path_info = parse_url(args)
        path_len = path_info[0]
        path = path_info[1]
        # /customers/<id>/books/<bid> -> check the book out
        if path_len == 4 and path[1].isdigit() and path[3].isdigit() and path[2] == "books":
            c_id = path[1]
            b_id = path[3]
            cust_by_id = self.c.filter(customerModel.id == int(c_id)).get()
            book_by_id = bookModel.query(bookModel.id == int(b_id)).get()
            cust_by_id.checked_out.append("/books/" + b_id)
            book_by_id.checkedIn = False
            book_by_id.put()
            cust_by_id.put()
            self.response.headers['Content-Type'] = 'application/json'
            self.response.status = 201
            output.append(json.dumps(cust_by_id.to_dict()))
        # /customers/<id> -> full replacement; missing fields are blanked
        if path_len == 2 and path[1].isdigit():
            cust_by_id = self.c.filter(customerModel.id == int(path[1]))
            for x in cust_by_id:
                x.name = self.request.get('name') or ""
                x.balance = self.request.get('balance') or ""
                if self.request.get('checked_out'):
                    x.checked_out = self.request.get_all('checked_out')
                else:
                    x.checked_out = []
                if self.request.get('id'):
                    x.id = int(self.request.get('id'))
                x.put()
                self.response.status = 201
                output.append(json.dumps(x.to_dict()))
        self.response.write(",".join(output).join(("[", "]")))

    def patch(self, args):
        """Partial update of /customers/<id> from a urlencoded PATCH body."""
        output = []
        path_info = parse_url(args)
        path_len = path_info[0]
        path = path_info[1]
        # webapp2 does not parse PATCH bodies, so parse manually via cgi.
        filepointer = StringIO(self.request.body)
        form = cgi.FieldStorage(
            fp=filepointer,
            headers=self.request.headers,
            environ={'REQUEST_METHOD': 'PATCH',
                     'CONTENT_TYPE': self.request.headers['content-type']
                     }
        )
        name = form.getfirst("name", "")
        balance = form.getfirst("balance", "")
        cId = form.getfirst('id', "")
        checkOut = form.getlist('checked_out')
        if path_len == 2 and path[1].isdigit():
            cust = self.c.filter(customerModel.id == int(path[1])).get()
            # Only the fields present in the request body are replaced.
            if name:
                cust.name = name
            if balance:
                cust.balance = balance
            if cId:
                # BUG FIX: id is an IntegerProperty but the form value is a
                # string; convert before assignment.
                cust.id = int(cId)
            if checkOut:
                cust.checked_out = checkOut
            cust.put()
            self.response.status = 201
            output.append(json.dumps(cust.to_dict()))
        self.response.write(",".join(output).join(("[", "]")))
class OAuthHandler(webapp2.RequestHandler):
def get(self):
given_state = self.request.get("state")
code = self.request.get("code")
if code and given_state in client_states:
# Create our form data
form_fields = {
'code': code,
'client_id': client_id,
'client_secret': client_secret,
'redirect_uri': redirect_uri,
'grant_type': 'authorization_code',
}
param_data = urllib.urlencode(form_fields)
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
try:
result = urlfetch.fetch(
url=google_post_url,
payload=param_data,
method=urlfetch.POST,
headers=headers
)
#Contains access_token, token_type, expires_in, id_token
info = json.loads(result.content)
access_token = info['access_token']
token_type = info['token_type']
expires_in = info['expires_in']
id_token = info['id_token']
headers = {'Authorization': token_type + " " + access_token}
try:
result = urlfetch.fetch(
url=google_plus_url,
payload=None,
method=urlfetch.GET,
headers=headers
)
profile_info = json.loads(result.content)
firstName = profile_info['name']['givenName']
lastName = profile_info['name']['familyName']
profile_url = profile_info['url']
response = "Hello " + firstName + " " + lastName + "!\n" \
+ "Feel free to checkout your Google + profile at " + profile_url + " ...\n" \
+ " Also here's the Super Secret randomly generated state we used: " + given_state
self.response.write(response)
except urlfetch.Error as e:
self.response.write("Error! | |
<reponame>SmirkCao/obscmd<filename>run.py<gh_stars>10-100
#!/usr/bin/python
# -*- coding:utf-8 -*-
import Queue
import base64
import hashlib
import logging
import logging.config
import logging.handlers
import multiprocessing
import os
import sys
import threading
import time
import traceback
from optparse import OptionParser
import util
import obspycmd
import results
import myLib.cloghandler
from copy import deepcopy
from Queue import Empty
from constant import ConfigFile
from constant import LOCAL_SYS
from constant import SYS_ENCODING
from constant import CONTENT_TYPES
from util import Counter
from util import ThreadsStopFlag
from util import RangeFileWriter
from util import User
# Use a multiprocess-safe rotating handler for file logging.
logging.handlers.ConcurrentRotatingFileHandler = myLib.cloghandler.ConcurrentRotatingFileHandler
VERSION = 'v4.6.7'
RETRY_TIMES = 3
# Multipart upload part-size limits: 5 MiB .. 5 GiB.
UPLOAD_PART_MIN_SIZE = 5 * 1024 ** 2
UPLOAD_PART_MAX_SIZE = 5 * 1024 ** 3
# Test case id -> "Name;handler_method" mapping.
TEST_CASES = {
    201: 'PutObject;put_object',
    202: 'GetObject;get_object',
    206: 'CopyObject;copy_object'
}
OBJECTS_QUEUE_SIZE = 10 ** 5
# Sentinel pushed onto queues to signal "no more tasks".
END_MARKER = "END_MARKER"
# Credentials object (util.User); populated by read_config().
user = None
# configurations
running_config = {}
# upload tasks
all_files_queue = multiprocessing.Queue()
# download tasks
all_objects_queue = multiprocessing.Queue()
# statistic tasks
results_queue = multiprocessing.Queue()
# lock for process workers
lock = multiprocessing.Lock()
# lock for process workers result
lock_re = multiprocessing.Lock()
# result file for object manifest
manifest_file = ''
# count for all workers' concurrency
current_concurrency = multiprocessing.Value('i', 0)
# data size of all tasks
total_data = multiprocessing.Value('f', 0)
# Byte totals accumulated while scanning tasks (module globals mutated
# by initialize_object_name and friends).
total_data_upload = 0
total_data_download = 0
number_of_objects_to_put = 0
def read_config(options, config_file_name=ConfigFile.FILE_CONFIG):
    """Load key=value pairs from the config file into running_config.

    Command line options override file values; the user is then prompted
    for credentials and the operation. Mutates the module globals
    running_config and user, and exits the process on any error.

    Fixes: the file was opened with the invalid mode 'rw' and never
    closed on error (now a 'with open(..., "r")'); the eleven identical
    true/false conversions are folded into one helper.
    """
    global user

    def _to_bool(key):
        # 'true' (any case) -> True, anything else -> False.
        running_config[key] = running_config[key].lower() == 'true'

    try:
        print('start read file \n')
        with open(config_file_name, 'r') as f:
            for line in f:
                line = line.strip()
                # Skip blank lines and '#' comments.
                if line and line[0] != '#':
                    running_config[line[:line.find('=')].strip()] = \
                        line[line.find('=') + 1:].strip()

        # Upload-side and download-side options are mutually exclusive.
        if (options.localPath or options.remoteDir) and (
                options.downloadTarget or options.savePath):
            parser.error("options are mutually exclusive")

        # Command line options take precedence over the config file.
        if options.operation:
            running_config['Operation'] = options.operation
        if options.localPath:
            running_config['LocalPath'] = options.localPath
        if options.remoteDir:
            running_config['RemoteDir'] = options.remoteDir
        if options.downloadTarget:
            running_config['DownloadTarget'] = options.downloadTarget
        if options.savePath:
            running_config['SavePath'] = options.savePath
        if options.bucketName:
            running_config['BucketNameFixed'] = options.bucketName
        if options.AK:
            running_config['AK'] = options.AK
        if options.SK:
            running_config['SK'] = options.SK
        if options.DomainName:
            running_config['DomainName'] = options.DomainName
        if options.Region:
            running_config['Region'] = options.Region

        # Credentials are always confirmed interactively.
        running_config['AK'] = prompt_for_input('AK', 'your account')
        running_config['SK'] = prompt_for_input('SK', 'your account')
        user = User('obscmd', running_config['AK'], running_config['SK'])
        # Don't show SK on screen display
        del running_config['SK']

        _to_bool('IsHTTPs')

        # Enforce a sane minimum connect timeout of 5 seconds.
        running_config['ConnectTimeout'] = int(running_config['ConnectTimeout'])
        if running_config['ConnectTimeout'] < 5:
            running_config['ConnectTimeout'] = 5

        # Normalise RemoteDir to 'a/b/' form: forward slashes, no leading
        # slash, exactly one trailing slash (empty stays empty).
        if running_config['RemoteDir']:
            running_config['RemoteDir'] = running_config['RemoteDir'].replace(
                '\\', '/').strip('/')
        if running_config['RemoteDir']:
            running_config['RemoteDir'] = running_config['RemoteDir'] + '/'
        running_config['DownloadTarget'] = running_config[
            'DownloadTarget'].lstrip('/')

        # All remaining on/off switches share the same textual format.
        for key in ('VirtualHost', 'RecordDetails', 'BadRequestCounted',
                    'PrintProgress', 'IgnoreExist', 'CompareETag',
                    'CheckFileChanging', 'ArchiveAfterUpload', 'CheckRoot',
                    'CheckSoftLinks'):
            _to_bool(key)

        if running_config['ProxyPort']:
            running_config['ProxyPort'] = int(running_config['ProxyPort'])

        # User's input
        running_config['Operation'] = prompt_for_input(
            'Operation', 'operation(upload/download/copy)')
        operation = running_config['Operation'].lower()
        if operation not in ('upload', 'download', 'copy'):
            print('Operation must be upload or download or copy, exit...')
            exit()
        # Map the operation onto its internal test case id.
        running_config['Testcase'] = {'upload': 201,
                                      'download': 202,
                                      'copy': 206}[operation]

        if running_config.get('MultipartObjectSize'):
            running_config['PartSize'] = prompt_for_input('PartSize',
                                                          'multipart size')
            # Clamp the upload part size into the service's legal range.
            if operation == 'upload' and int(
                    running_config['PartSize']) < UPLOAD_PART_MIN_SIZE:
                running_config['PartSize'] = str(UPLOAD_PART_MIN_SIZE)
            if operation == 'upload' and int(
                    running_config['PartSize']) > UPLOAD_PART_MAX_SIZE:
                running_config['PartSize'] = str(UPLOAD_PART_MAX_SIZE)
            if int(running_config['PartSize']) > int(
                    running_config.get('MultipartObjectSize')):
                print('In order to cut object(s) to pieces, PartSize must be less than MultipartObjectSize')
                exit()
        else:
            running_config['MultipartObjectSize'] = '0'

        running_config['Concurrency'] = int(running_config['Concurrency']) if \
            running_config['Concurrency'] else 1

        # Fixed values the rest of the tool expects to find.
        running_config['LongConnection'] = False
        running_config['ConnectionHeader'] = ''
        running_config['CollectBasicData'] = False
        running_config['LatencyRequestsNumber'] = False
        running_config['LatencyPercentileMap'] = False
        running_config['StatisticsInterval'] = 3
        running_config['LatencySections'] = '500,1000,3000,10000'

        # If server side encryption is on, set https + AWSV4 on.
        if running_config['SrvSideEncryptType']:
            if not running_config['IsHTTPs']:
                running_config['IsHTTPs'] = True
                logging.warn(
                    'change IsHTTPs to True while use SrvSideEncryptType')
            if running_config['AuthAlgorithm'] != 'AWSV4' and running_config[
                    'SrvSideEncryptType'].lower() == 'sse-kms':
                running_config['AuthAlgorithm'] = 'AWSV4'
                logging.warn(
                    'change AuthAlgorithm to AWSV4 while use SrvSideEncryptType = SSE-KMS')
    except IOError as data:
        print('[ERROR] Read config file %s error: %s' % (config_file_name, data))
        sys.exit()
def initialize_object_name(target_in_local, keys_already_exist_list):
    """Scan the local upload target(s) and enqueue (key, size, path) tasks.

    Recurses into directories, optionally skips symlinks, skips keys that
    already exist in the bucket, and aborts the process when any file
    would be cut into more than 10,000 multipart pieces. Mutates module
    globals: total_data_upload, number_of_objects_to_put, and pushes
    tasks onto all_files_queue. (Python 2 code: byte-string paths are
    decoded with SYS_ENCODING for the existence check.)
    """
    global total_data_upload
    global number_of_objects_to_put
    remote_dir = running_config['RemoteDir']
    multi_part_object_size = int(running_config.get('MultipartObjectSize'))
    part_size = int(running_config['PartSize'])

    def generate_task_tuple(file_path):
        # Build the task tuple for one regular file, or None when the
        # path is not a file / the key already exists in the bucket.
        global total_data_upload
        file_path = file_path.strip()
        if not os.path.isfile(file_path):
            print '{target} is not a file. Skip it.'.format(target=file_path)
        else:
            key = os.path.split(file_path)[1]
            if remote_dir:
                key = remote_dir + key
            task_tuple = None
            if key.decode(SYS_ENCODING) not in keys_already_exist_list:
                size = int(os.path.getsize(file_path))
                total_data_upload += size
                if size >= multi_part_object_size:
                    # py2 integer division; may overcount by one part for
                    # exact multiples, which only makes the 10,000-part
                    # limit check more conservative.
                    parts = size / part_size + 1
                    if parts > 10000:
                        msg_t = 'PartSize({part_size}) is too small.\n' \
                                'You have a file({file}) cut to more than 10,000 parts.\n' \
                                'Please make sure every file is cut to less than or equal to 10,000 parts. Exit...' \
                            .format(part_size=running_config['PartSize'],
                                    file=key)
                        print msg_t
                        logging.warn(msg_t)
                        exit()
                task_tuple = (key, size, file_path)
            return task_tuple

    # A comma means the target is a list of individual files.
    if ',' not in target_in_local:
        if running_config['CheckSoftLinks'] and os.path.islink(target_in_local):
            logging.error(
                "the local path [%s] is link, now exit!" % target_in_local)
            exit()
        if os.path.isdir(target_in_local):
            # Keys are rooted at the last component of LocalPath.
            top_dir = running_config['LocalPath'].split('/')[-1]
            files = []
            try:
                files = os.listdir(target_in_local)
            except OSError:
                # Unreadable directory: treated like an empty one below.
                pass
            if not files:
                # Empty directory: enqueue a zero-byte "directory object"
                # whose key ends with '/'.
                object_to_put = target_in_local.replace(
                    running_config['LocalPath'], top_dir)
                object_to_put = object_to_put.lstrip('/') + '/'
                key = target_in_local + '/'
                if remote_dir:
                    object_to_put = remote_dir + object_to_put
                all_files_queue.put((object_to_put, 0, key))
                logging.debug("=== object_to_put : %s, keyfile : %s ===" % (
                    object_to_put, key))
                number_of_objects_to_put += 1
            else:
                for fi in files:
                    fi_d = os.path.join(target_in_local, fi)
                    if running_config['CheckSoftLinks'] and os.path.islink(fi_d):
                        logging.warning(
                            'skip the file[%s] because it is link!' % fi_d)
                        continue
                    if os.path.isdir(fi_d):
                        # Recurse into subdirectories.
                        logging.debug('scanning dir: ' + fi_d)
                        initialize_object_name(fi_d, keys_already_exist_list)
                    elif os.path.isfile(fi_d):
                        object_to_put = fi_d.replace(
                            running_config['LocalPath'], top_dir)
                        object_to_put = object_to_put.lstrip('/')
                        if remote_dir:
                            object_to_put = remote_dir + object_to_put
                        if object_to_put.decode(
                                SYS_ENCODING) not in keys_already_exist_list:
                            object_size = int(os.path.getsize(fi_d))
                            total_data_upload += object_size
                            if object_size >= multi_part_object_size:
                                # Same conservative 10,000-part limit
                                # check as in generate_task_tuple.
                                parts_count = object_size / part_size + 1
                                if parts_count > 10000:
                                    msg = 'PartSize({part_size}) is too small.\n' \
                                          'You have a file({file}) cut to more than 10,000 parts.\n' \
                                          'Please make sure every file is cut to less than or equal to 10,000 parts.\n' \
                                          'Exit...' \
                                        .format(
                                            part_size=running_config['PartSize'],
                                            file=object_to_put)
                                    print msg
                                    logging.error(msg)
                                    exit()
                            all_files_queue.put(
                                (object_to_put, object_size, fi_d))
                            number_of_objects_to_put += 1
        elif os.path.isfile(target_in_local):
            task_t = generate_task_tuple(target_in_local)
            if task_t:
                all_files_queue.put(task_t)
                number_of_objects_to_put += 1
    else:
        # Comma-separated list of files; de-duplicate before queuing.
        targets = target_in_local.split(',')
        targets = list(set(targets))
        for target in targets:
            task_t = generate_task_tuple(target)
            if task_t:
                all_files_queue.put(task_t)
                number_of_objects_to_put += 1
''' old get bucket,limit in key numbers in bucket
def get_all_keys_in_bucket(bucket_name, ak, sk, target_in_bucket=''):
from xml.etree import ElementTree
m = 'getting keys in bucket...'
print m
logging.warn(m)
lists_objects = []
targets = list(set(target_in_bucket.split(',')))
for target in targets:
target = target.strip()
list_objects = []
marker = ''
while marker is not None:
conn = obspycmd.MyHTTPConnection(host=running_config['DomainName'],
is_secure=running_config[
'IsHTTPs'],
ssl_version=running_config[
'sslVersion'],
timeout=running_config[
'ConnectTimeout'],
long_connection=running_config[
'LongConnection'],
conn_header=running_config[
'ConnectionHeader'],
proxy_host=running_config[
'ProxyHost'],
proxy_port=running_config[
'ProxyPort'],
proxy_username=running_config[
'ProxyUserName'],
proxy_password=running_config[
'ProxyPassWord'])
rest = obspycmd.OBSRequestDescriptor(
request_type='ListObjectsInBucket',
ak=ak, sk=sk,
auth_algorithm=running_config['AuthAlgorithm'],
virtual_host=running_config['VirtualHost'],
domain_name=running_config['DomainName'],
region=running_config['Region'])
rest.bucket = bucket_name
# List a directory
if target.endswith('/'):
dir_prefix = target.strip('/')
if dir_prefix:
dir_prefix = dir_prefix + '/'
rest.query_args['prefix'] = dir_prefix
elif target.endswith('*'):
prefix = target.strip('/').rstrip('*')
if prefix:
rest.query_args['prefix'] = prefix
# List an object
elif target:
rest.query_args['prefix'] = target
if marker:
rest.query_args['marker'] = marker
resp = obspycmd.OBSRequestHandler(rest, conn).make_request()
marker = resp.return_data
xml_body = resp.recv_body
logging.debug("=== response body is %s ===" % xml_body)
if not xml_body:
print 'Error in http request, please see log/*.log'
exit()
if '<Code>NoSuchBucket</Code>' in xml_body:
print 'No such bucket(%s), exit...' % bucket_name
logging.error('No such bucket(%s), exit...' % bucket_name)
exit()
root = ElementTree.fromstring(xml_body)
logging.debug("=== the elementTree of xml_body is %s ===" % root)
if '<Contents>' in xml_body:
logging.debug("=== root[6:] is %s, and root[5:] is %s ===" % (
root[6:], root[5:]))
if '<NextMarker>' in xml_body:
for contents_element in root[6:]:
if contents_element[0].text[-1] != '/':
# list_objects.append((contents_element[0].text,
# int(contents_element[3].text)))
all_objects_queue.put((contents_element[0].text,
int(contents_element[3].text)))
else:
for contents_element in root[5:]:
logging.debug(
"=== contents_element is %s, contents_element[0] is %s, contents_element[3] is %s ===" % (
contents_element, contents_element[0].text,
contents_element[3].text))
if contents_element[0].text[-1] != '/' or int(
contents_element[3].text) == 0:
list_objects.append((contents_element[0].text,
int(contents_element[3].text)))
# If target is a single object, check if it's in the bucket.
if target and not target.endswith(('/', '*')):
find_flag = False
for one_tuple in list_objects:
if target == one_tuple[0].encode(SYS_ENCODING):
find_flag = True
break
if not find_flag:
list_objects = []
| |
<reponame>ccgenomics/somaticseq
#!/usr/bin/env python3
import sys, argparse, gzip, os, re, subprocess, logging
MY_DIR = os.path.dirname(os.path.realpath(__file__))
PRE_DIR = os.path.join(MY_DIR, os.pardir)
sys.path.append( PRE_DIR )
import genomicFileHandler.genomic_file_handlers as genome
import vcfModifier.copy_TextFile as copy_TextFile
import somaticseq.combine_callers as combineCallers
# Console logging: a stream handler that emits DEBUG-and-above records
# with a timestamped "name - level - message" format.
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger = logging.getLogger('SomaticSeq')
logger.setLevel(logging.DEBUG)
logger.addHandler(ch)
# Paths to the R scripts invoked via subprocess for classifier training
# (ada_model_builder) and prediction (ada_model_predictor).
adaTrainer = os.sep.join( (PRE_DIR, 'r_scripts', 'ada_model_builder_ntChange.R') )
adaPredictor = os.sep.join( (PRE_DIR, 'r_scripts', 'ada_model_predictor.R') )
def runPaired(outdir, ref, tbam, nbam, tumor_name='TUMOR', normal_name='NORMAL', truth_snv=None, truth_indel=None, classifier_snv=None, classifier_indel=None, pass_threshold=0.5, lowqual_threshold=0.1, hom_threshold=0.85, het_threshold=0.01, dbsnp=None, cosmic=None, inclusion=None, exclusion=None, mutect=None, indelocator=None, mutect2=None, varscan_snv=None, varscan_indel=None, jsm=None, sniper=None, vardict=None, muse=None, lofreq_snv=None, lofreq_indel=None, scalpel=None, strelka_snv=None, strelka_indel=None, tnscope=None, platypus=None, min_mq=1, min_bq=5, min_caller=0.5, somaticseq_train=False, ensembleOutPrefix='Ensemble.', consensusOutPrefix='Consensus.', classifiedOutPrefix='SSeq.Classified.', keep_intermediates=False):
    """Run the SomaticSeq ensemble workflow on a paired tumor/normal sample.

    Steps:
      1. Combine the per-caller VCFs into merged SNV and indel variant lists
         (combineCallers.combinePaired).
      2. Turn each merged list plus BAM evidence into a feature TSV
         (somatic_vcf2tsv.vcf2tsv), separately for SNVs and indels.
      3. If a classifier is supplied, score the TSV with the R predictor and
         write a classified VCF; otherwise write a consensus VCF, and — when
         somaticseq_train is set and a truth VCF is available — invoke the R
         trainer on the ensemble TSV first.

    Intermediate files are removed at the end unless keep_intermediates is True.
    Caller VCF arguments left as None simply exclude that tool from the ensemble.
    """
    import somaticseq.somatic_vcf2tsv as somatic_vcf2tsv
    import somaticseq.SSeq_tsv2vcf as tsv2vcf

    files_to_delete = set()

    # SNV callers that contributed input; these names label tool columns for tsv2vcf.
    snvCallers = []
    if mutect or mutect2: snvCallers.append('MuTect')
    if varscan_snv: snvCallers.append('VarScan2')
    if jsm: snvCallers.append('JointSNVMix2')
    if sniper: snvCallers.append('SomaticSniper')
    if vardict: snvCallers.append('VarDict')
    if muse: snvCallers.append('MuSE')
    if lofreq_snv: snvCallers.append('LoFreq')
    if strelka_snv: snvCallers.append('Strelka')
    if tnscope: snvCallers.append('TNscope')
    if platypus: snvCallers.append('Platypus')

    # Indel callers that contributed input.
    indelCallers = []
    if indelocator or mutect2: indelCallers.append('MuTect')
    if varscan_indel: indelCallers.append('VarScan2')
    if vardict: indelCallers.append('VarDict')
    if lofreq_indel: indelCallers.append('LoFreq')
    if scalpel: indelCallers.append('Scalpel')
    if strelka_indel: indelCallers.append('Strelka')
    if tnscope: indelCallers.append('TNscope')
    if platypus: indelCallers.append('Platypus')

    # Combine individual VCFs into a simple VCF list of variants:
    outSnv, outIndel, intermediateVcfs, tempFiles = combineCallers.combinePaired(outdir=outdir, ref=ref, tbam=tbam, nbam=nbam, inclusion=inclusion, exclusion=exclusion, mutect=mutect, indelocator=indelocator, mutect2=mutect2, varscan_snv=varscan_snv, varscan_indel=varscan_indel, jsm=jsm, sniper=sniper, vardict=vardict, muse=muse, lofreq_snv=lofreq_snv, lofreq_indel=lofreq_indel, scalpel=scalpel, strelka_snv=strelka_snv, strelka_indel=strelka_indel, tnscope=tnscope, platypus=platypus, keep_intermediates=True)

    files_to_delete.add(outSnv)
    files_to_delete.add(outIndel)
    # set.update instead of the original side-effect list comprehension.
    files_to_delete.update(tempFiles)

    ensembleSnv = os.sep.join(( outdir, ensembleOutPrefix + 'sSNV.tsv' ))
    ensembleIndel = os.sep.join(( outdir, ensembleOutPrefix + 'sINDEL.tsv' ))

    ###################### SNV ######################
    # MuTect2's split SNV output takes precedence over a plain MuTect VCF.
    mutect_infile = intermediateVcfs['MuTect2']['snv'] if intermediateVcfs['MuTect2']['snv'] else mutect
    somatic_vcf2tsv.vcf2tsv(is_vcf=outSnv, nbam_fn=nbam, tbam_fn=tbam, truth=truth_snv, cosmic=cosmic, dbsnp=dbsnp, mutect=mutect_infile, varscan=varscan_snv, jsm=jsm, sniper=sniper, vardict=intermediateVcfs['VarDict']['snv'], muse=muse, lofreq=lofreq_snv, scalpel=None, strelka=strelka_snv, tnscope=intermediateVcfs['TNscope']['snv'], platypus=intermediateVcfs['Platypus']['snv'], dedup=True, min_mq=min_mq, min_bq=min_bq, min_caller=min_caller, ref_fa=ref, p_scale=None, outfile=ensembleSnv)

    # Classify SNV calls
    if classifier_snv:
        classifiedSnvTsv = os.sep.join(( outdir, classifiedOutPrefix + 'sSNV.tsv' ))
        classifiedSnvVcf = os.sep.join(( outdir, classifiedOutPrefix + 'sSNV.vcf' ))
        subprocess.call( (adaPredictor, classifier_snv, ensembleSnv, classifiedSnvTsv) )
        tsv2vcf.tsv2vcf(classifiedSnvTsv, classifiedSnvVcf, snvCallers, pass_score=pass_threshold, lowqual_score=lowqual_threshold, hom_threshold=hom_threshold, het_threshold=het_threshold, single_mode=False, paired_mode=True, normal_sample_name=normal_name, tumor_sample_name=tumor_name, print_reject=True, phred_scaled=True)
    else:
        # Train SNV classifier:
        if somaticseq_train and truth_snv:
            subprocess.call( (adaTrainer, ensembleSnv, 'Consistent_Mates', 'Inconsistent_Mates') )
        consensusSnvVcf = os.sep.join(( outdir, consensusOutPrefix + 'sSNV.vcf' ))
        tsv2vcf.tsv2vcf(ensembleSnv, consensusSnvVcf, snvCallers, hom_threshold=hom_threshold, het_threshold=het_threshold, single_mode=False, paired_mode=True, normal_sample_name=normal_name, tumor_sample_name=tumor_name, print_reject=True)

    ###################### INDEL ######################
    mutect_infile = intermediateVcfs['MuTect2']['indel'] if intermediateVcfs['MuTect2']['indel'] else indelocator
    somatic_vcf2tsv.vcf2tsv(is_vcf=outIndel, nbam_fn=nbam, tbam_fn=tbam, truth=truth_indel, cosmic=cosmic, dbsnp=dbsnp, mutect=mutect_infile, varscan=varscan_indel, vardict=intermediateVcfs['VarDict']['indel'], lofreq=lofreq_indel, scalpel=scalpel, strelka=strelka_indel, tnscope=intermediateVcfs['TNscope']['indel'], platypus=intermediateVcfs['Platypus']['indel'], dedup=True, min_mq=min_mq, min_bq=min_bq, min_caller=min_caller, ref_fa=ref, p_scale=None, outfile=ensembleIndel)

    # Classify INDEL calls
    if classifier_indel:
        classifiedIndelTsv = os.sep.join(( outdir, classifiedOutPrefix + 'sINDEL.tsv' ))
        classifiedIndelVcf = os.sep.join(( outdir, classifiedOutPrefix + 'sINDEL.vcf' ))
        subprocess.call( (adaPredictor, classifier_indel, ensembleIndel, classifiedIndelTsv) )
        tsv2vcf.tsv2vcf(classifiedIndelTsv, classifiedIndelVcf, indelCallers, pass_score=pass_threshold, lowqual_score=lowqual_threshold, hom_threshold=hom_threshold, het_threshold=het_threshold, single_mode=False, paired_mode=True, normal_sample_name=normal_name, tumor_sample_name=tumor_name, print_reject=True, phred_scaled=True)
    else:
        # Train INDEL classifier (indels get extra Strelka quality features):
        if somaticseq_train and truth_indel:
            subprocess.call( (adaTrainer, ensembleIndel, 'Strelka_QSS', 'Strelka_TQSS', 'Consistent_Mates', 'Inconsistent_Mates') )
        consensusIndelVcf = os.sep.join(( outdir, consensusOutPrefix + 'sINDEL.vcf' ))
        tsv2vcf.tsv2vcf(ensembleIndel, consensusIndelVcf, indelCallers, hom_threshold=hom_threshold, het_threshold=het_threshold, single_mode=False, paired_mode=True, normal_sample_name=normal_name, tumor_sample_name=tumor_name, print_reject=True)

    ## Clean up after yourself ##
    if not keep_intermediates:
        for file_i in files_to_delete:
            os.remove(file_i)
            logger.info('Removed {}'.format( file_i ) )
def runSingle(outdir, ref, bam, sample_name='TUMOR', truth_snv=None, truth_indel=None, classifier_snv=None, classifier_indel=None, pass_threshold=0.5, lowqual_threshold=0.1, hom_threshold=0.85, het_threshold=0.01, dbsnp=None, cosmic=None, inclusion=None, exclusion=None, mutect=None, mutect2=None, varscan=None, vardict=None, lofreq=None, scalpel=None, strelka=None, min_mq=1, min_bq=5, min_caller=0.5, somaticseq_train=False, ensembleOutPrefix='Ensemble.', consensusOutPrefix='Consensus.', classifiedOutPrefix='SSeq.Classified.', keep_intermediates=False):
    """Run the SomaticSeq ensemble workflow on a single (tumor-only) sample.

    Mirrors runPaired: combine the per-caller VCFs into merged SNV/indel lists,
    extract features into TSVs, then either classify with the supplied R model
    or emit consensus VCFs (optionally training a model first when ground-truth
    VCFs and somaticseq_train are provided).  Intermediate files are removed
    unless keep_intermediates is True.
    """
    import somaticseq.single_sample_vcf2tsv as single_sample_vcf2tsv
    import somaticseq.SSeq_tsv2vcf as tsv2vcf

    files_to_delete = set()

    # SNV callers that contributed input.
    snvCallers = []
    if mutect or mutect2: snvCallers.append('MuTect')
    if varscan: snvCallers.append('VarScan2')
    if vardict: snvCallers.append('VarDict')
    if lofreq: snvCallers.append('LoFreq')
    if strelka: snvCallers.append('Strelka')

    # Indel callers that contributed input.
    # NOTE(review): this appends 'MuTect2' whereas runPaired appends 'MuTect' in
    # the analogous branch — confirm with tsv2vcf which label is expected.
    indelCallers = []
    if mutect2: indelCallers.append('MuTect2')
    if varscan: indelCallers.append('VarScan2')
    if vardict: indelCallers.append('VarDict')
    if lofreq: indelCallers.append('LoFreq')
    if scalpel: indelCallers.append('Scalpel')
    if strelka: indelCallers.append('Strelka')

    # Combine individual VCFs into a simple VCF list of variants:
    outSnv, outIndel, intermediateVcfs, tempFiles = combineCallers.combineSingle(outdir=outdir, ref=ref, bam=bam, inclusion=inclusion, exclusion=exclusion, mutect=mutect, mutect2=mutect2, varscan=varscan, vardict=vardict, lofreq=lofreq, scalpel=scalpel, strelka=strelka, keep_intermediates=True)

    files_to_delete.add(outSnv)
    files_to_delete.add(outIndel)
    # set.update instead of the original side-effect list comprehension.
    files_to_delete.update(tempFiles)

    ensembleSnv = os.sep.join(( outdir, ensembleOutPrefix + 'sSNV.tsv' ))
    ensembleIndel = os.sep.join(( outdir, ensembleOutPrefix + 'sINDEL.tsv' ))

    ###################### SNV ######################
    # MuTect2's split SNV output takes precedence over a plain MuTect VCF.
    mutect_infile = intermediateVcfs['MuTect2']['snv'] if intermediateVcfs['MuTect2']['snv'] else mutect
    single_sample_vcf2tsv.vcf2tsv(is_vcf=outSnv, bam_fn=bam, truth=truth_snv, cosmic=cosmic, dbsnp=dbsnp, mutect=mutect_infile, varscan=intermediateVcfs['VarScan2']['snv'], vardict=intermediateVcfs['VarDict']['snv'], lofreq=intermediateVcfs['LoFreq']['snv'], scalpel=None, strelka=intermediateVcfs['Strelka']['snv'], dedup=True, min_mq=min_mq, min_bq=min_bq, min_caller=min_caller, ref_fa=ref, p_scale=None, outfile=ensembleSnv)

    # Classify SNV calls
    if classifier_snv:
        classifiedSnvTsv = os.sep.join(( outdir, classifiedOutPrefix + 'sSNV.tsv' ))
        classifiedSnvVcf = os.sep.join(( outdir, classifiedOutPrefix + 'sSNV.vcf' ))
        subprocess.call( (adaPredictor, classifier_snv, ensembleSnv, classifiedSnvTsv) )
        tsv2vcf.tsv2vcf(classifiedSnvTsv, classifiedSnvVcf, snvCallers, pass_score=pass_threshold, lowqual_score=lowqual_threshold, hom_threshold=hom_threshold, het_threshold=het_threshold, single_mode=True, paired_mode=False, tumor_sample_name=sample_name, print_reject=True, phred_scaled=True)
    else:
        # Train SNV classifier:
        if somaticseq_train and truth_snv:
            subprocess.call( (adaTrainer, ensembleSnv, 'Consistent_Mates', 'Inconsistent_Mates') )
        consensusSnvVcf = os.sep.join(( outdir, consensusOutPrefix + 'sSNV.vcf' ))
        tsv2vcf.tsv2vcf(ensembleSnv, consensusSnvVcf, snvCallers, hom_threshold=hom_threshold, het_threshold=het_threshold, single_mode=True, paired_mode=False, tumor_sample_name=sample_name, print_reject=True)

    ###################### INDEL ######################
    single_sample_vcf2tsv.vcf2tsv(is_vcf=outIndel, bam_fn=bam, truth=truth_indel, cosmic=cosmic, dbsnp=dbsnp, mutect=intermediateVcfs['MuTect2']['indel'], varscan=intermediateVcfs['VarScan2']['indel'], vardict=intermediateVcfs['VarDict']['indel'], lofreq=intermediateVcfs['LoFreq']['indel'], scalpel=scalpel, strelka=intermediateVcfs['Strelka']['indel'], dedup=True, min_mq=min_mq, min_bq=min_bq, min_caller=min_caller, ref_fa=ref, p_scale=None, outfile=ensembleIndel)

    # Classify INDEL calls
    if classifier_indel:
        classifiedIndelTsv = os.sep.join(( outdir, classifiedOutPrefix + 'sINDEL.tsv' ))
        classifiedIndelVcf = os.sep.join(( outdir, classifiedOutPrefix + 'sINDEL.vcf' ))
        subprocess.call( (adaPredictor, classifier_indel, ensembleIndel, classifiedIndelTsv) )
        tsv2vcf.tsv2vcf(classifiedIndelTsv, classifiedIndelVcf, indelCallers, pass_score=pass_threshold, lowqual_score=lowqual_threshold, hom_threshold=hom_threshold, het_threshold=het_threshold, single_mode=True, paired_mode=False, tumor_sample_name=sample_name, print_reject=True, phred_scaled=True)
    else:
        # Train INDEL classifier (indels get extra Strelka quality features):
        if somaticseq_train and truth_indel:
            subprocess.call( (adaTrainer, ensembleIndel, 'Strelka_QSS', 'Strelka_TQSS', 'Consistent_Mates', 'Inconsistent_Mates') )
        consensusIndelVcf = os.sep.join(( outdir, consensusOutPrefix + 'sINDEL.vcf' ))
        tsv2vcf.tsv2vcf(ensembleIndel, consensusIndelVcf, indelCallers, hom_threshold=hom_threshold, het_threshold=het_threshold, single_mode=True, paired_mode=False, tumor_sample_name=sample_name, print_reject=True)

    ## Clean up after yourself ##
    if not keep_intermediates:
        for file_i in files_to_delete:
            os.remove(file_i)
            logger.info('Removed {}'.format( file_i ) )
################################################
def run():
    """Parse SomaticSeq command-line arguments and return them as a dict.

    Two sub-commands select the mode: 'paired' (tumor/normal) and 'single'
    (tumor-only).  Each sub-command registers its own caller-VCF options and
    sets 'which' via set_defaults so __main__ can dispatch on it.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Options common to both modes:
    parser.add_argument('-outdir', '--output-directory', type=str, help='output directory', default='.')
    parser.add_argument('-ref', '--genome-reference', type=str, help='.fasta.fai file to get the contigs', required=True)
    parser.add_argument('--truth-snv', type=str, help='VCF of true hits')
    parser.add_argument('--truth-indel', type=str, help='VCF of true hits')
    parser.add_argument('--classifier-snv', type=str, help='RData for SNV')
    parser.add_argument('--classifier-indel', type=str, help='RData for INDEL')
    parser.add_argument('--pass-threshold', type=float, help='SCORE for PASS', default=0.5)
    parser.add_argument('--lowqual-threshold', type=float, help='SCORE for LowQual', default=0.1)
    parser.add_argument('-hom', '--homozygous-threshold', type=float, help='VAF for homozygous', default=0.85)
    parser.add_argument('-het', '--heterozygous-threshold', type=float, help='VAF for heterozygous', default=0.01)
    parser.add_argument('-minMQ', '--minimum-mapping-quality',type=float, help='Minimum mapping quality below which is considered poor', default=1)
    parser.add_argument('-minBQ', '--minimum-base-quality', type=float, help='Minimum base quality below which is considered poor', default=5)
    parser.add_argument('-mincaller', '--minimum-num-callers', type=float, help='Minimum number of tools to be considered', default=0.5)
    parser.add_argument('-dbsnp', '--dbsnp-vcf', type=str, help='dbSNP VCF',)
    parser.add_argument('-cosmic', '--cosmic-vcf', type=str, help='COSMIC VCF')
    parser.add_argument('-include', '--inclusion-region', type=str, help='inclusion bed')
    parser.add_argument('-exclude', '--exclusion-region', type=str, help='exclusion bed')
    parser.add_argument('-nt', '--threads', type=int, help='number of threads', default=1)
    parser.add_argument('--keep-intermediates', action='store_true', help='Keep intermediate files', default=False)
    parser.add_argument('-train', '--somaticseq-train', action='store_true', help='Invoke training mode with ground truths', default=False)

    # Modes:
    # NOTE(review): the sub-command is optional in argparse by default; running
    # without one leaves 'which' unset and __main__ will raise KeyError —
    # consider add_subparsers(dest='which', required=True).
    sample_parsers = parser.add_subparsers(title="sample_mode")

    # Paired Sample mode
    parser_paired = sample_parsers.add_parser('paired')
    parser_paired.add_argument('-tbam', '--tumor-bam-file', type=str, help='Tumor BAM File', required=True)
    parser_paired.add_argument('-nbam', '--normal-bam-file', type=str, help='Normal BAM File', required=True)
    parser_paired.add_argument('-tumorSM', '--tumor-sample', type=str, help='Tumor Name', default='TUMOR')
    parser_paired.add_argument('-normalSM', '--normal-sample', type=str, help='Normal Name', default='NORMAL')
    parser_paired.add_argument('-mutect', '--mutect-vcf', type=str, help='MuTect VCF', )
    parser_paired.add_argument('-indelocator', '--indelocator-vcf', type=str, help='Indelocator VCF', )
    parser_paired.add_argument('-mutect2', '--mutect2-vcf', type=str, help='MuTect2 VCF', )
    parser_paired.add_argument('-varscansnv', '--varscan-snv', type=str, help='VarScan2 VCF', )
    parser_paired.add_argument('-varscanindel', '--varscan-indel', type=str, help='VarScan2 VCF', )
    parser_paired.add_argument('-jsm', '--jsm-vcf', type=str, help='JointSNVMix2 VCF', )
    parser_paired.add_argument('-sniper', '--somaticsniper-vcf', type=str, help='SomaticSniper VCF', )
    parser_paired.add_argument('-vardict', '--vardict-vcf', type=str, help='VarDict VCF', )
    parser_paired.add_argument('-muse', '--muse-vcf', type=str, help='MuSE VCF', )
    parser_paired.add_argument('-lofreqsnv', '--lofreq-snv', type=str, help='LoFreq VCF', )
    parser_paired.add_argument('-lofreqindel', '--lofreq-indel', type=str, help='LoFreq VCF', )
    parser_paired.add_argument('-scalpel', '--scalpel-vcf', type=str, help='Scalpel VCF', )
    parser_paired.add_argument('-strelkasnv', '--strelka-snv', type=str, help='Strelka VCF', )
    parser_paired.add_argument('-strelkaindel', '--strelka-indel', type=str, help='Strelka VCF', )
    parser_paired.add_argument('-tnscope', '--tnscope-vcf', type=str, help='TNscope VCF', )
    parser_paired.add_argument('-platypus', '--platypus-vcf', type=str, help='Platypus VCF', )
    parser_paired.set_defaults(which='paired')

    # Single Sample mode
    parser_single = sample_parsers.add_parser('single')
    parser_single.add_argument('-bam', '--bam-file', type=str, help='BAM File', required=True)
    parser_single.add_argument('-SM', '--sample-name', type=str, help='Sample Name', default='TUMOR')
    parser_single.add_argument('-mutect', '--mutect-vcf', type=str, help='MuTect VCF', )
    parser_single.add_argument('-mutect2', '--mutect2-vcf', type=str, help='MuTect2 VCF', )
    parser_single.add_argument('-varscan', '--varscan-vcf', type=str, help='VarScan2 VCF', )
    parser_single.add_argument('-vardict', '--vardict-vcf', type=str, help='VarDict VCF', )
    parser_single.add_argument('-lofreq', '--lofreq-vcf', type=str, help='LoFreq VCF', )
    parser_single.add_argument('-scalpel', '--scalpel-vcf', type=str, help='Scalpel VCF', )
    parser_single.add_argument('-strelka', '--strelka-vcf', type=str, help='Strelka VCF', )
    parser_single.set_defaults(which='single')

    args = parser.parse_args()
    # (removed a dead `inputParameters = {}` that was immediately overwritten)
    inputParameters = vars(args)

    logger.info( 'SomaticSeq Input Arguments: ' + ', '.join( [ '{}={}'.format(i, inputParameters[i]) for i in inputParameters ] ) )

    return inputParameters
################################################################################################
# Execute:
if __name__ == '__main__':
runParameters = run()
os.makedirs(runParameters['output_directory'], exist_ok=True)
if runParameters['which'] == 'paired':
runPaired( outdir = runParameters['output_directory'], \
ref = runParameters['genome_reference'], \
tbam = runParameters['tumor_bam_file'], \
nbam = runParameters['normal_bam_file'], \
tumor_name = runParameters['tumor_sample'], \
normal_name = runParameters['normal_sample'], \
truth_snv = runParameters['truth_snv'], \
truth_indel = runParameters['truth_indel'], \
classifier_snv = runParameters['classifier_snv'], \
classifier_indel = runParameters['classifier_indel'], \
pass_threshold = runParameters['pass_threshold'], \
lowqual_threshold = runParameters['lowqual_threshold'], \
hom_threshold = runParameters['homozygous_threshold'], \
het_threshold = runParameters['heterozygous_threshold'], \
min_mq = runParameters['minimum_mapping_quality'], \
min_bq = runParameters['minimum_base_quality'], \
min_caller = runParameters['minimum_num_callers'], \
dbsnp = runParameters['dbsnp_vcf'], \
cosmic = runParameters['cosmic_vcf'], \
| |
from __future__ import print_function
import timeit
import re
import pandas as pd
from datetime import datetime, timedelta
from flask_restplus import Namespace, Resource
from pymongo import MongoClient
# Flask-RESTPlus namespace for the automatic-analysis endpoints.
api = Namespace('automatic_analysis', description='automatic_analysis')

# Local MongoDB instance holding the GISAID sequence metadata.
uri = "mongodb://localhost:27017/gcm_gisaid"
client = MongoClient(uri)
db = client.gcm_gisaid
# collection_db = db.seq_2021_08_26_2
collection_db = db.seq_test_0  # sequence documents queried by the functions below
collection_update_date = db.db_meta  # database metadata (e.g. update dates)
collection_result_variant_db = db.database_variants_test  # analysis results output
# Splits mutation strings into (protein, reference, position, alternative) groups,
# matching the "<pro>_<org><loc><alt>" format assembled elsewhere in this module.
PATTERN = re.compile("([a-zA-Z0-9]+)_([a-zA-Z~@#$^*()_+=[\]{}|\\,.?: -]+)([\d]+)([a-zA-Z~@#$^*()_+=[\]{}|\\,.?: -]+)")
# Module-level cache filled by world_growth(); holds the world-wide weekly counts.
world_growth_obj = {}
def world_growth(today_date, location_granularity):
    """Compute world-wide sequence counts for this week vs. the previous week.

    Counts documents with fully-specified collection dates in the windows
    [today-7d, today) and [today-14d, today-7d), and stores the summary in the
    module-level ``world_growth_obj['world_growth']``; later per-lineage
    computations read these counts back as denominators.
    """
    # Week boundaries (the original `today_date.replace(day=today_date.day)`
    # was a no-op and has been removed).
    last_week_date = today_date - timedelta(days=7)
    previous_week_date = last_week_date - timedelta(days=7)

    # c_coll_date_prec == 2: presumably a fully precise collection date — confirm with schema.
    query = {
        'c_coll_date_prec': {
            '$eq': 2
        },
    }
    query_this_week = query.copy()
    query_prev_week = query.copy()
    query_this_week['collection_date'] = {'$lt': today_date, '$gte': last_week_date}
    query_prev_week['collection_date'] = {'$lt': last_week_date, '$gte': previous_week_date}

    results_this_week = collection_db.count_documents(query_this_week)
    results_prev_week = collection_db.count_documents(query_prev_week)

    # World totals: location/lineage/mutation keys are None, and the population
    # totals equal the world totals.  Percentages are filled in elsewhere.
    world_growth_obj['world_growth'] = {f'{location_granularity[0]}': None,
                                        f'{location_granularity[1]}': None,
                                        f'{location_granularity[2]}': None,
                                        'lineage': None,
                                        'mut': None,
                                        'total_seq_world_prev_week': results_prev_week,
                                        'total_seq_world_this_week': results_this_week,
                                        'total_seq_pop_prev_week': results_prev_week,
                                        'total_seq_pop_this_week': results_this_week,
                                        'count_prev_week': results_prev_week,
                                        'count_this_week': results_this_week,
                                        'perc_prev_week': None,
                                        'perc_this_week': None,
                                        'diff_perc': None,
                                        'date': today_date.strftime("%Y-%m-%d"),
                                        'granularity': location_granularity[0]
                                        }
# Tally of sequences per geographic area; only referenced from commented-out
# code in lineage_growth() below.
count_all_sequences_for_geo = {'total': 0}
def lineage_growth(today_date, location_granularity, location_0, location_1, location_2, lineage):
    """Count sequences of `lineage` at one location for this week vs. last week.

    Uses the world-wide counts previously stored by world_growth() as the
    world denominators, and returns a summary dict whose percentage fields are
    left as None (computed later by the caller).
    """
    # Week boundaries (the original no-op `.replace(day=...)` calls removed).
    last_week_date = today_date - timedelta(days=7)
    previous_week_date = last_week_date - timedelta(days=7)

    # Base filter: precise collection date, exact location triple, exact lineage.
    query = {
        'c_coll_date_prec': {
            '$eq': 2
        },
        f'location.{location_granularity[0]}': {
            '$eq': location_0
        },
        f'location.{location_granularity[1]}': {
            '$eq': location_1
        },
        f'location.{location_granularity[2]}': {
            '$eq': location_2
        },
        'covv_lineage': {
            '$eq': lineage
        },
    }
    query_this_week = query.copy()
    query_prev_week = query.copy()
    query_this_week['collection_date'] = {'$lt': today_date, '$gte': last_week_date}
    query_prev_week['collection_date'] = {'$lt': last_week_date, '$gte': previous_week_date}

    results_this_week = collection_db.count_documents(query_this_week)
    results_prev_week = collection_db.count_documents(query_prev_week)

    # World denominators computed earlier by world_growth().
    denominator_world_prev_week = world_growth_obj['world_growth']['count_prev_week']
    denominator_world_this_week = world_growth_obj['world_growth']['count_this_week']

    result = {f'{location_granularity[0]}': location_0,
              f'{location_granularity[1]}': location_1,
              f'{location_granularity[2]}': location_2,
              'lineage': lineage,
              'mut': None,
              'total_seq_world_prev_week': denominator_world_prev_week,
              'total_seq_world_this_week': denominator_world_this_week,
              'total_seq_pop_prev_week': results_prev_week,
              'total_seq_pop_this_week': results_this_week,
              'count_prev_week': results_prev_week,
              'count_this_week': results_this_week,
              'perc_prev_week': None,
              'perc_this_week': None,
              'diff_perc': None,
              'date': today_date.strftime("%Y-%m-%d"),
              'granularity': location_granularity[2]
              }

    return result
def get_all_mutation_not_characteristics(lineage, location_0, location_1, location_2, today_date,
                                         location_granularity, ):
    """Return mutations of `lineage` at a location that are common but not characteristic.

    Looks at the two weeks ending at `today_date` and keeps mutations present in
    more than 1% but fewer than 75% of the lineage's sequences there, formatted
    as "<pro>_<org><loc><alt>" strings.
    """
    # Two-week window ending today (the original no-op `.replace(day=...)` removed).
    previous_week_date = today_date - timedelta(days=14)

    # One filter drives both the denominator count and the aggregation —
    # previously the same dict was written out twice.
    match_filter = {
        'c_coll_date_prec': {
            '$eq': 2
        },
        'collection_date': {
            '$lt': today_date,
            '$gte': previous_week_date
        },
        f'location.{location_granularity[0]}': {
            '$eq': location_0
        },
        f'location.{location_granularity[1]}': {
            '$eq': location_1
        },
        f'location.{location_granularity[2]}': {
            '$eq': location_2
        },
        'covv_lineage': {
            '$eq': lineage
        },
    }

    # Total sequences of this lineage at this location in the window.
    denominator_lineage = collection_db.count_documents(match_filter)

    # Count occurrences of each (lineage, protein, ref, position, alt) mutation.
    pipeline = [
        {"$match": match_filter},
        {"$unwind": "$muts"},
        {"$group": {"_id": {'lin': '$covv_lineage',
                            'pro': "$muts.pro",
                            'org': "$muts.org",
                            'loc': "$muts.loc",
                            'alt': "$muts.alt",
                            },
                    "count": {"$sum": 1}}},
    ]
    results = collection_db.aggregate(pipeline, allowDiskUse=True)

    # Keep mutations in (1%, 75%) of the lineage's sequences and render them as strings.
    return [
        f"{x['_id']['pro']}_{x['_id']['org']}{x['_id']['loc']}{x['_id']['alt']}"
        for x in results
        if 0.75 > x['count'] / denominator_lineage > 0.01
    ]
# Cache of per-location sequence counts for the last week; filled by
# get_all_geo_last_week() under the key 'all_geo_last_week'.
all_geo_last_week_dict = {}
def get_all_geo_last_week(location_granularity, today_date):
    """Collect per-location sequence counts for the last week.

    First refreshes the world-wide counts via world_growth(), then groups last
    week's precisely-dated sequences by the three-level location and stores the
    (location, count) records in ``all_geo_last_week_dict['all_geo_last_week']``,
    sorted by count descending.  Finally chains into the per-lineage step.
    """
    print("inizio request geo last week")

    world_growth(today_date, location_granularity)

    # Last week's window (the original no-op `.replace(day=...)` removed).
    last_week_date = today_date - timedelta(days=7)

    query = [
        {
            "$match": {
                'collection_date': {
                    '$lt': today_date,
                    '$gte': last_week_date
                },
                'c_coll_date_prec': {
                    '$eq': 2
                },
            },
        },
        {
            "$group": {"_id": {f'{location_granularity[0]}': f'$location.{location_granularity[0]}',
                               f'{location_granularity[1]}': f'$location.{location_granularity[1]}',
                               f'{location_granularity[2]}': f'$location.{location_granularity[2]}',
                               },
                       "count": {"$sum": 1}
                       }
        },
        {"$sort":
            {"count": -1}
         }
    ]
    results = collection_db.aggregate(query, allowDiskUse=True)

    # Flatten each grouped result into {loc0, loc1, loc2, count}.
    list_geo_dict = [
        {f'{location_granularity[0]}': single_item['_id'][f'{location_granularity[0]}'],
         f'{location_granularity[1]}': single_item['_id'][f'{location_granularity[1]}'],
         f'{location_granularity[2]}': single_item['_id'][f'{location_granularity[2]}'],
         'count': single_item['count']}
        for single_item in results
    ]

    all_geo_last_week_dict['all_geo_last_week'] = list_geo_dict
    print("fine request geo last week")

    get_all_lineage_for_each_geo(location_granularity, today_date)
# Per-location lists of last week's lineages (>1% of local sequences), keyed by
# "<loc0>_<loc1>_<loc2>"; filled by get_all_lineage_for_each_geo().
all_lineage_for_geo_last_week = {}
def get_all_lineage_for_each_geo(location_granularity, today_date):
    """For every location seen last week, record lineages comprising >1% of its sequences.

    Reads the locations cached by get_all_geo_last_week(), groups last week's
    sequences of each location by lineage, keeps lineages above 1% of that
    location's total, and stores them in ``all_lineage_for_geo_last_week``
    keyed by "<loc0>_<loc1>_<loc2>".  Finally chains into the mutation step.
    """
    print("inizio request all_lineages")

    # Last week's window (the original no-op `.replace(day=...)` removed).
    last_week_date = today_date - timedelta(days=7)

    geo_dict = all_geo_last_week_dict['all_geo_last_week']
    for geo in geo_dict:
        location_0 = geo[f'{location_granularity[0]}']
        location_1 = geo[f'{location_granularity[1]}']
        location_2 = geo[f'{location_granularity[2]}']

        query = [
            {
                "$match": {
                    'collection_date': {
                        '$lt': today_date,
                        '$gte': last_week_date
                    },
                    'c_coll_date_prec': {
                        '$eq': 2
                    },
                    f'location.{location_granularity[0]}': {
                        '$eq': location_0
                    },
                    f'location.{location_granularity[1]}': {
                        '$eq': location_1
                    },
                    f'location.{location_granularity[2]}': {
                        '$eq': location_2
                    }
                },
            },
            {
                "$group": {"_id": {'lineage': '$covv_lineage'},
                           "count": {"$sum": 1}
                           }
            },
            {"$sort":
                {"count": -1}
             }
        ]
        results = collection_db.aggregate(query, allowDiskUse=True)

        # The location's total is loop-invariant: resolve it once per location
        # instead of re-scanning the geo list for every lineage (was O(n^2)).
        total_sequences = 0
        for loc_geo in all_geo_last_week_dict['all_geo_last_week']:
            if loc_geo[f'{location_granularity[0]}'] == location_0 \
                    and loc_geo[f'{location_granularity[1]}'] == location_1 \
                    and loc_geo[f'{location_granularity[2]}'] == location_2:
                total_sequences = loc_geo['count']

        list_geo_lineage_dict = []
        for single_item in results:
            # Keep lineages that make up more than 1% of this location's sequences.
            if single_item['count'] / total_sequences > 0.01:
                list_geo_lineage_dict.append(
                    {f'{location_granularity[0]}': location_0,
                     f'{location_granularity[1]}': location_1,
                     f'{location_granularity[2]}': location_2,
                     'lineage': single_item['_id']['lineage'],
                     'count': single_item['count']})

        name = str(location_0) + '_' + str(location_1) + '_' + str(location_2)
        all_lineage_for_geo_last_week[name] = list_geo_lineage_dict

    print("fine request all_lineages")

    get_all_mutation_for_lineage_for_each_geo_previous_week(location_granularity, today_date)
def populate_aggregate_place_dict(full_object, location_granularity, today_date, granularity):
    """Merge one (place, lineage, mutation) record into the module-level
    aggregation dictionaries.

    Depending on ``granularity`` the record is aggregated per
    ``location_1`` (granularity == 1), per ``location_0`` (granularity == 0)
    or world-wide (any other value).  Two module globals are updated in
    place: ``dict_aggregated_place`` keyed by place+lineage+mutation, and
    ``dict_count_aggregated_place`` keyed by place+lineage, which collects
    the population denominators once per sub-place.

    Args:
        full_object: dict with the three location fields, 'lineage', 'mut'
            and the weekly counts / denominators to merge in.
        location_granularity: sequence with the three location field names.
        today_date: date stamped onto newly created aggregate entries.
        granularity: aggregation level (1, 0, or anything else for world).
    """
    loc0 = full_object[f'{location_granularity[0]}']
    loc1 = full_object[f'{location_granularity[1]}']
    loc2 = full_object[f'{location_granularity[2]}']
    lineage = full_object['lineage']
    mut = full_object['mut']
    world_denom_prev = full_object['total_seq_world_prev_week']
    world_denom_this = full_object['total_seq_world_this_week']
    count_prev = full_object['count_prev_week']
    count_this = full_object['count_this_week']
    pop_denom_prev = full_object['total_seq_pop_prev_week']
    pop_denom_this = full_object['total_seq_pop_this_week']

    # Key for the (place, lineage, mutation) aggregate.
    if granularity == 1:
        agg_key = loc1 + lineage + mut
    elif granularity == 0:
        agg_key = loc0 + lineage + mut
    else:
        agg_key = lineage + mut

    if agg_key in dict_aggregated_place:
        # Already seen: just accumulate the weekly counts.
        entry = dict_aggregated_place[agg_key]
        entry['count_prev_week'] = entry['count_prev_week'] + count_prev
        entry['count_this_week'] = entry['count_this_week'] + count_this
    else:
        # First occurrence: build the aggregate skeleton.  Percentages and
        # population denominators are filled in later (see
        # get_final_object_aggregated_place).
        entry = {
            'lineage': lineage,
            'mut': mut,
            'total_seq_world_prev_week': world_denom_prev,
            'total_seq_world_this_week': world_denom_this,
            'total_seq_pop_prev_week': 0,
            'total_seq_pop_this_week': 0,
            'count_prev_week': count_prev,
            'count_this_week': count_this,
            'perc_prev_week': 0,
            'perc_this_week': 0,
            'diff_perc': 0,
            'date': today_date.strftime("%Y-%m-%d"),
        }
        if granularity == 1:
            entry[f'{location_granularity[0]}'] = loc0
            entry[f'{location_granularity[1]}'] = loc1
            entry[f'{location_granularity[2]}'] = None
            entry['granularity'] = location_granularity[granularity]
            entry['location'] = loc1
        elif granularity == 0:
            entry[f'{location_granularity[0]}'] = loc0
            entry[f'{location_granularity[1]}'] = None
            entry[f'{location_granularity[2]}'] = None
            entry['granularity'] = location_granularity[granularity]
            entry['location'] = loc0
        else:
            entry[f'{location_granularity[0]}'] = None
            entry[f'{location_granularity[1]}'] = None
            entry[f'{location_granularity[2]}'] = None
            entry['granularity'] = 'world'
            entry['location'] = 'World'
        dict_aggregated_place[agg_key] = entry

    # Key for the per-lineage denominator bookkeeping (no mutation part).
    if granularity == 1:
        count_key = loc1 + lineage
    elif granularity == 0:
        count_key = loc0 + lineage
    else:
        count_key = lineage

    if count_key in dict_count_aggregated_place:
        counter = dict_count_aggregated_place[count_key]
        # Each sub-place must contribute its denominator exactly once.
        if loc2 not in counter['array_sub_place']:
            counter['total_seq_pop_prev_week'] = \
                counter['total_seq_pop_prev_week'] + pop_denom_prev
            counter['total_seq_pop_this_week'] = \
                counter['total_seq_pop_this_week'] + pop_denom_this
            counter['array_sub_place'].append(loc2)
    else:
        dict_count_aggregated_place[count_key] = {
            'array_sub_place': [loc2],
            'total_seq_pop_prev_week': pop_denom_prev,
            'total_seq_pop_this_week': pop_denom_this,
        }
def get_final_object_aggregated_place(single_obj):
    """Fill in population denominators and weekly percentages for one
    aggregated (place, lineage, mutation) record.

    The denominators are looked up in the module global
    ``dict_count_aggregated_place``.  The record is updated in place and
    also returned for convenience.
    """
    record = single_obj  # mutated in place
    level = record['granularity']
    # Denominator key is <place><lineage>, or just <lineage> for 'world'.
    if level == 'world':
        denom_key = record['lineage']
    else:
        denom_key = record[level] + record['lineage']
    denominators = dict_count_aggregated_place[denom_key]
    prev_total = denominators['total_seq_pop_prev_week']
    this_total = denominators['total_seq_pop_this_week']
    record['total_seq_pop_prev_week'] = prev_total
    record['total_seq_pop_this_week'] = this_total
    # Percentages, guarding against an empty population (denominator == 0).
    record['perc_prev_week'] = \
        (record['count_prev_week'] / prev_total) * 100 if prev_total != 0 else 0
    record['perc_this_week'] = \
        (record['count_this_week'] / this_total) * 100 if this_total != 0 else 0
    record['diff_perc'] = abs(record['perc_prev_week'] - record['perc_this_week'])
    return record
def get_all_mutation_for_lineage_for_each_geo_previous_week(location_granularity, today_date):
print("inizio request all_mutation all_lineages previous week")
last_week_date = today_date.replace(day=today_date.day) - timedelta(days=7)
previous_week_date = last_week_date.replace(day=last_week_date.day) - timedelta(days=7)
geo_dict = all_geo_last_week_dict['all_geo_last_week']
all_all_mut_for_lineage_for_geo = []
denominator_world_prev_week = world_growth_obj['world_growth']['count_prev_week']
denominator_world_this_week = world_growth_obj['world_growth']['count_this_week']
# all_all_mut_for_lineage_for_geo = [world_growth_obj['world_growth']]
i = 0
total_num_of_geo = len(geo_dict)
for geo in geo_dict:
i = i + 1
print("GEO: ", i, " / ", total_num_of_geo, '_time_: ', today_date)
location_0 = geo[f'{location_granularity[0]}']
location_1 = geo[f'{location_granularity[1]}']
location_2 = geo[f'{location_granularity[2]}']
# if(location_0 == 'Europe' and location_1 == 'Italy' and (location_2 == 'Abruzzo' or location_2 == 'Lombardia')
# or (location_0 == 'North America' and location_1 == 'Canada' and location_2 == 'Alberta')):
name = str(location_0) + '_' + str(location_1) + '_' + str(location_2)
for lineage_obj in all_lineage_for_geo_last_week[name]:
lineage = lineage_obj['lineage']
# print("mut", location_0, location_1, location_2, lineage)
all_mutation_for_lineage_for_geo_previous_week = [world_growth_obj['world_growth']]
lineage_growth_result = lineage_growth(today_date, location_granularity,
location_0, location_1, location_2, lineage)
all_mutation_for_lineage_for_geo_previous_week.append(lineage_growth_result)
# all_all_mut_for_lineage_for_geo.append(lineage_growth_result)
denominator_prev_week = lineage_growth_result['count_prev_week']
denominator_this_week = lineage_growth_result['count_this_week']
# if lineage == 'B.1.617.2':
all_mutations_dict = get_all_mutation_not_characteristics(lineage,
location_0, location_1, location_2,
today_date, location_granularity, )
mut_dict = all_mutations_dict
for mut in mut_dict:
if '*' not in mut and '_-' not in mut: # and 'Spike' in mut:
m = PATTERN.fullmatch(mut)
if m:
protein, orig, loc, alt = m.groups()
orig = orig.replace('stop', '*')
alt = alt.replace('stop', '*')
loc = int(loc)
if orig == 'ins':
orig = '-' * len(alt)
t = 'INS'
elif alt == 'del':
alt = '-'
t = 'DEL'
else:
t = 'SUB'
length = len(alt)
new_mut = {'pro': protein, 'org': orig,
'loc': loc, 'alt': alt,
'typ': t, 'len': length}
query = {
'c_coll_date_prec': {
'$eq': 2
},
f'location.{location_granularity[0]}': {
'$eq': location_0
},
f'location.{location_granularity[1]}': {
'$eq': location_1
},
f'location.{location_granularity[2]}': {
'$eq': location_2
},
'covv_lineage': {
'$eq': lineage
},
'muts': {'$elemMatch': {
'pro': new_mut['pro'],
'loc': new_mut['loc'],
'alt': new_mut['alt'],
'org': new_mut['org'],
| |
# <gh_stars>1-10  (repository-metadata artifact; commented out — not valid Python)
"""Module with widgets to control GeoGraphViewer."""
from __future__ import annotations
import logging
from typing import Dict, Optional
import ipywidgets as widgets
import traitlets
from geograph.visualisation import geoviewer, widget_utils
class BaseControlWidget(widgets.Box):
    """Base class for control widgets."""

    def __init__(self, viewer: geoviewer.GeoGraphViewer) -> None:
        """Base class for control widgets.

        Stores the viewer and wires a per-class logger to the viewer's log
        handler so records are accessible via self.log_handler.show_logs().

        Args:
            viewer (geoviewer.GeoGraphViewer): GeoGraphViewer to control
        """
        super().__init__()
        self.viewer = viewer
        # Logger is named after the concrete subclass and shares both the
        # viewer's level and its handler.
        logger = logging.getLogger(type(self).__name__)
        logger.setLevel(viewer.logger.level)
        self.log_handler = viewer.log_handler
        logger.addHandler(self.log_handler)
        self.logger = logger
        self.logger.info("BaseControlWidget initialised.")
class GraphControlWidget(BaseControlWidget):
    """Widget with full set of controls for GeoGraphViewer."""

    def __init__(self, viewer: geoviewer.GeoGraphViewer) -> None:
        """Widget with full set of controls for GeoGraphViewer.

        This is the control widget added to GeoGraphViewer. It is directly
        added to the viewer and combines other widgets such as visibility
        control, metrics, settings and more into a tabbed layout.

        Args:
            viewer (geoviewer.GeoGraphViewer): GeoGraphViewer to control
        """
        super().__init__(viewer=viewer)
        # Individual (sub-)widgets that fill the tabs
        visibility_widget = RadioVisibilityWidget(viewer=self.viewer)
        metrics_widget = MetricsWidget(viewer=self.viewer)
        settings_widget = SettingsWidget(viewer=self.viewer)
        # Metrics pane is sized to 30% of the viewer's pixel height.
        viewer_height = int(viewer.layout.height.replace("px", ""))
        metrics_widget.layout.height = "{}px".format(viewer_height * 0.3)
        # On small screens metrics get a dedicated tab instead of sharing
        # the "View" tab.
        if self.viewer.small_screen:
            view_tab = [visibility_widget]
        else:
            view_tab = [visibility_widget, widget_utils.HRULE, metrics_widget]
        # One dict entry per tab; insertion order defines tab order.
        tabs = dict()
        tabs["View"] = widgets.VBox(view_tab)
        if self.viewer.small_screen:
            tabs["Metrics"] = metrics_widget
        tabs["Settings"] = settings_widget
        tabs["Log"] = self.log_handler.out
        tab_widget = widgets.Tab()
        tab_widget.children = list(tabs.values())
        for idx, tab_title in enumerate(tabs):
            tab_widget.set_title(idx, tab_title)
        self.children = [tab_widget]
class RadioVisibilityWidget(BaseControlWidget):
    """Widget to control visibility of graphs in GeoGraphViewer with radio buttons."""

    def __init__(self, viewer: geoviewer.GeoGraphViewer) -> None:
        """Widget to control visibility of graphs in GeoGraphViewer with radio buttons.

        This widget controls the visibility of graph as well as current map layers of
        GeoGraphViewer. Further, it sets the current_graph attribute of GeoGraphViewer
        that controls its state and is used by other widgets.

        Args:
            viewer (geoviewer.GeoGraphViewer): GeoGraphViewer to control
        """
        super().__init__(viewer=viewer)
        # Resetting all prior visibility control
        self.viewer.hide_all_layers()
        widget = self.assemble_widget()
        self.children = [widget]

    def assemble_widget(self) -> widgets.Widget:
        """Assemble all sub-widgets making up VisibilityWidget into layout.

        Returns:
            widgets.Widget: final widget to be added to GeoGraphViewer
        """
        graph_selection = self._create_layer_selection(layer_type="graphs")
        map_selection = self._create_layer_selection(layer_type="maps")
        view_buttons = self.create_visibility_buttons()
        widget = widgets.VBox(
            [
                widget_utils.create_html_header("Graph Selection"),
                graph_selection,
                widget_utils.HRULE,
                widget_utils.create_html_header("Map Selection"),
                map_selection,
                widget_utils.HRULE,
                widget_utils.create_html_header("View Selection"),
                view_buttons,
            ]
        )
        return widget

    def _create_layer_selection(
        self, layer_type: str = "graphs"
    ) -> widgets.RadioButtons:
        """Create radio buttons to enable layer selection.

        Args:
            layer_type (str, optional): one of "graphs" or "maps". Defaults to "graphs".

        Returns:
            widgets.RadioButtons: buttons to select from available layers of layer_type

        Raises:
            ValueError: if layer_type is neither "graphs" nor "maps".
        """
        # Validate up front: the original code left viewer_attr unbound for
        # an unknown layer_type, which surfaced later as a NameError.
        if layer_type == "graphs":
            viewer_attr = "current_graph"
        elif layer_type == "maps":
            viewer_attr = "current_map"
        else:
            raise ValueError(
                "layer_type must be 'graphs' or 'maps', got {!r}".format(layer_type)
            )
        layer_list = []
        layers = list(self.viewer.layer_dict[layer_type].items())
        for layer_name, layer in layers:
            layer_str = layer_name
            # Habitat graphs are labelled with their parent graph for clarity.
            if layer_type == "graphs" and layer["is_habitat"]:
                layer_str += " (habitat of {})".format(layer["parent"])
            layer_list.append((layer_str, layer_name))
        radio_buttons = widgets.RadioButtons(
            options=layer_list, description="", layout={"width": "max-content"}
        )
        # Keep the buttons and the viewer state in sync (bidirectional).
        widgets.link((radio_buttons, "value"), (self.viewer, viewer_attr))
        return radio_buttons

    def create_visibility_buttons(self) -> widgets.Box:
        """Create buttons that toggle the visibility of current graph and map.

        The visibility of the current graph (set in self.current_graph), its subparts
        (e.g. components, disconnected nodes, etc.) and the map (set in
        self.current_map) can be controlled with the returned buttons. Separate
        buttons for the polygons and the components of the graph are included in the
        returned box.

        Returns:
            widgets.Box: box with button widgets
        """
        view_graph_btn = LayerButtonWidget(
            description="Graph",
            tooltip="View graph",
            icon="project-diagram",
            layer_type="graphs",
            layer_subtype="graph",
            viewer=self.viewer,
        )
        view_pgon_btn = LayerButtonWidget(
            description="Polygons",
            tooltip="View polygons",
            icon="shapes",
            layer_type="graphs",
            layer_subtype="pgons",
            viewer=self.viewer,
        )
        view_components_btn = LayerButtonWidget(
            description="Components",
            tooltip=(
                "View components of graph. If current graph is habitat, components show"
                " the reach of an animal in a component (based on max_travel_distance)."
            ),
            icon="circle",
            layer_type="graphs",
            layer_subtype="components",
            viewer=self.viewer,
        )
        view_map_btn = LayerButtonWidget(
            description="Map",
            tooltip="View map",
            icon="globe-africa",
            layer_type="maps",
            layer_subtype="map",
            viewer=self.viewer,
        )
        view_disconnected_nodes_btn = LayerButtonWidget(
            description="Disconnected patches",
            tooltip="View disconnected patches (patches with no edge)",
            icon="exclamation-circle",
            layer_type="graphs",
            layer_subtype="disconnected_nodes",
            viewer=self.viewer,
        )
        view_poorly_con_nodes_btn = LayerButtonWidget(
            description="Poorly conn. patches",
            tooltip="View poorly connected patches (patches with single edge)",
            icon="exclamation-circle",
            layer_type="graphs",
            layer_subtype="poorly_connected_nodes",
            viewer=self.viewer,
        )
        node_dynamics_btn = LayerButtonWidget(
            description="Show node dynamics",
            tooltip="Show node dynamics.",
            icon="exclamation-circle",
            layer_type="graphs",
            layer_subtype="node_dynamics",
            viewer=self.viewer,
        )
        node_change_btn = LayerButtonWidget(
            description="Show node growth",
            tooltip="View node absolute growth. See hover widget for patch values.",
            icon="exclamation-circle",
            layer_type="graphs",
            layer_subtype="node_change",
            viewer=self.viewer,
        )
        # Graph, polygons and map are visible by default.
        view_graph_btn.value = True
        view_pgon_btn.value = True
        view_map_btn.value = True
        # NOTE: a dead `widgets.TwoByTwoLayout(...)` assignment that was
        # immediately overwritten has been removed here.
        buttons = widgets.VBox(
            [
                widget_utils.create_html_header("Main", level=2),
                widgets.HBox([view_graph_btn, view_pgon_btn, view_map_btn]),
                widget_utils.create_html_header("Insights", level=2),
                widgets.VBox(
                    [
                        view_components_btn,
                        view_disconnected_nodes_btn,
                        view_poorly_con_nodes_btn,
                        node_dynamics_btn,
                        node_change_btn,
                    ]
                ),
            ]
        )
        return buttons
class LayerButtonWidget(widgets.ToggleButton):
    """Toggle button to change the visibility of GeoGraphViewer layer."""
    def __init__(
        self,
        viewer: geoviewer.GeoGraphViewer,
        layer_type: str,
        layer_subtype: str,
        layer_name: Optional[str] = None,
        link_to_current_state: bool = True,
        layout: Optional[widgets.Layout] = None,
        **kwargs,
    ) -> None:
        """Toggle button to change the visibility of GeoGraphViewer layer.
        Args:
            viewer (geoviewer.GeoGraphViewer): GeoGraphViewer to control
            layer_type (str): type of layer
            layer_subtype (str): subtype of layer
            layer_name (Optional[str], optional): name of layer. Defaults to None. If
                None, the layer_name is automatically set to viewer.current_graph or
                viewer.current_map (depending on layer_type).
            link_to_current_state (bool, optional): whether a traitlets link between
                the current state of the viewer and the button layer_name should be
                created. Defaults to True.
            layout (Optional[widgets.Layout], optional): layout of the button.
                Defaults to None.
        """
        self.viewer = viewer
        # Setting log with handler, that allows access to log
        # via self.log_handler.show_logs()
        self.logger = logging.getLogger(type(self).__name__)
        self.logger.setLevel(self.viewer.logger.level)
        self.log_handler = self.viewer.log_handler
        self.logger.addHandler(self.log_handler)
        # Default layout: button sizes itself to fit its container.
        if layout is None:
            layout = widgets.Layout(height="auto", width="auto")
        super().__init__(layout=layout, **kwargs)
        # Extra synced traits that identify which layer this button controls.
        # They must be declared before the assignments below.
        self.add_traits(
            layer_type=traitlets.Unicode().tag(sync=True),
            layer_subtype=traitlets.Unicode().tag(sync=True),
            layer_name=traitlets.Unicode().tag(sync=True),
        )
        self.layer_subtype = layer_subtype
        self.layer_type = layer_type
        if layer_type == "maps":
            if layer_name is None:
                layer_name = self.viewer.current_map
            self.layer_name = layer_name
            # If current map changes the function of this button changes
            if link_to_current_state:
                widgets.dlink((self.viewer, "current_map"), (self, "layer_name"))
        elif layer_type == "graphs":
            if layer_name is None:
                layer_name = self.viewer.current_graph
            self.layer_name = layer_name
            # One-way link: viewer's current graph drives this button's target.
            if link_to_current_state:
                widgets.dlink((self.viewer, "current_graph"), (self, "layer_name"))
        # Observer registered only after the trait assignments above, so the
        # initial setup does not trigger the callback.
        self.observe(self._handle_view, names=["value", "layer_name"])
        self._check_layer_exists()
        self.logger.info("Initialised.")
    def _handle_view(self, change: Dict) -> None:
        """Callback function for trait events in view buttons"""
        try:
            self.logger.info(
                "LayerButtonWidget callback started for %s of %s. (type: %s)",
                self.layer_subtype,
                self.layer_name,
                self.layer_type,
            )
            owner = change.owner  # Button that is clicked or changed
            # Accessed if button is clicked (its value changed)
            if change.name == "value":
                active = change.new
                self.viewer.set_layer_visibility(
                    owner.layer_type, owner.layer_name, owner.layer_subtype, active
                )
                self.viewer.request_layer_update()
            # Accessed if layer that the button is assigned to was changed
            elif change.name == "layer_name":
                new_layer_name = change.new
                old_layer_name = change.old
                # make old layer invisible
                self.viewer.set_layer_visibility(
                    owner.layer_type, old_layer_name, owner.layer_subtype, False
                )
                # make new layer visible
                self.viewer.set_layer_visibility(
                    owner.layer_type, new_layer_name, owner.layer_subtype, owner.value
                )
                self._check_layer_exists()
                # Note: there is a potential for speed improvement by not updating map
                # layers for each button separately, as is done here.
                self.viewer.request_layer_update()
        # Broad catch is deliberate: an exception escaping an ipywidgets
        # callback would be swallowed silently, so log it instead.
        except:  # pylint: disable=bare-except
            self.logger.exception(
                "Exception in LayerButtonWidget callback on button click or change."
            )
    def _check_layer_exists(self) -> None:
        """Check if layer exists and hide button if it doesn't."""
        layer_exists = (
            self.viewer.layer_dict[self.layer_type][self.layer_name][
                self.layer_subtype
            ]["layer"]
            is not None
        )
        # hide button if layer doesn't exist
        if layer_exists:
            self.layout.display = "block"
        else:
            self.layout.display = "none"
            self.logger.debug(
                (
                    "LayerButtonWidget hidden for %s of %s. "
                    "(type: %s). Layer doesn't exist."
                ),
                self.layer_subtype,
                self.layer_name,
                self.layer_type,
            )
class CheckboxVisibilityWidget(BaseControlWidget):
"""Widget to control visibility of graphs in GeoGraphViewer with checkboxes."""
def __init__(self, viewer: geoviewer.GeoGraphViewer) -> None:
"""Widget to control visibility of graphs in GeoGraphViewer with checkboxes.
Note: this is currently not used by the main GraphControlWidget.
Args:
viewer (geoviewer.GeoGraphViewer): GeoGraphViewer to control
"""
super().__init__(viewer=viewer)
widget = self._create_checkboxes()
self.children = [widget]
def _create_checkboxes(self) -> widgets.VBox:
"""Create widget with checkbox for each layer.
Returns:
widgets.VBox: widget
"""
checkboxes = []
pgons_checkboxes = []
graph_checkboxes = []
graphs = [
(name, "graphs", layer_subtype, graph)
for name, graph in self.viewer.layer_dict["graphs"].items()
for layer_subtype in ["graph", "pgons"]
]
maps = [
(name, "maps", "map", map_layer["map"])
for name, map_layer in self.viewer.layer_dict["maps"].items()
]
# Add checkboxes for all maps and graphs (including habitats)
for idx, (layer_name, layer_type, layer_subtype, layer_dict) in enumerate(
maps + graphs
):
layout = widgets.Layout(padding="0px 0px 0px 0px")
# Indent habitat checkboxes
if layer_type == "graphs":
if layer_dict["is_habitat"]:
layout = widgets.Layout(padding="0px 0px 0px 25px")
checkbox = widgets.Checkbox(
value=True,
description="{} ({})".format(layer_name, layer_subtype),
disabled=False,
indent=False,
layout=layout,
)
checkbox.add_traits(
layer_type=traitlets.Unicode().tag(sync=True),
layer_subtype=traitlets.Unicode().tag(sync=True),
layer_name=traitlets.Unicode().tag(sync=True),
)
checkbox.layer_type = layer_type
checkbox.layer_name = layer_name
checkbox.layer_subtype | |
# -*- coding: utf-8 -*-
#try:
# # Python 2.7
# from collections import OrderedDict
#except:
# # Python 2.6
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon import current, A, DIV, H3, TAG, SQLFORM, IS_NOT_EMPTY, IS_EMAIL
from gluon.storage import Storage
def config(settings):
"""
Template settings for DRR Project Portal
http://eden.sahanafoundation.org/wiki/Deployments/DRRProjectPortal
"""
T = current.T
# Base Settings
# Pre-Populate
settings.base.prepopulate = ("DRRPP", "default/users")
settings.base.system_name = T("DRR Project Portal")
settings.base.system_name_short = T("DRRPP")
# Theme (folder to use for views/layout.html)
settings.base.theme = "DRRPP"
# =============================================================================
# Auth Settings
# Security Policy
settings.security.policy = 6 # Realm
settings.security.map = True
# Do new users need to verify their email address?
settings.auth.registration_requires_verification = True
# Do new users need to be approved by an administrator prior to being able to login?
settings.auth.registration_requires_approval = True
# Uncomment this to request the Organisation when a user registers
settings.auth.registration_requests_organisation = True
settings.auth.registration_pending = \
"""Registration awaiting approval from Administrator or Organisation Contact.
A confirmation email will be sent to you once approved.
For enquiries contact %s""" % settings.get_mail_approver()
# Record Approval
settings.auth.record_approval = True
settings.auth.record_approval_required_for = ("org_organisation",
"project_project",
"project_framework",
)
# =============================================================================
# L10n Settings
settings.L10n.languages = OrderedDict([
("en-gb", "English"),
])
settings.L10n.default_language = "en-gb"
# Default timezone for users
settings.L10n.utc_offset = "UTC +0700"
# Number formats (defaults to ISO 31-0)
# Decimal separator for numbers (defaults to ,)
settings.L10n.decimal_separator = "."
# Thousands separator for numbers (defaults to space)
settings.L10n.thousands_separator = ","
# Unsortable 'pretty' date format
#settings.L10n.date_format = "%d-%b-%Y"
# =============================================================================
# Finance Settings
settings.fin.currencies = {
#"AUD" : T("Australian Dollars"),
#"CAD" : T("Canadian Dollars"),
"EUR" : T("Euros"), # Needed for IFRC RMS interop
#"GBP" : T("Great British Pounds"),
"PHP" : T("Philippine Pesos"), # Needed for IFRC RMS interop
"CHF" : T("Swiss Francs"), # Needed for IFRC RMS interop
"USD" : T("United States Dollars"),
"NZD" : T("New Zealand Dollars"),
}
# =============================================================================
# GIS Settings
# Theme
settings.gis.map_height = 600
settings.gis.map_width = 960 # container_12
# Display Resources recorded to Admin-Level Locations on the map
# @ToDo: Move into gis_config?
settings.gis.display_L0 = True
# Deployment only covers Asia-Pacific
settings.gis.countries = [ "AF", "AU", "BD", "BN", "CK", "CN", "FJ", "FM", "HK", "ID", "IN", "JP", "KH", "KI", "KP", "KR", "LA", "MH", "MM", "MN", "MV", "MY", "NP", "NZ", "PG", "PH", "PK", "PW", "SB", "SG", "SL", "TH", "TL", "TO", "TV", "TW", "VN", "VU", "WS"]
# Resources which can be directly added to the main map
settings.gis.poi_create_resources = None
# =============================================================================
# Organisation Settings
# Enable the use of Organisation Branches
# RMS-compatibility
settings.org.branches = True
# =============================================================================
# Project Settings
# Uncomment this to use settings suitable for a global/regional organisation (e.g. DRR)
settings.project.mode_3w = True
# Uncomment this to use DRR (Disaster Risk Reduction) extensions
settings.project.mode_drr = True
# Uncomment this to use Codes for projects
settings.project.codes = True
# Uncomment this to call project locations 'Communities'
#settings.project.community = True
# Uncomment this to enable Hazards in 3W projects
settings.project.hazards = True
# Uncomment this to create a project_location for each country which is a project is implemented in
# - done via Custom Form instead
#settings.project.locations_from_countries = True
# Uncomment this to use multiple Budgets per project
#settings.project.multiple_budgets = True
# Uncomment this to use multiple Organisations per project
settings.project.multiple_organisations = True
# Uncomment this to disable Sectors in projects
settings.project.sectors = False
# Uncomment this to enable Themes in 3W projects
settings.project.themes = True
# Uncomment this to customise
# Links to Filtered Components for Donors & Partners
settings.project.organisation_roles = {
1: T("Lead Organization"),
2: T("Partner Organization"),
3: T("Donor"),
#4: T("Customer"), # T("Beneficiary")?
#5: T("Supplier"),
9: T("Partner Organization"), # Needed for IFRC RMS interop ("Partner National Society")
}
# =============================================================================
# UI Settings
# Enable this for a UN-style deployment
settings.ui.cluster = True
settings.ui.hide_report_options = False
settings.ui.hide_report_filter_options = True
# Uncomment to restrict the export formats available
settings.ui.export_formats = ["xls", "xml"]
# Uncomment to include an Interim Save button on CRUD forms
settings.ui.interim_save = True
# Uncomment to disable responsive behavior of datatables
# - Disabled until tested
settings.ui.datatables_responsive = False
# -----------------------------------------------------------------------------
# Formstyle
def formstyle_row(id, label, widget, comment, hidden=False):
if hidden:
hide = "hide"
else:
hide = ""
row = DIV(DIV(label,
_id=id + "_label",
_class="w2p_fl"),
DIV(widget,
_id=id + "_widget",
_class="w2p_fw"),
DIV(comment,
_id=id + "_comment",
_class="w2p_fc"),
_id=id,
_class = "w2p_r %s" % hide,
)
return row
# -----------------------------------------------------------------------------
def formstyle(self, xfields):
"""
Use new Web2Py formstyle to generate form using DIVs & CSS
CSS can then be used to create MUCH more flexible form designs:
- Labels above vs. labels to left
- Multiple Columns
@ToDo: Requires further changes to code to use
"""
form = DIV()
for id, a, b, c, in xfields:
form.append(formstyle_row(id, a, b, c))
return form
settings.ui.formstyle_row = formstyle_row
#settings.ui.formstyle = formstyle # Breaks e.g. org/organisation/create
settings.ui.formstyle = formstyle_row
# -----------------------------------------------------------------------------
def customise_project_project_controller(**attr):
db = current.db
s3db = current.s3db
s3 = current.response.s3
tablename = "project_project"
# Load normal model
table = s3db[tablename]
# Custom Components
s3db.add_components(tablename,
project_drrpp={"joinby":"project_id",
"multiple": False,
},
project_output="project_id",
doc_document=(# Files
{"name": "file",
"joinby": "doc_id",
"filterby": "url",
"filterfor": ("", None),
},
# Links
{"name": "url",
"joinby": "doc_id",
"filterby": "file",
"filterfor": ("", None),
},
),
)
# Custom Fields
table.name.label = T("Project Title")
s3db.project_project.budget.label = T("Total Funding (USD)")
location_id = s3db.project_location.location_id
location_id.label = ""
# Limit to just Countries
location_id.requires = s3db.gis_country_requires
# Use dropdown, not AC
location_id.widget = None
# In DRRPP this is a free field
table = s3db.project_organisation
table.comments.label = T("Role")
table.comments.widget = SQLFORM.widgets.string.widget
table.amount.label = T("Amount")
table = s3db.doc_document
table.file.widget = lambda field, value, download_url: \
SQLFORM.widgets.upload.widget(field, value, download_url, _size = 15)
table.comments.widget = SQLFORM.widgets.string.widget
# If not logged in, contact person is required
logged_in = current.auth.is_logged_in()
if not logged_in:
table = s3db.project_drrpp
table.focal_person.required = True
table.email.required = True
table.email.requires = IS_EMAIL()
# Custom dataTable
s3["dataTable_dom"] = 'ripl<"dataTable_table"t>p'
# Don't show export buttons for XLS/XML
s3.formats = Storage(xls=None, xml=None)
# Remove rheader
attr["rheader"] = None
# Only show 10 Project by default to improve load time
attr["dt_lengthMenu"] = [[ 10, 50, -1], [ 10, 50, T("All")]]
s3.dataTable_pageLength = 10
# Custom PreP
standard_prep = s3.prep
def custom_prep(r):
resource = r.resource
# Call standard prep
if callable(standard_prep):
result = standard_prep(r)
if not result:
return False
# Customise list_fields
if r.method == "review":
list_fields = ["id",
"created_on",
"modified_on",
"name",
"start_date",
(T("Countries"), "location.location_id"),
(T("Hazards"), "hazard.name"),
(T("Lead Organization"), "organisation_id"),
(T("Donors"), "donor.organisation_id"),
]
elif r.representation == "xls":
# All readable Fields should be exported
list_fields = ["id",
"name",
"code",
"description",
"status_id",
"start_date",
"end_date",
"drrpp.duration",
(T("Countries"), "location.location_id"),
"drrpp.L1",
(T("Hazards"), "hazard.name"),
(T("Themes"), "theme.name"),
"objectives",
"drrpp.activities",
"output.name",
"drr.hfa",
"drrpp.rfa",
"drrpp.pifacc",
"drrpp.jnap",
(T("Lead Organization"), "organisation_id"),
(T("Partners"), "partner.organisation_id"),
(T("Donors"), "donor.organisation_id"),
"budget",
"currency",
"drrpp.focal_person",
"drrpp.organisation_id",
"drrpp.email",
"url.url",
"drrpp.parent_project",
"comments",
]
if logged_in:
list_fields.extend(["created_by",
"created_on",
"modified_by",
"modified_on",
])
else:
list_fields = ["id",
"name",
"start_date",
(T("Countries"), "location.location_id"),
(T("Hazards"), "hazard.name"),
(T("Lead Organization"), "organisation_id"),
(T("Donors"), "donor.organisation_id"),
]
resource.configure(list_fields = list_fields)
# Customise report_options
if r.method == "report":
report_fields = ["name",
(T("Countries"), "location.location_id"),
(T("Hazards"), "hazard.name"),
(T("Themes"), "theme.name"),
(T("HFA Priorities"), "drr.hfa"),
(T("RFA Priorities"), "drrpp.rfa"),
(T("Lead Organization"), "organisation_id"),
(T("Partner Organizations"), "partner.organisation_id"),
(T("Donors"), "donor.organisation_id"),
]
# Report Settings for charts
if "chart" in r.get_vars and r.representation != "json":
s3.crud_strings[tablename].title_report = T("Project Graph")
report_fact_default = "count(id)"
report_facts = [(T("Number of Projects"), "count(id)")]
show_table = False
else:
s3.crud_strings[tablename].title_report = T("Project Matrix")
report_fact_default = "count(id)"
report_facts = [(T("Number of Projects"), "count(id)"),
(T("Number of Countries"), "count(location.location_id)"),
(T("Number of Hazards"), "count(hazard.id)"),
(T("Number of Themes"), "count(theme.id)"),
(T("Number of HFA Priorities"), "count(drr.hfa)"),
(T("Number of RFA Priorities"), "count(drrpp.rfa)"),
(T("Number of Lead Organizations"), "count(organisation_id)"),
(T("Number of Partner Organizations"), "count(partner.organisation_id)"),
(T("Number of Donors"), "count(donor.organisation_id)"),
]
show_table = True
report_options = Storage(rows = report_fields,
cols = report_fields,
fact = report_facts,
defaults = Storage(rows = "hazard.name",
cols = "location.location_id",
fact = report_fact_default,
totals = True,
table = show_table,
)
)
resource.configure(report_options = report_options)
current.deployment_settings.ui.hide_report_options = True
if r.interactive:
# Don't show Update/Delete button on Search table
if r.method | |
boxes and the top & bottom bounding boxes.
lr_bbox = find_bbox_bbox(bboxes['left'], bboxes['right'])
lr_hgt = abs(lr_bbox[0][Y] - lr_bbox[1][Y])
tb_bbox = find_bbox_bbox(bboxes['top'], bboxes['bottom'])
tb_hgt = abs(tb_bbox[0][Y] - tb_bbox[1][Y])
if 0.75 <= float(lr_hgt)/float(tb_hgt) <= 1/0.75:
bal_bbox = find_bbox_bbox(*list(bboxes.values()))
for side in bboxes:
bboxes[side] = copy(bal_bbox)
else:
bboxes['left'] = copy(lr_bbox)
bboxes['right'] = copy(lr_bbox)
bboxes['top'] = copy(tb_bbox)
bboxes['bottom'] = copy(tb_bbox)
elif num_sides == 3:
# If the symbol only has pins on threee sides, then equalize the
# bounding boxes for the pins on opposite sides and leave the
# bounding box on the other side unchanged.
if 'left' not in bboxes or 'right' not in bboxes:
# Top & bottom side pins, but the left or right side is empty.
bal_bbox = find_bbox_bbox(bboxes['top'], bboxes['bottom'])
bboxes['top'] = copy(bal_bbox)
bboxes['bottom'] = copy(bal_bbox)
elif 'top' not in bboxes or 'bottom' not in bboxes:
# Left & right side pins, but the top or bottom side is empty.
bal_bbox = find_bbox_bbox(bboxes['left'], bboxes['right'])
bboxes['left'] = copy(bal_bbox)
bboxes['right'] = copy(bal_bbox)
elif num_sides == 2:
# If the symbol only has pins on two opposing sides, then equalize the
# height of the bounding boxes for each side. Leave the width unchanged.
if 'left' in bboxes and 'right' in bboxes:
bal_bbox = find_bbox_bbox(bboxes['left'], bboxes['right'])
bboxes['left'][0][Y] = bal_bbox[0][Y]
bboxes['left'][1][Y] = bal_bbox[1][Y]
bboxes['right'][0][Y] = bal_bbox[0][Y]
bboxes['right'][1][Y] = bal_bbox[1][Y]
elif 'top' in bboxes and 'bottom' in bboxes:
bal_bbox = find_bbox_bbox(bboxes['top'], bboxes['bottom'])
bboxes['top'][0][Y] = bal_bbox[0][Y]
bboxes['top'][1][Y] = bal_bbox[1][Y]
bboxes['bottom'][0][Y] = bal_bbox[0][Y]
bboxes['bottom'][1][Y] = bal_bbox[1][Y]
def draw_pins(unit_num, unit_pins, bbox, transform, fuzzy_match):
    '''Draw a column of pins rotated/translated by the transform matrix.

    unit_num: number of the symbol unit the pins belong to.
    unit_pins: sequence of (name, pins) groups for one side of the unit.
    bbox: bounding box allotted to this column of pins.
    transform: transformation matrix applied to each pin's drawing point.
    fuzzy_match: if true, use approximate matching for pin type/style/side.
    Returns the accumulated pin-definition text for the symbol.
    '''
    # String to add pin definitions to.
    pin_defn = ''
    # Find the actual height of the column of pins and subtract it from the
    # bounding box (which should be at least as large). Half the difference
    # will be the offset needed to center the pins on the side of the symbol.
    Y = 1 # Index for Y coordinate.
    pins_bb = pins_bbox(unit_pins)
    height_offset = abs(bbox[0][Y]-bbox[1][Y]) - abs(pins_bb[0][Y]-pins_bb[1][Y])
    height_offset /= 2
    height_offset -= height_offset % PIN_SPACING # Keep everything on the PIN_SPACING grid.
    # Start drawing pins from the origin.
    x = XO
    y = YO - height_offset
    for name, pins in unit_pins:
        # Detect pins with "spacer" pin numbers.
        pin_spacer = 0
        pin_num_len = 0
        for pin in pins:
            pin_num, pin_spacer = get_pin_num_and_spacer(pin)
            pin_num_len = max(pin_num_len, len(pin_num))
        # NOTE(review): only the LAST pin's spacer flag survives the loop above;
        # assumes all same-named pins share the same spacer prefix — confirm.
        y -= pin_spacer * PIN_SPACING # Add space between pins if there was a spacer.
        if pin_num_len == 0:
            continue # Omit pin if it only had a spacer prefix and no actual pin number.
        # Rotate/translate the current drawing point.
        (draw_x, draw_y) = transform * (x, y)
        # Use approximate matching to determine the pin's type, style and orientation.
        pin_type = find_closest_match(pins[0].type, PIN_TYPES, fuzzy_match)
        pin_style = find_closest_match(pins[0].style, PIN_STYLES, fuzzy_match)
        pin_side = find_closest_match(pins[0].side, PIN_ORIENTATIONS,
                                      fuzzy_match)
        # A truthy "hidden" field marks the pin invisible ('N' style prefix).
        if pins[0].hidden.lower().strip() in ['y', 'yes', 't', 'true', '1']:
            pin_style = 'N' + pin_style
        # Create all the pins with a particular name. If there are more than one,
        # they are laid on top of each other and only the first is visible.
        num_size = PIN_NUM_SIZE # First pin will be visible.
        for pin in pins:
            pin_num = str(pin.num)
            # Remove any spacer prefix on the pin numbers.
            if pin_num.startswith(PIN_SPACER_PREFIX):
                pin_num = pin_num[1:]
            # Create a pin using the pin data.
            pin_defn += PIN.format(name=pin.name,
                                   num=pin_num,
                                   x=int(draw_x),
                                   y=int(draw_y),
                                   length=PIN_LENGTH,
                                   orientation=pin_side,
                                   num_sz=num_size,
                                   name_sz=PIN_NAME_SIZE,
                                   unit_num=unit_num,
                                   pin_type=pin_type,
                                   pin_style=pin_style)
            # Turn off visibility after the first pin.
            num_size = 0
        # Move to the next pin placement location on this unit.
        y -= PIN_SPACING
    return pin_defn # Return part symbol definition with pins added.
def zero_pad_nums(s):
    """Left-pad every digit run in *s* with zeros to a fixed width of 8.

    This makes alphanumeric identifiers sort naturally: 'A2' becomes
    'A00000002' and 'A10' becomes 'A00000010', so A2 orders before A10.
    Non-string inputs are returned unchanged.
    """
    try:
        return re.sub(r'\d+', lambda mtch: mtch.group(0).zfill(8), s)
    except TypeError:
        # Input is probably not a string; hand it back untouched.
        return s
def num_key(pin):
    '''Generate a key from a pin's number so they are sorted by position on the package.'''
    # Zero-pad digit runs so e.g. pin A2 sorts before pin A10.
    _, pin_group = pin
    return zero_pad_nums(pin_group[0].num)
def name_key(pin):
    '''Generate a key from a pin's name so they are sorted more logically.'''
    # Zero-pad digit runs so e.g. adc2 sorts before adc10.
    _, pin_group = pin
    return zero_pad_nums(pin_group[0].name)
def row_key(pin):
    '''Generate a key from the order the pins were entered into the CSV file.'''
    _, pin_group = pin
    return pin_group[0].index
def draw_symbol(part_num, part_ref_prefix, part_footprint, part_manf_num, pin_data, sort_type, reverse, fuzzy_match):
'''Add a symbol for a part to the library.'''
# Start the part definition with the header.
part_defn = START_DEF.format(name=part_num,
ref=part_ref_prefix,
pin_name_offset=PIN_NAME_OFFSET,
show_pin_number=SHOW_PIN_NUMBER and 'Y' or 'N',
show_pin_name=SHOW_PIN_NAME and 'Y' or 'N',
num_units=len(pin_data))
# Determine if there are pins across the top of the symbol.
# If so, right-justify the reference and part number so they don't
# run into the top pins. If not, stick with left-justification.
horiz_just = 'L'
horiz_offset = PIN_LENGTH
for unit in list(pin_data.values()):
if 'top' in list(unit.keys()):
horiz_just = 'R'
horiz_offset = PIN_LENGTH - 50
break
# Create the field that stores the part reference.
if part_ref_prefix:
part_defn += REF_FIELD.format(ref_prefix=part_ref_prefix,
x=XO + horiz_offset,
y=YO + REF_Y_OFFSET,
horiz_just=horiz_just,
ref_size=REF_SIZE)
# Create the field that stores the part number.
if part_num:
part_defn += PART_FIELD.format(part_num=part_num,
x=XO + horiz_offset,
y=YO + PART_NUM_Y_OFFSET,
horiz_just=horiz_just,
ref_size=PART_NUM_SIZE)
# Create the field that stores the part footprint.
if part_footprint:
part_defn += FOOTPRINT_FIELD.format(footprint=part_footprint,
x=XO + horiz_offset,
y=YO + PART_FOOTPRINT_Y_OFFSET,
horiz_just=horiz_just,
ref_size=PART_FOOTPRINT_SIZE)
# Create the field that stores the manufacturer part number.
if part_manf_num:
part_defn += MPN_FIELD.format(manf_num=part_manf_num,
x=XO + horiz_offset,
y=YO + PART_MPN_Y_OFFSET,
horiz_just=horiz_just,
ref_size=PART_MPN_SIZE)
# Start the section of the part definition that holds the part's units.
part_defn += START_DRAW
# Get a reference to the sort-key generation function for pins.
pin_key_func = getattr(THIS_MODULE, '{}_key'.format(sort_type))
# This is the sort-key generation function for unit names.
unit_key_func = lambda x: zero_pad_nums(x[0])
# Now create the units that make up the part. Unit numbers go from 1
# up to the number of units in the part. The units are sorted by their
# names before assigning unit numbers.
for unit_num, unit in enumerate([p[1] for p in sorted(pin_data.items(),key=unit_key_func)], 1):
# The indices of the X and Y coordinates in a list of point coords.
X = 0
Y = 1
# Initialize data structures that store info for each side of a schematic symbol unit.
all_sides = ['left', 'right', 'top', 'bottom']
bbox = {side: [(XO, YO), (XO, YO)] for side in all_sides}
box_pt = {
side: [XO + PIN_LENGTH, YO + PIN_SPACING]
for side in all_sides
}
anchor_pt = {
side: [XO + PIN_LENGTH, YO + PIN_SPACING]
for side in all_sides
}
transform = {}
# Annotate the pins for each side of the symbol.
for side_pins in list(unit.values()):
annotate_pins(list(side_pins.items()))
# Determine the actual bounding box for each side.
bbox = {}
for side, side_pins in list(unit.items()):
bbox[side] = pins_bbox(list(side_pins.items()))
# Adjust the sizes of the bboxes to make the unit look more symmetrical.
balance_bboxes(bbox)
# Determine some important points for each side of pins.
for side in unit:
#
# C B-------A
# | |
# ------| name1 |
# | |
# ------| name2 |
#
# A = anchor point = upper-right corner of bounding box.
# B = box point = upper-left corner of bounding box + pin length.
# C = upper-left corner of bounding box.
anchor_pt[side] = [max(bbox[side][0][X], bbox[side][1][X]),
max(bbox[side][0][Y], bbox[side][1][Y])]
box_pt[side] = [
min(bbox[side][0][X], bbox[side][1][X]) + PIN_LENGTH,
max(bbox[side][0][Y], bbox[side][1][Y])
]
# AL = left-side anchor point.
# AB = bottom-side anchor point.
# AR = right-side | |
#!/usr/bin/env python
"""
<NAME>
Feb 2021
external calibration of two odometries
"""
import rospy
from nav_msgs.msg import Odometry
import numpy as np
import message_filters
import tf
import random
from geometry_msgs.msg._Pose import Pose
from sklearn.linear_model import RANSACRegressor
from utils import so3_estimation
estimation_state_dict = {'rotation':0, 'translation':1, 'done':2}
data_collecting_list = ['x', 'y', 'z', 'roll', 'pitch', 'yaw']
class Estimator(object):
    """Base class for odometry-to-odometry extrinsic calibration estimators."""

    def __init__(self):
        # Most recent 4x4 homogeneous poses (capital P is pose; little p is position).
        self.P_rr = None  # reference (e.g. GPS) odometry pose
        self.P_cc = None  # camera/VIO odometry pose
        # Key samples; their layout depends on the concrete estimation algorithm
        # (poses, positions, or both).
        self.samples = []
        # Required number of samples per axis.
        self.N = 4

    def get_transforms(self, odom):
        """Convert a nav_msgs/Odometry message into a 4x4 homogeneous matrix."""
        position = odom.pose.pose.position
        orientation = odom.pose.pose.orientation
        quat = (orientation.x, orientation.y, orientation.z, orientation.w)
        T = tf.transformations.quaternion_matrix(quat)
        # Overwrite the last column with the homogeneous position.
        T[:, 3] = np.asarray((position.x, position.y, position.z, 1))
        return T

    def calcRotationDiff(self, r1, r2):
        """Return a small-angle magnitude measure of the difference between rotations r1 and r2."""
        skew = (np.matmul(r1.transpose(), r2) - np.matmul(r1, r2.transpose())) / 2.
        # Sum the absolute off-diagonal (axis) components of the skew part.
        return abs(skew[2, 1]) + abs(skew[0, 2]) + abs(skew[1, 0])

    def add_N(self, n):
        """Raise the required per-axis sample count by n."""
        self.N += n
class RotEstimator(Estimator):
    """Estimate the rotation between two odometry frames.

    Key samples are displacement (delta-position) pairs collected while the
    platform translates without rotating much; the rotation R relating
    delta_gps ~= R @ delta_vio is then fit robustly with RANSAC and
    projected onto SO(3).
    """

    def __init__(self):
        Estimator.__init__(self)
        # Minimum per-axis displacement before a new key sample is taken;
        # re-randomized within [distance_min, distance_max] after each sample.
        self.distance_threshold = 0.1
        self.distance_min = 0.1
        self.distance_max = 0.75
        # Maximum rotation allowed between key samples so displacement pairs
        # relate through an (approximately) constant rotation.
        self.tolerance_angle = 15 / 180. * np.pi
        # Per-axis sample counters.
        self.N_x = 0
        self.N_y = 0
        self.N_z = 0

    def has_sufficent_sample(self):
        """Return True once every axis has at least N samples."""
        return (self.N_x >= self.N) and (self.N_y >= self.N) and (self.N_z >= self.N)

    def add_sample(self, gps_odom, vio_odom):
        """Consume a synchronized (gps, vio) odometry pair, possibly storing a key sample."""
        T_gps = self.get_transforms(gps_odom)
        T_vio = self.get_transforms(vio_odom)
        if self.P_cc is None:
            # First message: just initialize the reference poses.
            self.P_cc = T_vio
            self.P_rr = T_gps
            return
        # Insert key sample; nothing to report if it was rejected.
        if not self.insert_sample(T_vio, T_gps):
            return
        # Tell the operator which axis still needs data.
        if self.N_x < self.N:
            print("Next translation goal: " + str(self.distance_threshold) + " on x direction")
            return
        if self.N_y < self.N:
            print("Next translation goal: " + str(self.distance_threshold) + " on y direction")
            return
        if self.N_z < self.N:
            print("Next translation goal: " + str(self.distance_threshold) + " on z direction")
            return

    def estimate_params(self):
        """Fit the rotation matrix from the collected displacement pairs via RANSAC.

        :return: 3x3 rotation matrix projected onto SO(3).
        """
        X = []
        Y = []
        delta_vio_pos = np.array([sample[0, :] for sample in self.samples])
        delta_gps_pos = np.array([sample[1, :] for sample in self.samples])
        num_samples = delta_gps_pos.shape[0]
        for i in range(num_samples):
            dv = delta_vio_pos[i]
            dg = delta_gps_pos[i]
            # Each displacement pair yields three linear equations in the nine
            # entries of R; the last three columns are intercept placeholders.
            X.append(np.array((dv[0], dv[1], dv[2], 0, 0, 0, 0, 0, 0, 1, 0, 0)))
            Y.append(dg[0])
            X.append(np.array((0, 0, 0, dv[0], dv[1], dv[2], 0, 0, 0, 0, 1, 0)))
            Y.append(dg[1])
            X.append(np.array((0, 0, 0, 0, 0, 0, dv[0], dv[1], dv[2], 0, 0, 1)))
            Y.append(dg[2])
        X = np.asarray(X)
        Y = np.asarray(Y)
        print("equation number: ")
        print("X: " + str(self.N_x))
        print("Y: " + str(self.N_y))
        print("Z: " + str(self.N_z))
        # Drop the intercept placeholder columns: displacements are related by
        # a pure rotation, with no translation term.
        # https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.RANSACRegressor.html
        X = X[:, :-3]
        reg = RANSACRegressor(random_state=0).fit(X, Y)
        R = reg.estimator_.coef_.reshape((3, 3))
        # Project the unconstrained linear estimate onto SO(3).
        R = so3_estimation.Rot_Estimate_SVD_SO3(R)
        return R

    def insert_sample(self, T_vio, T_gps):
        """Store a displacement pair if the platform moved far enough without rotating.

        :return: True when a sample was stored (and the reference poses advanced).
        """
        previous_vio_pos = self.P_cc[:3, 3]
        current_vio_pos = T_vio[:3, 3]
        # Require sufficient motion on at least one axis.
        if np.abs(current_vio_pos - previous_vio_pos).max() < self.distance_threshold:
            return False
        previous_rot = self.P_cc[:3, :3]
        current_rot = T_vio[:3, :3]
        # Reject samples taken while the platform rotated too much.
        if self.calcRotationDiff(previous_rot, current_rot) > self.tolerance_angle:
            return False
        delta_vio_pos = current_vio_pos - previous_vio_pos
        delta_gps_pos = T_gps[:3, 3] - self.P_rr[:3, 3]
        # Credit the sample to the axis it excites the most.
        dim_idx = np.abs(delta_vio_pos).argmax()
        if dim_idx == 0:
            self.N_x += 1
            print("Got sample on x direction")
        elif dim_idx == 1:
            self.N_y += 1
            print("Got sample on y direction")
        elif dim_idx == 2:
            self.N_z += 1
            print("Got sample on z direction")
        # BUG FIX: store the displacement pair (deltas) rather than the absolute
        # previous positions -- estimate_params consumes samples as
        # (delta_vio, delta_gps), and the previously computed deltas were
        # otherwise discarded.
        self.samples.append(np.array((delta_vio_pos, delta_gps_pos)))
        # Randomize the next displacement goal.
        self.distance_threshold = random.random() * (self.distance_max - self.distance_min) + self.distance_min
        # Advance the reference poses.
        self.P_cc = T_vio
        self.P_rr = T_gps
        return True
class TransEstimator(Estimator):
def __init__(self):
    """Initialize the translation estimator with rotation-change thresholds."""
    Estimator.__init__(self)
    # Bounds for the randomized rotation-change goal between key samples.
    self.rot_min = 15 / 180. * np.pi
    self.rot_max = 70 / 180. * np.pi
    self.rot_angle_threshold = 15 / 180. * np.pi  # self.rot_angle_threshold is bounded by self.rot_min and self.rot_max
    # Per-axis (roll/pitch/yaw) sample counters.
    self.N_roll = 0
    self.N_pitch = 0
    self.N_yaw = 0
    # Frame rotation estimate; identity until installed via set_R().
    self.R = np.eye(3)
def set_R(self, R):
    """Install the previously estimated frame rotation used by the translation fit."""
    self.R = R
def has_sufficent_sample(self):
    """Return True once roll, pitch and yaw each have at least N key samples."""
    return (self.N_roll >= self.N) and (self.N_pitch >= self.N) and (self.N_yaw >= self.N)
def add_sample(self, gps_odom, vio_odom):
    """Consume a synchronized (gps, vio) odometry pair, possibly storing a key sample."""
    T_gps = self.get_transforms(gps_odom)
    T_vio = self.get_transforms(vio_odom)
    if self.P_cc is None:
        # First message: just initialize the reference poses.
        self.P_cc = T_vio
        self.P_rr = T_gps
        return
    # Insert key sample; nothing to report if it was rejected.
    if not self.insert_sample(T_vio, T_gps):
        return
    # Report the first axis that still needs samples.
    for axis, count in (('roll', self.N_roll), ('pitch', self.N_pitch), ('yaw', self.N_yaw)):
        if count < self.N:
            print("Next rotation goal: " + str(self.rot_angle_threshold) + " on " + axis)
            return
def estimate_params(self):
    """Fit the translation parameters from the collected pose samples via RANSAC.

    Each stored (T_vio, T_gps) pair contributes three linear equations built
    by computeTanslationSystemParam; the stacked system is solved robustly
    and the regressor's linear coefficients are returned.
    """
    N = len(self.samples)
    X = []
    Y = []
    for i in range(N):
        T_vio, T_gps = self.samples[i]
        P_rr = T_gps
        p_rr = T_gps[:, 3]  # homogeneous reference (gps) position
        p_cc = T_vio[:, 3]  # homogeneous camera (vio) position
        A, r = self.computeTanslationSystemParam(P_rr, p_rr, p_cc)
        for j in range(3):
            X.append(A[j, :])
            Y.append(r[j])
    # Leftover derivation/debug snippet kept from development (never executed):
    """
    inv_P_rr = np.linalg.inv(P_rr)
    trans_vec = (0.1, 0, -0.01)
    trans = tf.transformations.translation_matrix(trans_vec)
    rot = tf.transformations.euler_matrix(0.5, 1.0471975512, -0.5)
    T = np.matmul(trans, rot)
    T_gt = np.linalg.inv(T)
    p_rc = np.matmul(T_gt, p_cc)
    trans_vec = T_gt[:3, 3]
    T0 = tf.transformations.translation_matrix(trans_vec)
    p_const = np.matmul(inv_P_rr, p_rc) - np.matmul(inv_P_rr, p_rr)
    """
    X = np.asarray(X)
    Y = np.asarray(Y)
    print("equation number: ")
    print("roll: " + str(self.N_roll))
    print("pitch: " + str(self.N_pitch))
    print("yaw: " + str(self.N_yaw))
    # Robust linear fit; coefficients are the translation parameters.
    reg = RANSACRegressor(random_state=0).fit(X, Y)
    params = reg.estimator_.coef_
    return params
def computeTanslationSystemParam(self, P_rr, p_rr, p_cc):
    """Build one 3-equation linear system A @ t = r for the translation estimate.

    :param P_rr: 4x4 homogeneous pose of the reference (gps) odometry.
    :param p_rr: homogeneous reference (gps) position (4-vector).
    :param p_cc: homogeneous camera (vio) position (4-vector).
    :return: (A, r) where A is the 3x3 coefficient matrix and r the 3-vector
        right-hand side.
    """
    # check out wiki for the translation system: https://github.com/ZhiangChen/gps_vio/wiki/T265-External-Calibration#2-estimating-translation-matrix
    inv_P_rr = np.linalg.inv(P_rr)
    # Rotation entries a** and translation entries b* of P_rr.
    a11 = P_rr[0, 0]
    a12 = P_rr[0, 1]
    a13 = P_rr[0, 2]
    a21 = P_rr[1, 0]
    a22 = P_rr[1, 1]
    a23 = P_rr[1, 2]
    a31 = P_rr[2, 0]
    a32 = P_rr[2, 1]
    a33 = P_rr[2, 2]
    b1 = P_rr[0, 3]
    b2 = P_rr[1, 3]
    b3 = P_rr[2, 3]
    # Corresponding entries of the inverse pose (suffix underscore).
    a11_ = inv_P_rr[0, 0]
    a12_ = inv_P_rr[0, 1]
    a13_ = inv_P_rr[0, 2]
    a21_ = inv_P_rr[1, 0]
    a22_ = inv_P_rr[1, 1]
    a23_ = inv_P_rr[1, 2]
    a31_ = inv_P_rr[2, 0]
    a32_ = inv_P_rr[2, 1]
    a33_ = inv_P_rr[2, 2]
    b1_ = inv_P_rr[0, 3]
    b2_ = inv_P_rr[1, 3]
    b3_ = inv_P_rr[2, 3]
    # Entries of the previously estimated frame rotation (see set_R).
    r11 = self.R[0, 0]
    r12 = self.R[0, 1]
    r13 = self.R[0, 2]
    r21 = self.R[1, 0]
    r22 = self.R[1, 1]
    r23 = self.R[1, 2]
    r31 = self.R[2, 0]
    r32 = self.R[2, 1]
    r33 = self.R[2, 2]
    x_cc = p_cc[0]
    y_cc = p_cc[1]
    z_cc = p_cc[2]
    x_rr = p_rr[0]
    y_rr = p_rr[1]
    z_rr = p_rr[2]
    # Left-hand side: reference pose applied to the gps position plus offsets
    # (expanded per the wiki derivation linked above).
    left_r1 = a11 * b1_ + a12 * b2_ + a13 * b3_ + b1 + x_rr * (a11 * a11_ + a12 * a21_ + a13 * a31_) + y_rr * (
        a11 * a12_ + a12 * a22_ + a13 * a32_) + z_rr * (a11 * a13_ + a12 * a23_ + a13 * a33_)
    left_r2 = a21 * b1_ + a22 * b2_ + a23 * b3_ + b2 + x_rr * (a11_ * a21 + a21_ * a22 + a23 * a31_) + y_rr * (
        a12_ * a21 + a22 * a22_ + a23 * a32_) + z_rr * (a13_ * a21 + a22 * a23_ + a23 * a33_)
    left_r3 = a31 * b1_ + a32 * b2_ + a33 * b3_ + b3 + x_rr * (a11_ * a31 + a21_ * a32 + a31_ * a33) + y_rr * (
        a12_ * a31 + a22_ * a32 + a32_ * a33) + z_rr * (a13_ * a31 + a23_ * a32 + a33 * a33_)
    # Right-hand side: frame rotation applied to the vio position.
    right_r1 = r11 * x_cc + r12 * y_cc + r13 * z_cc
    right_r2 = r21 * x_cc + r22 * y_cc + r23 * z_cc
    right_r3 = r31 * x_cc + r32 * y_cc + r33 * z_cc
    r1 = right_r1 - left_r1
    r2 = right_r2 - left_r2
    r3 = right_r3 - left_r3
    r = np.array((r1, r2, r3))
    A = np.array([[a11 - 1, a12, a13], [a21, a22 - 1, a23], [a31, a32, a33 - 1]])
    return A, r
def insert_sample(self, T_vio, T_gps):
current_vio_rot = T_vio[:3, :3]
previous_vio_rot = self.P_cc[:3, :3]
angle = self.calcRotationDiff(previous_vio_rot, current_vio_rot)
if angle < self.rot_angle_threshold:
return False
else:
# append sample
previous_vio_euler = tf.transformations.euler_from_matrix(previous_vio_rot)
previous_vio_euler = np.array(previous_vio_euler)
current_vio_euler = tf.transformations.euler_from_matrix(current_vio_rot)
current_vio_euler | |
not the global
file must be deleted manually.
:return: an ID that can be used to retrieve the file.
"""
raise NotImplementedError()
@contextmanager
def writeGlobalFileStream(
    self,
    cleanup: bool = False,
    basename: Optional[str] = None,
    encoding: Optional[str] = None,
    errors: Optional[str] = None,
) -> Iterator[Tuple[WriteWatchingStream, FileID]]:
    """
    Similar to writeGlobalFile, but allows the writing of a stream to the job store.

    The yielded file handle does not need to and should not be closed explicitly.

    :param encoding: The name of the encoding used to decode the file. Encodings
        are the same as for decode(). Defaults to None which represents binary mode.
    :param errors: Specifies how encoding errors are to be handled. Errors are the
        same as for open(). Defaults to 'strict' when an encoding is specified.
    :param cleanup: is as in
        :func:`toil.fileStores.abstractFileStore.AbstractFileStore.writeGlobalFile`.
    :param basename: If supported by the backing JobStore, use the given
        file basename so that when searching the job store with a query
        matching that basename, the file will be detected.
    :return: A context manager yielding a tuple of
        1) a file handle which can be written to and
        2) the toil.fileStores.FileID of the resulting file in the job store.
        The FileID's size is updated live as bytes are written to the handle.
    """
    with self.jobStore.write_file_stream(
        str(self.jobDesc.jobStoreID), cleanup, basename, encoding, errors
    ) as (backingStream, fileStoreID):
        # We have a string version of the file ID, and the backing stream.
        # We need to yield a stream the caller can write to, and a FileID
        # that accurately reflects the size of the data written to the
        # stream. We assume the stream is not seekable.
        # Make and keep a reference to the file ID, which is currently empty
        fileID = FileID(fileStoreID, 0)
        # Wrap the stream to increment the file ID's size for each byte written
        wrappedStream = WriteWatchingStream(backingStream)
        # When the stream is written to, count the bytes
        def handle(numBytes: int) -> None:
            # No scope problem here, because we don't assign to a fileID local
            fileID.size += numBytes
        wrappedStream.onWrite(handle)
        yield wrappedStream, fileID
def _dumpAccessLogs(self) -> None:
    """
    When something goes wrong, log a report.

    Includes the files that were accessed while the file store was open.
    """
    if not self._accessLog:
        return
    logger.warning('Failed job accessed files:')
    for record in self._accessLog:
        if len(record) == 2:
            # (file ID, destination path): the file was downloaded.
            logger.warning('Downloaded file \'%s\' to path \'%s\'', *record)
        else:
            # (file ID,): the file was streamed.
            logger.warning('Streamed file \'%s\'', *record)
def logAccess(
    self, fileStoreID: Union[FileID, str], destination: Union[str, None] = None
) -> None:
    """
    Record that the given file was read by the job.

    (to be announced if the job fails)

    If destination is not None, it gives the path that the file
    was downloaded to. Otherwise, assumes that the file was streamed.

    Must be called by :meth:`readGlobalFile` and :meth:`readGlobalFileStream`
    implementations.
    """
    # Downloads are stored as 2-tuples, streamed reads as 1-tuples; see
    # _dumpAccessLogs for how each shape is reported.
    record = (fileStoreID,) if destination is None else (fileStoreID, destination)
    self._accessLog.append(record)
@abstractmethod
def readGlobalFile(
    self,
    fileStoreID: str,
    userPath: Optional[str] = None,
    cache: bool = True,
    mutable: bool = False,
    symlink: bool = False,
) -> str:
    """
    Make the file associated with fileStoreID available locally.

    If mutable is True, then a copy of the file will be created locally so
    that the original is not modified and does not change the file for other
    jobs. If mutable is False, then a link can be created to the file, saving
    disk resources. The file that is downloaded will be executable if and only
    if it was originally uploaded from an executable file on the local filesystem.

    If a user path is specified, it is used as the destination. If a user path isn't
    specified, the file is stored in the local temp directory with an encoded name.

    The destination file must not be deleted by the user; it can only be
    deleted through deleteLocalFile.

    Implementations must call :meth:`logAccess` to report the download.

    :param fileStoreID: job store id for the file
    :param userPath: a path to the name of file to which the global file will
        be copied or hard-linked (see below).
    :param cache: Described in
        :func:`toil.fileStores.CachingFileStore.readGlobalFile`
    :param mutable: Described in
        :func:`toil.fileStores.CachingFileStore.readGlobalFile`
    :param symlink: Described in
        :func:`toil.fileStores.CachingFileStore.readGlobalFile`
    :return: An absolute path to a local, temporary copy of the file keyed
        by fileStoreID.
    """
    raise NotImplementedError()
@abstractmethod
def readGlobalFileStream(
    self,
    fileStoreID: str,
    encoding: Optional[str] = None,
    errors: Optional[str] = None,
) -> ContextManager[Union[BinaryIO, TextIO]]:
    """
    Read a stream from the job store; similar to readGlobalFile.

    The yielded file handle does not need to and should not be closed explicitly.

    :param fileStoreID: job store id for the file
    :param encoding: the name of the encoding used to decode the file. Encodings
        are the same as for decode(). Defaults to None which represents binary mode.
    :param errors: an optional string that specifies how encoding errors are
        to be handled. Errors are the same as for open(). Defaults to 'strict'
        when an encoding is specified.

    Implementations must call :meth:`logAccess` to report the download.

    :return: a context manager yielding a file handle which can be read from.
    """
    raise NotImplementedError()
def getGlobalFileSize(self, fileStoreID: Union[FileID, str]) -> int:
    """
    Get the size of the file pointed to by the given ID, in bytes.

    If given a FileID or something else with a non-None 'size' field, uses
    that. Otherwise, asks the job store to poll the file's size, which may
    overestimate it (for example if the file is encrypted and had to be
    augmented with an IV or other encryption framing).

    :param fileStoreID: File ID for the file
    :return: File's size in bytes, as stored in the job store
    """
    attached = getattr(fileStoreID, 'size', None)
    if attached is not None:
        # The FileID still carries its size; no job store round-trip needed.
        return cast(int, attached)
    # The size fell off: someone is mixing FileStore and JobStore file APIs,
    # or serializing FileIDs as strings. Ask the job store instead.
    return cast(int, self.jobStore.get_file_size(fileStoreID))
@abstractmethod
def deleteLocalFile(self, fileStoreID: Union[FileID, str]) -> None:
    """
    Delete local copies of files associated with the provided job store ID.

    Raises an OSError with an errno of errno.ENOENT if no such local copies
    exist. Thus, cannot be called multiple times in succession.

    The files deleted are all those previously read from this file ID via
    readGlobalFile by the current job into the job's file-store-provided
    temp directory, plus the file that was written to create the given file
    ID, if it was written by the current job from the job's
    file-store-provided temp directory.

    :param fileStoreID: File Store ID of the file to be deleted.
    :raises OSError: with errno.ENOENT if no local copies of the file exist.
    """
    raise NotImplementedError()
@abstractmethod
def deleteGlobalFile(self, fileStoreID: Union[FileID, str]) -> None:
    """
    Delete local copies of the file, then permanently delete it from the job store.

    To ensure that the job can be restarted if necessary, the delete will not
    happen until after the job's run method has completed.

    :param fileStoreID: the File Store ID of the file to be deleted.
    """
    raise NotImplementedError()
# Functions used to read and write files directly between a source url
# and the job store.
@deprecated(new_function_name="import_file")
def importFile(
    self, srcUrl: str, sharedFileName: Optional[str] = None
) -> Optional[FileID]:
    """Deprecated alias for :meth:`import_file`."""
    return self.import_file(srcUrl, sharedFileName)
def import_file(
    self, src_uri: str, shared_file_name: Optional[str] = None
) -> Optional[FileID]:
    """
    Import the file at the given URI into the job store.

    Delegates to the backing job store's import_file.

    :param src_uri: URI of the file to import.
    :param shared_file_name: optional shared file name to store it under.
    """
    return self.jobStore.import_file(src_uri, shared_file_name=shared_file_name)
@deprecated(new_function_name='export_file')
def exportFile(self, jobStoreFileID: FileID, dstUrl: str) -> None:
    """Deprecated alias for :meth:`export_file`."""
    return self.export_file(jobStoreFileID, dstUrl)
def export_file(self, file_id: FileID, dst_uri: str) -> None:
    """
    Export a file from the job store to the given destination URI.

    Not implemented in this base class; subclasses must override.

    :param file_id: File Store ID of the file to export.
    :param dst_uri: destination URI to export the file to.
    """
    raise NotImplementedError()
# A utility method for accessing filenames
def _resolveAbsoluteLocalPath(self, filePath: str) -> str:
"""
Return the absolute path to filePath.
This is a wrapper for os.path.abspath because mac OS
symlinks /tmp and /var (the most common places for a default tempdir) to
/private/tmp and /private/var respectively.
:param filePath: The absolute or relative path to the file. If relative,
it must be relative to the local temp working dir
:return: Absolute path to key
"""
if os.path.isabs(filePath):
return os.path.abspath(filePath)
else:
return os.path.join(self.localTempDir, filePath)
class _StateFile:
"""Read and write dill-ed state dictionaries from/to a file into a namespace."""
def | |
<reponame>m-ajay/superset
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pandas as pd
from superset.charts.post_processing import pivot_df, table
def test_pivot_df_no_cols_no_rows_single_metric():
    """
    Pivot table when no cols/rows and 1 metric are selected.
    """
    # when no cols/rows are selected there are no groupbys in the query,
    # and the data has only the metric(s)
    df = pd.DataFrame.from_dict({"SUM(num)": {0: 80679663}})
    assert (
        df.to_markdown()
        == """
|    |    SUM(num) |
|---:|------------:|
|  0 | 8.06797e+07 |
    """.strip()
    )
    pivoted = pivot_df(
        df,
        rows=[],
        columns=[],
        metrics=["SUM(num)"],
        aggfunc="Sum",
        transpose_pivot=False,
        combine_metrics=False,
        show_rows_total=False,
        show_columns_total=False,
        apply_metrics_on_rows=False,
    )
    assert (
        pivoted.to_markdown()
        == """
|                  |   ('SUM(num)',) |
|:-----------------|----------------:|
| ('Total (Sum)',) |     8.06797e+07 |
    """.strip()
    )
    # transpose_pivot and combine_metrics do nothing in this case
    pivoted = pivot_df(
        df,
        rows=[],
        columns=[],
        metrics=["SUM(num)"],
        aggfunc="Sum",
        transpose_pivot=True,
        combine_metrics=True,
        show_rows_total=False,
        show_columns_total=False,
        apply_metrics_on_rows=False,
    )
    assert (
        pivoted.to_markdown()
        == """
|                  |   ('SUM(num)',) |
|:-----------------|----------------:|
| ('Total (Sum)',) |     8.06797e+07 |
    """.strip()
    )
    # apply_metrics_on_rows will pivot the table, moving the metrics
    # to rows
    pivoted = pivot_df(
        df,
        rows=[],
        columns=[],
        metrics=["SUM(num)"],
        aggfunc="Sum",
        transpose_pivot=True,
        combine_metrics=True,
        show_rows_total=False,
        show_columns_total=False,
        apply_metrics_on_rows=True,
    )
    assert (
        pivoted.to_markdown()
        == """
|               |   ('Total (Sum)',) |
|:--------------|-------------------:|
| ('SUM(num)',) |        8.06797e+07 |
    """.strip()
    )
    # showing totals
    pivoted = pivot_df(
        df,
        rows=[],
        columns=[],
        metrics=["SUM(num)"],
        aggfunc="Sum",
        transpose_pivot=True,
        combine_metrics=True,
        show_rows_total=True,
        show_columns_total=True,
        apply_metrics_on_rows=False,
    )
    assert (
        pivoted.to_markdown()
        == """
|                  |   ('SUM(num)',) |   ('Total (Sum)',) |
|:-----------------|----------------:|-------------------:|
| ('Total (Sum)',) |     8.06797e+07 |        8.06797e+07 |
    """.strip()
    )
def test_pivot_df_no_cols_no_rows_two_metrics():
    """
    Pivot table when no cols/rows and 2 metrics are selected.
    """
    # when no cols/rows are selected there are no groupbys in the query,
    # and the data has only the metrics
    df = pd.DataFrame.from_dict({"SUM(num)": {0: 80679663}, "MAX(num)": {0: 37296}})
    assert (
        df.to_markdown()
        == """
|    |    SUM(num) |   MAX(num) |
|---:|------------:|-----------:|
|  0 | 8.06797e+07 |      37296 |
    """.strip()
    )
    pivoted = pivot_df(
        df,
        rows=[],
        columns=[],
        metrics=["SUM(num)", "MAX(num)"],
        aggfunc="Sum",
        transpose_pivot=False,
        combine_metrics=False,
        show_rows_total=False,
        show_columns_total=False,
        apply_metrics_on_rows=False,
    )
    assert (
        pivoted.to_markdown()
        == """
|                  |   ('SUM(num)',) |   ('MAX(num)',) |
|:-----------------|----------------:|----------------:|
| ('Total (Sum)',) |     8.06797e+07 |           37296 |
    """.strip()
    )
    # transpose_pivot and combine_metrics do nothing in this case
    pivoted = pivot_df(
        df,
        rows=[],
        columns=[],
        metrics=["SUM(num)", "MAX(num)"],
        aggfunc="Sum",
        transpose_pivot=True,
        combine_metrics=True,
        show_rows_total=False,
        show_columns_total=False,
        apply_metrics_on_rows=False,
    )
    assert (
        pivoted.to_markdown()
        == """
|                  |   ('SUM(num)',) |   ('MAX(num)',) |
|:-----------------|----------------:|----------------:|
| ('Total (Sum)',) |     8.06797e+07 |           37296 |
    """.strip()
    )
    # apply_metrics_on_rows will pivot the table, moving the metrics
    # to rows
    pivoted = pivot_df(
        df,
        rows=[],
        columns=[],
        metrics=["SUM(num)", "MAX(num)"],
        aggfunc="Sum",
        transpose_pivot=True,
        combine_metrics=True,
        show_rows_total=False,
        show_columns_total=False,
        apply_metrics_on_rows=True,
    )
    assert (
        pivoted.to_markdown()
        == """
|               |   ('Total (Sum)',) |
|:--------------|-------------------:|
| ('SUM(num)',) |        8.06797e+07 |
| ('MAX(num)',) |              37296 |
    """.strip()
    )
    # when showing totals we only add a column, since adding a row
    # would be redundant
    pivoted = pivot_df(
        df,
        rows=[],
        columns=[],
        metrics=["SUM(num)", "MAX(num)"],
        aggfunc="Sum",
        transpose_pivot=True,
        combine_metrics=True,
        show_rows_total=True,
        show_columns_total=True,
        apply_metrics_on_rows=False,
    )
    assert (
        pivoted.to_markdown()
        == """
|                  |   ('SUM(num)',) |   ('MAX(num)',) |   ('Total (Sum)',) |
|:-----------------|----------------:|----------------:|-------------------:|
| ('Total (Sum)',) |     8.06797e+07 |           37296 |         8.0717e+07 |
    """.strip()
    )
def test_pivot_df_single_row_two_metrics():
    """
    Pivot table when a single column and 2 metrics are selected.
    """
    df = pd.DataFrame.from_dict(
        {
            "gender": {0: "girl", 1: "boy"},
            "SUM(num)": {0: 118065, 1: 47123},
            "MAX(num)": {0: 2588, 1: 1280},
        }
    )
    assert (
        df.to_markdown()
        == """
|    | gender   |   SUM(num) |   MAX(num) |
|---:|:---------|-----------:|-----------:|
|  0 | girl     |     118065 |       2588 |
|  1 | boy      |      47123 |       1280 |
    """.strip()
    )
    pivoted = pivot_df(
        df,
        rows=["gender"],
        columns=[],
        metrics=["SUM(num)", "MAX(num)"],
        aggfunc="Sum",
        transpose_pivot=False,
        combine_metrics=False,
        show_rows_total=False,
        show_columns_total=False,
        apply_metrics_on_rows=False,
    )
    assert (
        pivoted.to_markdown()
        == """
|           |   ('SUM(num)',) |   ('MAX(num)',) |
|:----------|----------------:|----------------:|
| ('boy',)  |           47123 |            1280 |
| ('girl',) |          118065 |            2588 |
    """.strip()
    )
    # transpose_pivot
    pivoted = pivot_df(
        df,
        rows=["gender"],
        columns=[],
        metrics=["SUM(num)", "MAX(num)"],
        aggfunc="Sum",
        transpose_pivot=True,
        combine_metrics=False,
        show_rows_total=False,
        show_columns_total=False,
        apply_metrics_on_rows=False,
    )
    assert (
        pivoted.to_markdown()
        == """
|                  |   ('SUM(num)', 'boy') |   ('SUM(num)', 'girl') |   ('MAX(num)', 'boy') |   ('MAX(num)', 'girl') |
|:-----------------|----------------------:|-----------------------:|----------------------:|-----------------------:|
| ('Total (Sum)',) |                 47123 |                 118065 |                  1280 |                   2588 |
    """.strip()
    )
    # combine_metrics does nothing in this case
    pivoted = pivot_df(
        df,
        rows=["gender"],
        columns=[],
        metrics=["SUM(num)", "MAX(num)"],
        aggfunc="Sum",
        transpose_pivot=False,
        combine_metrics=True,
        show_rows_total=False,
        show_columns_total=False,
        apply_metrics_on_rows=False,
    )
    assert (
        pivoted.to_markdown()
        == """
|           |   ('SUM(num)',) |   ('MAX(num)',) |
|:----------|----------------:|----------------:|
| ('boy',)  |           47123 |            1280 |
| ('girl',) |          118065 |            2588 |
    """.strip()
    )
    # show totals
    pivoted = pivot_df(
        df,
        rows=["gender"],
        columns=[],
        metrics=["SUM(num)", "MAX(num)"],
        aggfunc="Sum",
        transpose_pivot=False,
        combine_metrics=False,
        show_rows_total=True,
        show_columns_total=True,
        apply_metrics_on_rows=False,
    )
    assert (
        pivoted.to_markdown()
        == """
|                  |   ('SUM(num)',) |   ('MAX(num)',) |   ('Total (Sum)',) |
|:-----------------|----------------:|----------------:|-------------------:|
| ('boy',)         |           47123 |            1280 |              48403 |
| ('girl',)        |          118065 |            2588 |             120653 |
| ('Total (Sum)',) |          165188 |            3868 |             169056 |
    """.strip()
    )
    # apply_metrics_on_rows
    pivoted = pivot_df(
        df,
        rows=["gender"],
        columns=[],
        metrics=["SUM(num)", "MAX(num)"],
        aggfunc="Sum",
        transpose_pivot=False,
        combine_metrics=False,
        show_rows_total=True,
        show_columns_total=True,
        apply_metrics_on_rows=True,
    )
    assert (
        pivoted.to_markdown()
        == """
|                          |   ('Total (Sum)',) |
|:-------------------------|-------------------:|
| ('SUM(num)', 'boy')      |              47123 |
| ('SUM(num)', 'girl')     |             118065 |
| ('SUM(num)', 'Subtotal') |             165188 |
| ('MAX(num)', 'boy')      |               1280 |
| ('MAX(num)', 'girl')     |               2588 |
| ('MAX(num)', 'Subtotal') |               3868 |
| ('Total (Sum)', '')      |             169056 |
    """.strip()
    )
    # apply_metrics_on_rows with combine_metrics
    pivoted = pivot_df(
        df,
        rows=["gender"],
        columns=[],
        metrics=["SUM(num)", "MAX(num)"],
        aggfunc="Sum",
        transpose_pivot=False,
        combine_metrics=True,
        show_rows_total=True,
        show_columns_total=True,
        apply_metrics_on_rows=True,
    )
    assert (
        pivoted.to_markdown()
        == """
|                      |   ('Total (Sum)',) |
|:---------------------|-------------------:|
| ('boy', 'SUM(num)')  |              47123 |
| ('boy', 'MAX(num)')  |               1280 |
| ('boy', 'Subtotal')  |              48403 |
| ('girl', 'SUM(num)') |             118065 |
| ('girl', 'MAX(num)') |               2588 |
| ('girl', 'Subtotal') |             120653 |
| ('Total (Sum)', '')  |             169056 |
    """.strip()
    )
def test_pivot_df_complex():
"""
Pivot table when a column, rows and 2 metrics are selected.
"""
df = pd.DataFrame.from_dict(
{
"state": {
0: "CA",
1: "CA",
2: "CA",
3: "FL",
4: "CA",
5: "CA",
6: "FL",
7: "FL",
8: "FL",
9: "CA",
10: "FL",
11: "FL",
},
"gender": {
0: "girl",
1: "boy",
2: "girl",
3: "girl",
4: "girl",
5: "girl",
6: "boy",
7: "girl",
8: "girl",
9: "boy",
10: "boy",
11: "girl",
},
"name": {
0: "Amy",
1: "Edward",
2: "Sophia",
3: "Amy",
4: "Cindy",
5: "Dawn",
6: "Edward",
7: "Sophia",
8: "Dawn",
9: "Tony",
10: "Tony",
11: "Cindy",
},
"SUM(num)": {
0: 45426,
1: 31290,
2: 18859,
3: 14740,
4: 14149,
5: 11403,
6: 9395,
7: 7181,
8: 5089,
9: 3765,
10: 2673,
11: 1218,
},
"MAX(num)": {
0: 2227,
1: 1280,
2: 2588,
3: 854,
4: 842,
5: 1157,
6: 389,
7: 1187,
8: 461,
9: 598,
10: 247,
11: 217,
},
}
)
assert (
df.to_markdown()
== """
| | state | gender | name | SUM(num) | MAX(num) |
|---:|:--------|:---------|:-------|-----------:|-----------:|
| 0 | CA | girl | Amy | 45426 | 2227 |
| 1 | CA | boy | Edward | 31290 | 1280 |
| 2 | CA | girl | Sophia | 18859 | 2588 |
| 3 | FL | girl | Amy | 14740 | 854 |
| 4 | CA | girl | Cindy | 14149 | 842 |
| 5 | CA | girl | Dawn | 11403 | 1157 |
| 6 | FL | boy | Edward | 9395 | 389 |
| 7 | FL | girl | Sophia | 7181 | 1187 |
| 8 | FL | girl | Dawn | 5089 | 461 |
| 9 | CA | boy | Tony | 3765 | 598 |
| 10 | FL | boy | Tony | 2673 | 247 |
| 11 | FL | girl | Cindy | 1218 | 217 |
""".strip()
)
pivoted = pivot_df(
df,
rows=["gender", "name"],
columns=["state"],
metrics=["SUM(num)", "MAX(num)"],
aggfunc="Sum",
transpose_pivot=False,
combine_metrics=False,
show_rows_total=False,
show_columns_total=False,
apply_metrics_on_rows=False,
)
assert (
pivoted.to_markdown()
== | |
"""
Created on Mon Aug 25 13:17:03 2014
@author: anthony
"""
import time
from multiprocessing import Pool
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate as interp
from .cp_tools import cp_loglikelihood
from .cp_tools import cp_loglikelihood_proj
from .cp_tools import cp_model
from .cp_tools import mas2rad
from .cp_tools import project_cps
from .cp_tools import rad2mas
def phase_binary_flux(u, v, wavel, p, return_cvis=False):
    """Calculate the phases observed by an array on a binary star.

    ----------------------------------------------------------------
    p: 3-component vector (+2 optional), the binary "parameters":
    - p[0] = sep (mas)
    - p[1] = PA (deg) E of N.
    - p[2] = flux (primary is assumed to be 1)
    optional:
    - p[2:] = contrast ratio for several wavelengths that we want
             to calculate the cps over
    - u,v: baseline coordinates (meters)
    - wavel: wavelength (meters); must broadcast against u*dra so the
             last axis ends up with np.size(wavel) entries
    - return_cvis: if True, return the complex visibilities instead of
             the wrapped phases (degrees)
    ----------------------------------------------------------------"""
    p = np.array(p)
    # Relative location of the secondary: convert (sep, PA) into
    # offsets in RA/Dec (radians).
    th = (p[1] + 90.0) * np.pi / 180.0
    ddec = mas2rad(p[0] * np.sin(th))
    dra = -mas2rad(p[0] * np.cos(th))
    # Decompose into two "luminosities" — a little trick so this works
    # whether p[2:] is a single value or a list of contrasts.
    spec = p[2:]
    if len(spec) == 1:
        spec = spec[0]
    l2 = spec
    l1 = 1 - l2
    # Phase factor of the secondary; the output's trailing axis is the
    # wavelength axis.
    output_shape = list(u.shape)
    output_shape[-1] = np.size(wavel)
    phi = np.zeros(output_shape, dtype=complex)
    phi.real = np.cos(-2 * np.pi * (u * dra + v * ddec) / wavel)
    phi.imag = np.sin(-2 * np.pi * (u * dra + v * ddec) / wavel)
    cvis = l1 + l2 * phi
    phase = np.angle(cvis, deg=True)
    if return_cvis:
        return cvis
    else:
        # Wrap the phases into [-180, 180).  (The original code used the
        # obscure constant 10980.0 here, which is congruent to 180
        # modulo 360, so behavior is unchanged.)
        return np.mod(phase + 180.0, 360.0) - 180.0
# =========================================================================
def cp_model_flux(params, u, v, wavels, model="constant"):
    """Model closure phases from a parameter list, u/v triangles and a
    range of wavelengths, allowing a fit of contrast vs wavelength.

    Contrast-ratio models:
        constant   : contrast is constant with wavelength (default)
        linear     : params[2, 3] = contrast ratios at the end wavelengths
        free       : params[2:] = one contrast ratio per wavelength channel
        ndof       : params[2:] = evenly spaced nodes, cubically
                     interpolated across the wavelength range
        polynomial : params[2:] = coefficients of a polynomial in the
                     wavelength axis rescaled to [0, 1]

    NOTE: This doesn't allow for nonzero size of each component!
    """
    nwav = wavels.size
    if model == "constant":
        crat = np.repeat(params[2], nwav)
    elif model == "linear":
        crat = params[2] + (params[3] - params[2]) * (wavels - wavels[0]) / (
            wavels[-1] - wavels[0]
        )
    elif model == "ndof":
        n_nodes = params[2:].size
        node_wavs = np.linspace(np.min(wavels), np.max(wavels), n_nodes)
        interpolator = interp.interp1d(node_wavs, params[2:], kind="cubic")
        crat = interpolator(wavels)
    elif model == "free":
        # No model: contrast vs wavelength is free to vary.
        crat = params[2:]
    elif model == "polynomial":
        coeffs = params[2:]
        xax = (wavels - np.min(wavels)) / (np.max(wavels) - np.min(wavels))
        crat = np.repeat(0.0, nwav)
        for power in range(len(coeffs)):
            crat += coeffs[power] * xax**power
    else:
        raise NameError("Unknown model input to cp_model")
    # NOTE(review): `crat` is computed above but never passed on —
    # phase_binary_flux re-reads the contrasts from `params` itself.
    # Looks like dead code or a dropped argument; confirm intent.
    # Vectorize the arrays to speed up multi-wavelength calculations:
    # append a wavelength axis to u and v and broadcast wavels over it.
    u = u[..., np.newaxis]  # (ncp x n_runs x 3 x 1) or (ncp x 3 x 1)
    v = v[..., np.newaxis]  # (ncp x n_runs x 3 x 1) or (ncp x 3 x 1)
    wavels = wavels[np.newaxis, np.newaxis, :]  # (1 x 1 x nwav)
    if u.ndim == 4:
        wavels = wavels[np.newaxis]
    phases = phase_binary_flux(u, v, wavels, params)
    return np.sum(phases, axis=-2)
# =========================================================================
# =========================================================================
def cp_loglikelihood_proj_flux(
    params, u, v, wavel, proj_t3data, proj_t3err, proj, model="constant"
):
    """Calculate the log-likelihood for projected closure-phase data.

    Used in both the MultiNest and MCMC Hammer implementations.  Here
    ``proj`` is the eigenvector array used to project the model closure
    phases onto the data's basis.
    """
    # Hacky way to introduce priors: reject separations outside
    # [0, 350] mas and position angles outside [0, 360] deg outright.
    if (params[0] > 350.0) or (params[0] < 0.0):
        return -np.inf
    if (params[1] > 360.0) or (params[1] < 0.0):
        return -np.inf
    model_cps = cp_model_flux(params, u, v, wavel, model=model)
    proj_model_cps = project_cps(model_cps, proj)
    residuals = (proj_t3data - proj_model_cps) / proj_t3err
    return -np.sum(residuals**2) / 2
# =========================================================================
def chi2_grid(everything):
    """Multiprocessing worker for ``xy_grid``: computes a 2D chi2 grid
    over (y, contrast) for a fixed x offset.

    ``everything`` is a dict with keys:
      - "cpo": the closure-phase object (u, v, wavel, data, errors, ...)
      - "x": the fixed x offset (mas)
      - "ys": array of y offsets (mas)
      - "cons": array of contrast ratios to test
      - "projected": bool, use projected closure phases if True
    """
    cpo = everything["cpo"]
    chi2 = np.zeros((len(everything["ys"]), len(everything["cons"])))
    x = everything["x"]
    ys = everything["ys"]
    seps = np.sqrt(x**2 + ys**2)
    # Position angle of each (x, y) point in degrees, E of N.
    # (np.complex was removed in NumPy 1.24 — use the 1j literal.)
    pas = np.angle(1j * ys + x, True) % 360
    projected = everything["projected"]
    for ix in range(ys.size):
        for k, con in enumerate(everything["cons"]):
            params = [seps[ix], pas[ix], con]
            if projected:
                chi2[ix, k] = -2 * cp_loglikelihood_proj_flux(
                    params,
                    cpo.u,
                    cpo.v,
                    cpo.wavel,
                    cpo.proj_t3data,
                    cpo.proj_t3err,
                    cpo.proj,
                )
            else:
                chi2[ix, k] = -2 * cp_loglikelihood(
                    params, cpo.u, cpo.v, cpo.wavel, cpo.t3data, cpo.t3err
                )
    return chi2
# =========================================================================
# =========================================================================
def xy_grid(
cpo,
nxy=30,
ncon=32,
xymax="Default",
cmin=10.0,
cmax=500.0,
threads=0,
err_scale=1.0,
extra_error=0.0,
fix_crat=False,
cmap="ds9cool",
plot_as_mags=False,
projected=False,
):
"""An attempt to copy Sylvestre's chi2 grid plots, using x and y instead
of separation and position angle.
Written by <NAME>, with some parts stolen from other pysco/pymask routines."""
# ------------------------
# first, load your data!
# ------------------------
ndata = cpo.ndata
u, v = cpo.u, cpo.v
cpo.t3err = np.sqrt(cpo.t3err**2 + extra_error**2)
cpo.t3err *= err_scale
wavel = cpo.wavel
w = np.array(np.sqrt(u**2 + v**2)) / np.median(wavel)
if xymax == "Default":
# xymax = cpt.rad2mas(1./np.min(w/np.max(wavel)))
xymax = rad2mas(1.0 / np.min(w))
# ------------------------
# initialise grid params
# ------------------------
xys = np.linspace(-xymax, xymax, nxy)
# cons = cmin + (cmax-cmin) * np.linspace(0,1,ncon)
cons = np.linspace(cmin, cmax, ncon)
if fix_crat != False:
cons = np.array([fix_crat])
ncon = 1
# ------------------------
# Calculate chi2 at each point
# ------------------------
tic = time.time() # start the clock
chi2 = np.zeros((nxy, nxy, ncon))
if threads == 0:
toc = time.time()
for ix, x in enumerate(xys):
everything = {
"x": x,
"cons": cons,
"ys": xys,
"cpo": cpo,
"ix": ix,
"projected": projected,
}
chi2[ix, :, :] = chi2_grid(everything)
if (ix % 50) == 0:
tc = time.time()
print("Done " + str(ix) + ". Time taken: " + str(tc - toc) + "seconds")
toc = tc
else:
all_vars = []
for ix in range(nxy):
everything = {
"x": xys[ix],
"cons": cons,
"ys": xys,
"cpo": cpo,
"ix": ix,
"projected": projected,
}
all_vars.append(everything)
pool = Pool(processes=threads)
chi2 = pool.map(chi2_grid, all_vars)
pool.close()
tf = time.time()
if tf - tic > 60:
print("Total time elapsed: " + str((tf - tic) / 60.0) + "mins")
elif tf - tic <= 60:
print("Total time elapsed: " + str(tf - tic) + " seconds")
chi2 = np.array(chi2)
best_ix = np.where(chi2 == np.amin(chi2))
# hack: if the best chi2 is at more than one location, take the first.
bestx = xys[best_ix[0][0]]
besty = xys[best_ix[1][0]]
sep = np.sqrt(bestx**2 + besty**2)
pa = np.angle(np.complex(bestx, besty), True) % 360
best_params = [sep, pa, cons[best_ix[2][0]]]
best_params = np.array(np.array(best_params).ravel())
print("Separation " + str(best_params[0]) + " mas")
print("Position angle " + str(best_params[1]) + " deg")
print("Contrast Ratio " + str(best_params[2]))
# ---------------------------------------------------------------
# sum over each variable so we can visualise it all
# ---------------------------------------------------------------
temp_chi2 = ndata * chi2 / np.amin(chi2)
like = np.exp(-(temp_chi2 - ndata) / 2)
x_y = np.sum(like, axis=2)
# ---------------------------------------------------------------
# contour plot!
# ---------------------------------------------------------------
names = ["Chi2", "Likelihood", "Best Contrast Ratio"]
plots = [np.min(chi2, axis=2), x_y, cons[np.argmin(chi2, axis=2)]]
for ix, n in enumerate(names):
plt.figure(n)
plt.clf()
# Plot it with RA on the X axis
plt.imshow(
plots[ix],
extent=[np.amin(xys), np.amax(xys), np.amin(xys), np.amax(xys)],
aspect="auto",
cmap=cmap,
)
plt.colorbar()
plt.ylabel("Dec (mas)")
plt.xlabel("RA (mas)")
plt.plot([0], [0], "wo")
plt.xlim(xys[-1], xys[0])
plt.ylim(xys[0], xys[-1])
# ---------------------------------------------------------------
# And the detection limits that come for free!
# ---------------------------------------------------------------
chi2_null = np.sum((cpo.t3data / cpo.t3err) ** 2)
# Define the detec limits to be the contrast at which chi2_binary - chi2_null < 25
detecs = (chi2 - chi2_null) < 25
detec_lim = np.zeros((nxy, nxy))
for x_ix in range(nxy):
for y_ix in range(nxy):
detectable_cons = cons[detecs[x_ix, y_ix, :]]
if len(detectable_cons) == 0:
detec_lim[x_ix, y_ix] = cons[-1]
else:
detec_lim[x_ix, y_ix] = np.min(detectable_cons)
if plot_as_mags:
detec_lim_plot = -2.5 * np.log10(detec_lim)
else:
detec_lim_plot = detec_lim
plt.figure(1)
plt.clf()
# plt.imshow(detec_lim,extent=(xys[0],xys[-1],xys[0],xys[-1]),cmap=cmap)
# Plot it with RA on the X axis
plt.imshow(detec_lim_plot, extent=(xys[0], xys[-1], xys[0], xys[-1]), cmap=cmap)
plt.colorbar()
plt.title("Detection limits")
plt.xlabel("RA (mas)")
plt.ylabel("Dec (mas)")
plt.xlim(xys[-1], xys[0])
plt.ylim(xys[0], xys[-1])
# And we should also print whether the likelihood peak is a detection
# according to the limits we just calculated
| |
import numpy as np
import function
class Conv2d(object):
    """
    Implements a 2D convolutional layer for NCHW inputs.

    Weights ('w') and biases ('b') are stored in ``self.params``; their
    gradients ('dw', 'db') are written into ``self.grads`` by ``backward``.
    """
    def __init__(self, in_channels, out_channels, kernel_size, strides=1, padding=(0, 0), num_pads=1):
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.kernel_size = kernel_size
        self.strides = strides
        # Constant fill value(s) handed to np.pad via ``constant_values``.
        self.padding = padding
        # Number of padded rows/columns added on each border.
        self.num_pads = num_pads
        w, b = self.init_parameters()
        self.params = {'w': w, 'b': b}
        self.grads = {'dw': np.zeros(w.shape), 'db': np.zeros(b.shape)}
        self.cache = None  # (x, x_cols, w_col) saved by forward() for backward()
    def forward(self, x):
        """
        Implements the forward pass for a convolutional layer.
        :param x: input numpy array features with shape: (batch_size, number of channels, Height, Width)
        :return: output numpy array: Z, shape: (batch_size, number of filters, n_H, n_W)
        """
        # Get input size
        batch_size, num_channels, H, W = x.shape
        # Compute the dimensions of the output height and width
        n_H = int((H - self.kernel_size + 2 * self.num_pads) / self.strides) + 1
        n_W = int((W - self.kernel_size + 2 * self.num_pads) / self.strides) + 1
        out_shape = (batch_size, self.out_channels, n_H, n_W)
        # Create x_pad by padding x
        x_pad = self.pad(x, self.num_pads, self.padding)
        # im2col: gather every receptive field into a column so the
        # convolution becomes a single matrix multiplication.
        index_i, index_j, index_d = function.get_indices(x.shape, out_shape, self.kernel_size, self.strides)
        cols = x_pad[:, index_d, index_i, index_j]
        x_cols = np.concatenate(cols, axis=-1)
        w_col = np.reshape(self.params['w'], (self.out_channels, -1))
        b_col = np.reshape(self.params['b'], (-1, 1))
        # Perform matrix multiplication
        output = np.matmul(w_col, x_cols) + b_col
        # Reshape back matrix to a batch of feature maps.
        output = np.array(np.hsplit(output, batch_size)).reshape(out_shape)
        # Final check of the output size.
        assert (output.shape == out_shape)
        # Save x, x_cols, w_col for backward
        self.cache = x, x_cols, w_col
        return output
    def init_parameters(self):
        """
        Initialize parameters with Xavier initialization. Sets a layer's parameters to values chosen from a random
        uniform distribution bounded by 1/sqrt(kernel_size^2).
        :return: weights, shape: (out_channels, in_channels, kernel_size, kernel_size)
                 biases, shape: (out_channels,)
        """
        bound = 1 / np.sqrt(self.kernel_size * self.kernel_size)
        weights = np.random.uniform(-bound, bound,
                                    size=(self.out_channels, self.in_channels, self.kernel_size, self.kernel_size))
        biases = np.random.uniform(-bound, bound, size=self.out_channels)
        return weights, biases
    def pad(self, x, n, padding):
        """
        Pad all images of the dataset x with a constant value. The padding is applied to the
        height and width of each image.
        :param x: numpy array of shape (batch_size, n_C, n_H, n_W)
        :param n: integer, amount of padding around each image on vertical and horizontal dimensions
        :param padding: constant fill value(s) passed to np.pad (``constant_values``);
                        the default (0, 0) gives zero padding
        :return: x_pad -- padded images of shape (batch_size, n_C, n_H + 2*n, n_W + 2*n)
        """
        x_pad = np.pad(x, ((0, 0), (0, 0), (n, n), (n, n)), 'constant', constant_values=padding)
        return x_pad
    def backward(self, dz):
        """
        Implement the backward propagation for a convolutional layer.
        :param dz: gradient of the cost with respect to the output of the conv layer (Z), shape: (batch_size, n_C, n_H, n_W)
        :return: dx: error of the current convolutional layer.
                 Weight and bias gradients are stored in self.grads ('dw', 'db').
        """
        # Get the output of the previous layer
        x, x_cols, w_col = self.cache
        output_shape = dz.shape
        # Gradient buffer in padded coordinates
        dx_pad = self.pad(np.zeros(x.shape), self.num_pads, self.padding)
        # Get batch size
        batch_size = x.shape[0]
        # Compute bias gradient
        self.grads['db'] = np.sum(dz, axis=(0, 2, 3))
        # Reshape dz the same way forward() laid out the columns.
        dz = np.reshape(dz, (dz.shape[0] * dz.shape[1], dz.shape[2] * dz.shape[3]))
        dz = np.array(np.vsplit(dz, batch_size))
        dz = np.concatenate(dz, axis=-1)
        # Gradient of the previous layer's output (columns)
        dx_cols = np.matmul(w_col.T, dz)
        # Weight gradient (columns)
        dw_col = np.matmul(dz, x_cols.T)
        index_i, index_j, index_d = function.get_indices(x.shape, output_shape, self.kernel_size, self.strides)
        dx_cols_reshaped = np.array(np.hsplit(dx_cols, batch_size))
        # Scatter-add the column gradients back to image positions.
        np.add.at(dx_pad, (slice(None), index_d, index_i, index_j), dx_cols_reshaped)
        # Remove the padding.  Guard the num_pads == 0 case: a 0:-0 slice
        # is empty, which broke unpadded layers in the original code.
        if self.num_pads > 0:
            dx = dx_pad[:, :, self.num_pads:-self.num_pads, self.num_pads:-self.num_pads]
        else:
            dx = dx_pad
        # Reshape dw_col into dw.
        self.grads['dw'] = np.reshape(dw_col, (self.out_channels, self.in_channels, self.kernel_size, self.kernel_size))
        # Final check that the output shape is correct
        assert (dx.shape == x.shape)
        return dx
class MaxPooling2d(object):
    """
    Implements a 2D max-pooling layer for NCHW inputs.
    """
    def __init__(self, kernel_size, strides):
        self.kernel_size = kernel_size
        self.strides = strides
        self.cache = None  # input saved by forward() for use in backward()
    def forward(self, x):
        """
        Forward pass of the max-pooling layer.
        :param x: input features, shape: (batch_size, number of channels, Height, Width)
        :return: a numpy array of shape (batch_size, n_C, n_H, n_W).
        """
        # Remember the input for the backward pass
        self.cache = x
        n_batch, n_chan, in_h, in_w = x.shape
        # Output spatial dimensions
        out_h = int((in_h - self.kernel_size) / self.strides) + 1
        out_w = int((in_w - self.kernel_size) / self.strides) + 1
        pooled = np.zeros((n_batch, n_chan, out_h, out_w))
        for b in range(n_batch):
            for ch in range(n_chan):
                for row in range(out_h):
                    top = row * self.strides
                    bottom = top + self.kernel_size
                    for col in range(out_w):
                        left = col * self.strides
                        right = left + self.kernel_size
                        # Max over the current window
                        pooled[b, ch, row, col] = np.max(
                            x[b, ch, top:bottom, left:right]
                        )
        # Final check that the output shape is correct
        assert pooled.shape == (n_batch, n_chan, out_h, out_w)
        return pooled
    def backward(self, dz):
        """
        Backward pass of the max-pooling layer.
        :param dz: gradient of cost with respect to the output of the max pooling layer
        :return: dx, gradient of cost with respect to the input of the pooling layer
        """
        x = self.cache
        n_batch, n_chan, out_h, out_w = dz.shape
        dx = np.zeros(x.shape)
        for b in range(n_batch):
            for ch in range(n_chan):
                for row in range(out_h):
                    top = row * self.strides
                    bottom = top + self.kernel_size
                    for col in range(out_w):
                        left = col * self.strides
                        right = left + self.kernel_size
                        window = x[b, ch, top:bottom, left:right]
                        # Route the gradient only to the max element
                        mask = function.create_mask_from_window(window)
                        dx[b, ch, top:bottom, left:right] += mask * dz[b, ch, row, col]
        # Final check that the output shape is correct
        assert dx.shape == x.shape
        return dx
class Flatten(object):
    """
    Collapses each sample of the input to a 1-D vector, remembering the
    input shape so backward() can undo the reshape.
    """
    def __init__(self):
        self.forward_shape = None  # input shape recorded by forward()
    def forward(self, x):
        # Keep the shape so the gradient can be restored later.
        self.forward_shape = x.shape
        return x.reshape(x.shape[0], -1)
    def backward(self, dz):
        # Restore the gradient to the original input shape.
        return np.reshape(dz, self.forward_shape)
class Dense(object):
    """
    Fully connected (linear) layer: z = x @ w.T + b.
    """
    def __init__(self, in_dims, out_dims):
        self.in_dims = in_dims
        self.out_dims = out_dims
        w, b = self.init_parameters()
        self.params = {'w': w, 'b': b}
        self.grads = {'dw': np.zeros(self.params['w'].shape),
                      'db': np.zeros(self.params['b'].shape)}
        self.cache = None  # input saved by forward() for use in backward()
    def init_parameters(self):
        # Scaled-normal initialization with std = sqrt(1 / in_dims).
        scale = np.sqrt(1. / self.in_dims)
        weights = np.random.randn(self.out_dims, self.in_dims) * scale
        biases = np.random.randn(1, self.out_dims) * scale
        return weights, biases
    def forward(self, x):
        """
        Forward pass for the fully connected layer.
        :param x: input feature, shape: (batch_size, in_dims)
        :return: out, shape: (batch_size, out_dims)
        """
        self.cache = x
        return np.matmul(x, self.params['w'].T) + self.params['b']
    def backward(self, dz):
        """
        Backward pass for the fully connected layer.
        :param dz: gradient of cost with respect to this layer's output
        :return: dx, gradient of cost with respect to this layer's input
        """
        x = self.cache
        batch_size = x.shape[0]
        # Parameter gradients, averaged over the batch.
        self.grads['dw'] = (1. / batch_size) * np.matmul(dz.T, x)
        self.grads['db'] = (1. / batch_size) * np.sum(dz, axis=0, keepdims=True)
        # Gradient w.r.t. the input.
        return np.matmul(dz, self.params['w'])
class ReLU(object):
"""
Implement:
ReLU activation function,
ReLU(x)=max(0,x).
Derivative of ReLU,
1 for x >= 0, 0 for x < 0.
"""
def __init__(self):
self.cache = None
def forward(self, x):
self.cache = x
return np.maximum(0, x)
def backward(self, dz):
x = self.cache
dx = dz * np.where(x <= 0, 0, | |
x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
_gskernel.GsScreenDisplay_swiginit(self, _gskernel.new_GsScreenDisplay(*args))
__swig_destroy__ = _gskernel.delete_GsScreenDisplay
def BindDevice(self, pDevice: 'GsPaintDevice') -> "void":
r""" 绑定设备"""
return _gskernel.GsScreenDisplay_BindDevice(self, pDevice)
def BeginRecording(self, order: 'int') -> "GsImageCanvas *":
r""" 开始记录。"""
return _gskernel.GsScreenDisplay_BeginRecording(self, order)
def RecordingCanvas(self) -> "GsImageCanvas *":
r""" 正用于记录的画布"""
return _gskernel.GsScreenDisplay_RecordingCanvas(self)
def EndRecording(self) -> "void":
r""" 结束记录"""
return _gskernel.GsScreenDisplay_EndRecording(self)
def PanStart(self, x: 'double', y: 'double') -> "void":
r"""
从地理坐标开始漫游 :type x: float
:param x: 地理x坐标 :type y: float
:param y: 地理y坐标
"""
return _gskernel.GsScreenDisplay_PanStart(self, x, y)
def PanMoveTo(self, x: 'double', y: 'double') -> "void":
r"""
漫游到地理坐标 :type x: float
:param x: 地理x坐标 :type y: float
:param y: 地理y坐标
"""
return _gskernel.GsScreenDisplay_PanMoveTo(self, x, y)
def PanStop(self) -> "GsBox":
r"""
停止漫游 :rtype: :py:class:`GsBox`
:return: 返回漫游结束时应该实现的地理范围
"""
return _gskernel.GsScreenDisplay_PanStop(self)
def HasStartPan(self) -> "bool":
return _gskernel.GsScreenDisplay_HasStartPan(self)
def Paint(self, *args) -> "void":
r"""
将缓存的内容绘制到设备上 :type pt: :py:class:`GsPT`
:param pt: 绘制偏移的像素坐标
"""
return _gskernel.GsScreenDisplay_Paint(self, *args)
def OnSizeChanged(self) -> "void":
r""" 当绘制设备发生变化时"""
return _gskernel.GsScreenDisplay_OnSizeChanged(self)
def StartDrawing(self) -> "void":
r""" 开始绘制"""
return _gskernel.GsScreenDisplay_StartDrawing(self)
def EndDrawing(self) -> "void":
r""" 结束绘制"""
return _gskernel.GsScreenDisplay_EndDrawing(self)
def Flush(self) -> "void":
r""" 提交绘制结果到屏幕"""
return _gskernel.GsScreenDisplay_Flush(self)
# Register GsScreenDisplay in _gskernel:
_gskernel.GsScreenDisplay_swigregister(GsScreenDisplay)
# Draw-phase enum values re-exported from the C extension module.
# eDrawNormal: normal/general drawing.
eDrawNormal = _gskernel.eDrawNormal
r""" 通用绘制"""
# eDrawSelectionSet: draw the selection set.
eDrawSelectionSet = _gskernel.eDrawSelectionSet
r""" 绘制选择集"""
class GsLayer(GsRefObject):
    r""" Abstract base class for map layers"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined - class is abstract")
    __repr__ = _swig_repr
    __swig_destroy__ = _gskernel.delete_GsLayer
    def Name(self, *args) -> "void":
        r"""
        *Overload 1:*
        Get the layer's name
        |
        *Overload 2:*
        Set the layer's name
        """
        return _gskernel.GsLayer_Name(self, *args)
    def AliasName(self, *args) -> "void":
        r"""
        *Overload 1:*
        Get the layer's alias
        |
        *Overload 2:*
        Set the layer's alias
        """
        return _gskernel.GsLayer_AliasName(self, *args)
    def Visible(self, *args) -> "void":
        r"""
        *Overload 1:*
        Whether the layer is visible
        |
        *Overload 2:*
        Set whether the layer is visible
        """
        return _gskernel.GsLayer_Visible(self, *args)
    def ReferenceScale(self, *args) -> "void":
        r"""
        *Overload 1:*
        Get the reference scale
        |
        *Overload 2:*
        Set the reference scale
        """
        return _gskernel.GsLayer_ReferenceScale(self, *args)
    def MinScale(self, *args) -> "void":
        r"""
        *Overload 1:*
        Get the layer's minimum visible scale
        |
        *Overload 2:*
        Set the layer's minimum visible scale
        """
        return _gskernel.GsLayer_MinScale(self, *args)
    def MaxScale(self, *args) -> "void":
        r"""
        *Overload 1:*
        Get the layer's maximum visible scale
        |
        *Overload 2:*
        Set the layer's maximum visible scale
        """
        return _gskernel.GsLayer_MaxScale(self, *args)
    def Tag(self, *args) -> "void":
        r"""
        *Overload 1:*
        Get the layer's tag (the generated doc literally repeats "name"; presumably the tag)
        |
        *Overload 2:*
        Set the layer's tag
        """
        return _gskernel.GsLayer_Tag(self, *args)
    def IsValid(self) -> "bool":
        r""" Whether the layer is valid"""
        return _gskernel.GsLayer_IsValid(self)
    def Extent(self, pTargetSR: 'GsSpatialReference'=None) -> "GsBox":
        r""" The layer's full extent, expressed in the layer's spatial reference"""
        return _gskernel.GsLayer_Extent(self, pTargetSR)
    def HasSelection(self) -> "bool":
        r""" Whether the layer has a selection set"""
        return _gskernel.GsLayer_HasSelection(self)
    def SpatialReference(self) -> "GsSpatialReference *":
        r""" The layer's spatial reference"""
        return _gskernel.GsLayer_SpatialReference(self)
    def Draw(self, pDisplay: 'GsDisplay', pCancel: 'GsTrackCancel', eDrawPhase: 'GsDrawPhase') -> "bool":
        r"""
        Layer drawing entry point. :param pDisplay: the display object to draw on :type pCancel: :py:class:`GsTrackCancel`
        :param pCancel: object used to cancel the drawing
        """
        return _gskernel.GsLayer_Draw(self, pDisplay, pCancel, eDrawPhase)
# Register GsLayer in _gskernel:
_gskernel.GsLayer_swigregister(GsLayer)
class GsLayerVector(object):
    r""" SWIG wrapper over a vector of GsLayer smart pointers"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self):
        _gskernel.GsLayerVector_swiginit(self, _gskernel.new_GsLayerVector())
    __swig_destroy__ = _gskernel.delete_GsLayerVector
    def add(self, obj: 'GsLayer') -> "void":
        r""" Append a layer to the vector"""
        return _gskernel.GsLayerVector_add(self, obj)
    def size(self) -> "size_t":
        r""" Number of elements"""
        return _gskernel.GsLayerVector_size(self)
    def clear(self) -> "void":
        r""" Remove all elements"""
        return _gskernel.GsLayerVector_clear(self)
    def empty(self) -> "bool":
        r""" Whether the vector is empty"""
        return _gskernel.GsLayerVector_empty(self)
    def get(self, n: 'int') -> "GsSmarterPtr< GsLayer >":
        r""" Get the element at index n"""
        return _gskernel.GsLayerVector_get(self, n)
    def set(self, n: 'int', val: 'GsLayer') -> "void":
        r""" Replace the element at index n"""
        return _gskernel.GsLayerVector_set(self, n, val)
# Register GsLayerVector in _gskernel:
_gskernel.GsLayerVector_swigregister(GsLayerVector)
class GsMultiLayer(GsLayer):
    r""" Composite layer"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self):
        _gskernel.GsMultiLayer_swiginit(self, _gskernel.new_GsMultiLayer())
    __swig_destroy__ = _gskernel.delete_GsMultiLayer
    def Layers(self) -> "GsVector< GsSmarterPtr< GsLayer > > *":
        r""" Get a pointer to the list of child layers"""
        return _gskernel.GsMultiLayer_Layers(self)
    def Extent(self, pTargetSR: 'GsSpatialReference'=None) -> "GsBox":
        r""" The layer's full extent"""
        return _gskernel.GsMultiLayer_Extent(self, pTargetSR)
    def HasSelection(self) -> "bool":
        r""" Whether the layer has a selection set"""
        return _gskernel.GsMultiLayer_HasSelection(self)
    def SpatialReference(self) -> "GsSpatialReference *":
        r""" The layer's spatial reference"""
        return _gskernel.GsMultiLayer_SpatialReference(self)
    def IsValid(self) -> "bool":
        r""" Whether the layer is valid"""
        return _gskernel.GsMultiLayer_IsValid(self)
# Register GsMultiLayer in _gskernel:
_gskernel.GsMultiLayer_swigregister(GsMultiLayer)
class GsLayerCollection(GsRefObject):
    r""" Collection of layers"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self):
        _gskernel.GsLayerCollection_swiginit(self, _gskernel.new_GsLayerCollection())
    __swig_destroy__ = _gskernel.delete_GsLayerCollection
    def Count(self) -> "int":
        r""" Number of layers"""
        return _gskernel.GsLayerCollection_Count(self)
    def Layer(self, i: 'int') -> "GsLayer *":
        r""" Get a layer object by index"""
        return _gskernel.GsLayerCollection_Layer(self, i)
    def Add(self, lyr: 'GsLayer') -> "bool":
        r""" Append a layer at the end"""
        return _gskernel.GsLayerCollection_Add(self, lyr)
    def Insert(self, i: 'int', lyr: 'GsLayer') -> "bool":
        r""" Insert a layer at a specific position"""
        return _gskernel.GsLayerCollection_Insert(self, i, lyr)
    def Clear(self) -> "void":
        r""" Remove all layers"""
        return _gskernel.GsLayerCollection_Clear(self)
    def Remove(self, *args) -> "bool":
        r"""
        *Overload 1:*
        Remove the specified layer
        |
        *Overload 2:*
        Remove the specified layer
        """
        return _gskernel.GsLayerCollection_Remove(self, *args)
    def Move(self, lyr: 'GsLayer', i: 'int') -> "bool":
        r""" Move a layer to a specific position"""
        return _gskernel.GsLayerCollection_Move(self, lyr, i)
    def IndexOf(self, lyr: 'GsLayer') -> "int":
        r""" Get the index of the layer pointer within the collection"""
        return _gskernel.GsLayerCollection_IndexOf(self, lyr)
    def Clone(self) -> "GsSmarterPtr< GsLayerCollection >":
        r""" Clone the layer collection"""
        return _gskernel.GsLayerCollection_Clone(self)
    def Layers(self) -> "GsVector< GsSmarterPtr< GsLayer > > *":
        r""" Get the layer array"""
        return _gskernel.GsLayerCollection_Layers(self)
# Register GsLayerCollection in _gskernel:
_gskernel.GsLayerCollection_swigregister(GsLayerCollection)
class GsMap(GsRefObject):
    r""" Map object that draws multiple layers"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, pDisplay: 'GsScreenDisplay'):
        r""" Construct from a screen display object"""
        _gskernel.GsMap_swiginit(self, _gskernel.new_GsMap(pDisplay))
    __swig_destroy__ = _gskernel.delete_GsMap
    def TrackCancel(self) -> "GsTrackCancel *":
        r""" Pointer to the cancellation-tracking object"""
        return _gskernel.GsMap_TrackCancel(self)
    def ScreenDisplay(self) -> "GsScreenDisplay *":
        r""" Get the screen display object"""
        return _gskernel.GsMap_ScreenDisplay(self)
    def Cancel(self) -> "void":
        r""" Cancel the current drawing"""
        return _gskernel.GsMap_Cancel(self)
    def IsDrawing(self) -> "bool":
        r""" Whether the map is currently drawing"""
        return _gskernel.GsMap_IsDrawing(self)
    def Layers(self) -> "GsVector< GsSmarterPtr< GsLayer > > *":
        r""" Get a pointer to the list of layers"""
        return _gskernel.GsMap_Layers(self)
    def DynamicLayers(self) -> "GsVector< GsSmarterPtr< GsLayer > > *":
        return _gskernel.GsMap_DynamicLayers(self)
    def LayerCollection(self) -> "GsSmarterPtr< GsLayerCollection >":
        return _gskernel.GsMap_LayerCollection(self)
    def DynamicLayerCollection(self) -> "GsSmarterPtr< GsLayerCollection >":
        return _gskernel.GsMap_DynamicLayerCollection(self)
    def Update(self) -> "void":
        r""" Force-refresh the map: invalidates the map and forces a redraw"""
        return _gskernel.GsMap_Update(self)
    def Paint(self, eReason: 'GsDrawPhase'=eDrawNormal) -> "void":
        r""" Paint the map. If the map data has not been invalidated the buffered bitmap is drawn directly; otherwise the map is redrawn"""
        return _gskernel.GsMap_Paint(self, eReason)
    def Invalidate(self) -> "void":
        r""" Invalidate the map"""
        return _gskernel.GsMap_Invalidate(self)
    def IsValid(self) -> "bool":
        r""" Whether the map has been invalidated"""
        return _gskernel.GsMap_IsValid(self)
    def FullExtent(self) -> "GsBox":
        r""" Full map extent: the union of all layers' extents"""
        return _gskernel.GsMap_FullExtent(self)
    def ViewExtent(self, *args) -> "GsBox":
        r"""
        *Overload 1:*
        Set the current view extent. Cancels the current drawing and invalidates the map, but does not repaint immediately; call Paint or Update to refresh the map
        |
        *Overload 2:*
        Get the current view extent. Equivalent to calling Display()->DisplayTransformation()->MapExtent()
        """
        return _gskernel.GsMap_ViewExtent(self, *args)
    def Output(self, pDisp: 'GsDisplay', pCancel: 'GsTrackCancel') -> "bool":
        r"""
        Draw the map's data onto the supplied Display. The caller must call the Display's StartDraw beforehand and EndDraw after the output finishes. :type pDisp: :py:class:`GsDisplay`
        :param pDisp: the Display to output the data to
        """
        return _gskernel.GsMap_Output(self, pDisp, pCancel)
    def Clone(self) -> "GsSmarterPtr< GsMap >":
        return _gskernel.GsMap_Clone(self)
# Register GsMap in _gskernel:
_gskernel.GsMap_swigregister(GsMap)
class GsRendition(GsRefObject):
    r""" Renderer base class: implements the rendering of data."""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    __swig_destroy__ = _gskernel.delete_GsRendition
    def Begin(self, pDisplay: 'GsDisplay') -> "void":
        r""" Begin rendering"""
        return _gskernel.GsRendition_Begin(self, pDisplay)
    def End(self) -> "void":
        r""" End rendering"""
        return _gskernel.GsRendition_End(self)
    def TextMinScale(self, *args) -> "double":
        # No generated doc; presumably the get/set overloads for the
        # minimum scale at which text is rendered — confirm against C++.
        return _gskernel.GsRendition_TextMinScale(self, *args)
    def TextMaxScale(self, *args) -> "double":
        # No generated doc; presumably the get/set overloads for the
        # maximum scale at which text is rendered — confirm against C++.
        return _gskernel.GsRendition_TextMaxScale(self, *args)
# Register GsRendition in _gskernel:
_gskernel.GsRendition_swigregister(GsRendition)
class GsFeatureRendition(GsRendition):
    r""" Renderer for vector (feature) data"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    def __init__(self, *args, **kwargs):
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    __swig_destroy__ = _gskernel.delete_GsFeatureRendition
    def Render(self, pFea: 'GsFeature', pSym: 'GsSymbol') -> "bool":
        r""" Render a single feature"""
        return _gskernel.GsFeatureRendition_Render(self, pFea, pSym)
# Register GsFeatureRendition in _gskernel:
_gskernel.GsFeatureRendition_swigregister(GsFeatureRendition)
class GsLabelRendition(GsFeatureRendition):
    r""" Label (annotation) renderer"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
    __repr__ = _swig_repr
    def __init__(self, pLabelProperty: 'GsLabelProperty'):
        _gskernel.GsLabelRendition_swiginit(self, _gskernel.new_GsLabelRendition(pLabelProperty))
    __swig_destroy__ = _gskernel.delete_GsLabelRendition
    def Begin(self, pDisplay: 'GsDisplay') -> "void":
        r""" Begin rendering"""
        return _gskernel.GsLabelRendition_Begin(self, pDisplay)
    def End(self) -> "void":
        r""" End rendering"""
        return _gskernel.GsLabelRendition_End(self)
    def Render(self, pFea: 'GsFeature', pSym: 'GsSymbol') -> "bool":
        r""" Render a single feature"""
        return _gskernel.GsLabelRendition_Render(self, pFea, pSym)
    def LabelProperty(self) -> "GsLabelProperty *":
        r""" Return the LabelProperty"""
        return _gskernel.GsLabelRendition_LabelProperty(self)
# Register GsLabelRendition in _gskernel:
_gskernel.GsLabelRendition_swigregister(GsLabelRendition)
class GsFeatureRenderer(GsRefObject):
    r"""Abstract base class for feature (vector) renderers. (SWIG-generated wrapper.)"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Abstract wrapper: instances are only created from the C++ side.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    __swig_destroy__ = _gskernel.delete_GsFeatureRenderer

    def Rendition(self, nIndex: 'int'=0) -> "GsFeatureRendition *":
        r"""Get the rendition at the given index (defaults to the first)."""
        return _gskernel.GsFeatureRenderer_Rendition(self, nIndex)

    def AddRendition(self, pRen: 'GsFeatureRendition') -> "void":
        r"""Add a rendition to this renderer."""
        return _gskernel.GsFeatureRenderer_AddRendition(self, pRen)

    def DrawCursor(self, pFeaCursor: 'GsFeatureCursor', pDisplay: 'GsDisplay', pCancel: 'GsTrackCancel') -> "void":
        r"""Draw the data contained in a feature cursor."""
        return _gskernel.GsFeatureRenderer_DrawCursor(self, pFeaCursor, pDisplay, pCancel)

    def CoordinateTransformation(self, *args) -> "void":
        r"""
        *Overload 1:*
        Get the coordinate transformation object
        |
        *Overload 2:*
        Set the coordinate transformation object
        """
        return _gskernel.GsFeatureRenderer_CoordinateTransformation(self, *args)

# Register GsFeatureRenderer in _gskernel:
_gskernel.GsFeatureRenderer_swigregister(GsFeatureRenderer)
class GsRasterRenderer(GsRefObject):
    r"""Abstract base class for raster data renderers. (SWIG-generated wrapper.)"""
    thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')

    def __init__(self, *args, **kwargs):
        # Abstract wrapper: instances are only created from the C++ side.
        raise AttributeError("No constructor defined")
    __repr__ = _swig_repr
    __swig_destroy__ = _gskernel.delete_GsRasterRenderer

    def DrawCursor(self, pRasterCursor: 'GsRasterCursor', pDisplay: 'GsDisplay', pCancel: 'GsTrackCancel') -> "void":
        r"""Draw the data contained in a raster cursor."""
        return _gskernel.GsRasterRenderer_DrawCursor(self, pRasterCursor, pDisplay, pCancel)

    def Transparency(self, dblTrans: 'double') -> "void":
        r"""Set the transparency; value ranges from 0 to 1."""
        return _gskernel.GsRasterRenderer_Transparency(self, dblTrans)

# Register GsRasterRenderer in _gskernel:
_gskernel.GsRasterRenderer_swigregister(GsRasterRenderer)
class GsRGBARasterRenderer(GsRasterRenderer):
r""" RGBA渲染器 游标中所有的像素统一使用RGBA值进行绘制"""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, *args):
_gskernel.GsRGBARasterRenderer_swiginit(self, _gskernel.new_GsRGBARasterRenderer(*args))
__swig_destroy__ | |
# scripts/ml_breakdown.py (from the ml_tools collection)
# -= ml_breakdown.py =-
# __ by <NAME>
# ____ ___ / / http://morganloomis.com
# / __ `__ \/ / Revision 4
# / / / / / / / 2018-05-13
# /_/ /_/ /_/_/ _________
# /_________/
#
# ______________
# - -/__ License __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Copyright 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# ___________________
# - -/__ Installation __/- - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Copy this file into your maya scripts directory, for example:
# C:/Documents and Settings/user/My Documents/maya/scripts/ml_breakdown.py
#
# Run the tool in a python shell or shelf button by importing the module,
# and then calling the primary function:
#
# import ml_breakdown
# ml_breakdown.ui()
#
#
# __________________
# - -/__ Description __/- - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Blend a keyframe or pose with the next or previous keys, essentially creating a
# breakdown pose that is weighted one way or the other.
#
# ____________
# - -/__ Usage __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Press the "Breakdown Dragger" button to enter the dragger, and the cursor will
# turn into a hand. Left-click and hold in the viewport, and then drag either left
# or right to weight the key to the next or previous key. Press and hold the
# middle mouse button to weight the key toward or away from the average of the
# surrounding keys. Alternately, set the slider to the desired weight, and press
# the Next, Previous or Average buttons to increment the breakdown. Right click
# the buttons to assign to hotkeys. If you have no keys selected, the tool will
# act only on curves that are visibile in the graph editor. If there are no keys
# at the current frame, keys will be set.
#
# ____________
# - -/__ Video __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# http://www.youtube.com/watch?v=D8yD4zbHTP8
#
# _________
# - -/__ Ui __/- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# [Breakdown Dragger] : Drag in the viewport to weight a breakdown toward the next or previous frame.
# [<<] : Weight toward the previous frame.
# [Average] : Weight toward the average of the next and previous frame.
# [>>] : Weight toward the next frame.
#
# ___________________
# - -/__ Requirements __/- - - - - - - - - - - - - - - - - - - - - - - - - -
#
# This script requires the ml_utilities module, which can be downloaded here:
# https://raw.githubusercontent.com/morganloomis/ml_tools/master/ml_utilities.py
#
# __________
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - /_ Enjoy! _/- - -
# Module metadata consumed by the ml_tools framework.
__author__ = '<NAME>'
__license__ = 'MIT'
__revision__ = 4
__category__ = 'animation'

# Shelf-button description (presumably read by the ml_tools shelf installer):
# single click starts the dragger, double click opens the full UI, and the
# right-click menu exposes the individual weighting commands.
shelfButton = {'annotation': 'Click to weight keys by dragging, double click to open UI.',
               'command': 'import ml_breakdown;ml_breakdown.drag()',
               'doubleClickCommand': 'import ml_breakdown;ml_breakdown.ui()',
               'imageOverlayLabel': 'BD',
               'menuItem': [['Breakdown UI', 'import ml_breakdown;ml_breakdown.ui()'],
                            ['<< Previous', 'import ml_breakdown;ml_breakdown.weightPrevious()'],
                            ['>> Next', 'import ml_breakdown;ml_breakdown.weightNext()'],
                            ['Average', 'import ml_breakdown;ml_breakdown.weightAverage()']],
               'order': 12}

import maya.cmds as mc
from maya import OpenMaya
from functools import partial

# ml_utilities is a required companion module; if it is missing, offer a
# download link instead of failing with a raw ImportError.
try:
    import ml_utilities as utl
    utl.upToDateCheck(32)
except ImportError:
    result = mc.confirmDialog( title='Module Not Found',
                message='This tool requires the ml_utilities module. Once downloaded you will need to restart Maya.',
                button=['Download Module','Cancel'],
                defaultButton='Cancel', cancelButton='Cancel', dismissString='Cancel' )

    if result == 'Download Module':
        mc.showHelp('http://morganloomis.com/tool/ml_utilities/',absolute=True)
def ui():
    '''
    User interface for breakdown
    '''
    with utl.MlUi('ml_breakdown', 'Breakdown Tools', width=400, height=180, info='''Select objects.
Press Breakdown Dragger to create a new key and weight it by dragging in the viewport.
Otherwise use the increment buttons to nudge a key's value toward the next or previous key.''') as win:

        # NOTE(review): lowercase buttonWithPopup here vs ButtonWithPopup below --
        # confirm both spellings exist on MlUi, or this line raises at runtime.
        win.buttonWithPopup(label='Breakdown Dragger', command=drag, annotation='Drag in the viewport to weight a breakdown toward the next or previous frame.',
                            shelfLabel='BDD')
        mc.separator(height=20)

        # Shared weight slider; its value is read by the three buttons below
        # via readUI_toArgs and passed as their `weight` argument.
        mc.floatSliderGrp('ml_breakdown_value_floatSlider', value=0.2, field=True, minValue=0, maxValue=2)
        mc.paneLayout(configuration='vertical3',separatorThickness=1)

        win.ButtonWithPopup(label='<<', command=weightPrevious, annotation='Weight toward the previous frame.', shelfLabel='<', shelfIcon='defaultTwoStackedLayout',
                            readUI_toArgs={'weight':'ml_breakdown_value_floatSlider'})
        win.ButtonWithPopup(label='Average', command=weightAverage, annotation='Weight toward the average of the next and previous frame.', shelfLabel='><', shelfIcon='defaultTwoStackedLayout',
                            readUI_toArgs={'weight':'ml_breakdown_value_floatSlider'})
        win.ButtonWithPopup(label='>>', command=weightNext, annotation='Weight toward the next frame.', shelfLabel='>', shelfIcon='defaultTwoStackedLayout',
                            readUI_toArgs={'weight':'ml_breakdown_value_floatSlider'})
def quickBreakDownUI():
    '''Build a compact window of preset breakdown weights.

    Each row holds two buttons that nudge the current key toward the
    previous (left column) or next (right column) key by a fixed percentage.
    '''
    windowName = 'ml_quickBreakdownWin'

    # Rebuild the window from scratch if it already exists.
    if mc.window(windowName, exists=True):
        mc.deleteUI(windowName)
    mc.window(windowName, title='ml :: QBD', iconName='Quick Breakdown', width=100, height=500)

    mc.columnLayout(adj=True)

    # Header row labelling the previous/next columns.
    mc.paneLayout(configuration='vertical2', separatorThickness=1)
    mc.text('<<')
    mc.text('>>')
    mc.setParent('..')

    # One two-button row per preset percentage.
    for percent in (10, 20, 50, 80, 90, 100, 110, 120, 150):
        weightValue = percent / 100.0
        buttonLabel = '{0} %'.format(percent)
        mc.paneLayout(configuration='vertical2', separatorThickness=1)
        mc.button(label=buttonLabel, command=partial(weightPrevious, weightValue))
        mc.button(label=buttonLabel, command=partial(weightNext, weightValue))
        mc.setParent('..')

    mc.showWindow(windowName)
    mc.window(windowName, edit=True, width=100, height=250)
def drag(*args):
    '''The primary command to run the tool: start the interactive dragger.'''
    BreakdownDragger()

def weightPrevious(weight=0.2, *args):
    '''Nudge the current key(s) toward the previous key by `weight`.
    *args absorbs any extra arguments Maya passes to UI commands.'''
    weightBreakdownStep(direction='previous', weight=weight)

def weightAverage(weight=0.2, *args):
    '''Nudge the current key(s) toward the average of the surrounding keys.'''
    weightBreakdownStep(direction='average', weight=weight)

def weightNext(weight=0.2, *args):
    '''Nudge the current key(s) toward the next key by `weight`.'''
    weightBreakdownStep(direction='next', weight=weight)
def weightBreakdownStep(direction='next', weight=0.2):
    '''Weight the current key(s) toward a neighboring key value.

    Acts on selected keys if any; otherwise sets keys on curves visible in
    the graph editor, falling back to all keyed channels.

    :param direction: 'next', 'previous' or 'average' -- which neighbor
        value to blend toward.
    :param weight: blend factor; 0 leaves keys unchanged, 1 matches the
        target value exactly.
    :raises ValueError: if direction is not one of the recognized options.
    '''
    # Fail loudly on a bad direction instead of silently doing nothing
    # (the original code broke out of the apply loop on an unknown value).
    if direction not in ('next', 'previous', 'average'):
        raise ValueError("direction must be 'next', 'previous' or 'average', got %r" % direction)

    keySel = utl.KeySelection()
    if keySel.selectedKeys():
        pass
    elif keySel.visibleInGraphEditor():
        keySel.setKeyframe()
    elif keySel.keyedChannels():
        keySel.setKeyframe()

    if not keySel.curves:
        return

    # Collect [curve, time, value, nextValue, prevValue] for every key
    # BEFORE modifying anything, so edits don't affect later lookups.
    data = list()
    for curve in keySel.curves:
        if keySel.selected:
            times = mc.keyframe(curve, query=True, timeChange=True, sl=True)
            values = mc.keyframe(curve, query=True, valueChange=True, sl=True)
        else:
            # NOTE(review): keySel.time may itself be a tuple; it is wrapped
            # in a list here to mirror the selected-keys branch -- confirm
            # against ml_utilities.KeySelection.
            times = [keySel.time]
            values = mc.keyframe(curve, time=keySel.time, query=True, valueChange=True)

        for t, v in zip(times, values):
            nextTime = mc.findKeyframe(curve, time=(t,), which='next')
            nextValue = mc.keyframe(curve, time=(nextTime,), query=True, valueChange=True)[0]
            prevTime = mc.findKeyframe(curve, time=(t,), which='previous')
            prevValue = mc.keyframe(curve, time=(prevTime,), query=True, valueChange=True)[0]
            data.append([curve, t, v, nextValue, prevValue])

    for curve, t, v, nextValue, prevValue in data:
        if direction == 'next':
            target = nextValue
        elif direction == 'previous':
            target = prevValue
        else:  # 'average' -- validated above
            target = (nextValue + prevValue) / 2
        # Blend the key's value toward the target by `weight`.
        mc.keyframe(curve, time=(t,), valueChange=v + (target - v) * weight)
class BreakdownDragger(utl.Dragger):
'''Creates the tool and manages the data'''
def __init__(self,
name='mlBreakdownDraggerContext',
minValue=None,
maxValue=None,
defaultValue=0,
title = 'Breakdown'):
self.keySel = utl.KeySelection()
if self.keySel.selectedKeys():
pass
elif self.keySel.visibleInGraphEditor():
self.keySel.setKeyframe()
elif self.keySel.keyedChannels():
self.keySel.setKeyframe()
if not self.keySel.curves:
return
utl.Dragger.__init__(self, defaultValue=defaultValue, minValue=minValue, maxValue=maxValue, name=name, title=title)
#setup tangent type
itt,ott = utl.getHoldTangentType()
self.time = dict()
self.value = dict()
self.next = dict()
self.prev = dict()
self.average = dict()
for curve in self.keySel.curves:
if self.keySel.selected:
self.time[curve] = mc.keyframe(curve, query=True, timeChange=True, sl=True)
self.value[curve] = mc.keyframe(curve, query=True, valueChange=True, sl=True)
else:
self.time[curve] = self.keySel.time
self.value[curve] = mc.keyframe(curve, time=self.keySel.time, query=True, valueChange=True)
self.next[curve] = list()
self.prev[curve] = list()
self.average[curve] = list()
for i in self.time[curve]:
next = mc.findKeyframe(curve, time=(i,), which='next')
prev = mc.findKeyframe(curve, time=(i,), which='previous')
n = mc.keyframe(curve, time=(next,), query=True, valueChange=True)[0]
p = mc.keyframe(curve, time=(prev,), query=True, valueChange=True)[0]
self.next[curve].append(n)
self.prev[curve].append(p)
self.average[curve].append((n+p)/2)
#set the tangents on this key, and the next and previous, so they flatten properly
mc.keyTangent(curve, time=(i,), itt=itt, ott=ott)
mc.keyTangent(curve, time=(next,), itt=itt)
mc.keyTangent(curve, time=(prev,), ott=ott)
self.setTool()
self.drawString('Left: Weight Prev/Next, Middle: Weight Average')
OpenMaya.MGlobal.displayWarning('Left: Weight Prev/Next, Middle: | |
earlier_ed_cc7d_admits = earlier_ed_admits[earlier_ed_admits['curr_ward'] == 'CC7D']
earlier_ed_ccxd_admits = pd.concat([earlier_ed_cc6d_admits, earlier_ed_cc7d_admits])
earlier_ed_micu_admits = earlier_ed_admits[earlier_ed_admits['icustay_id'].isin(micu_icustay_ids.index)]
earlier_ed_micu_boarder_admits = earlier_ed_micu_admits[(earlier_ed_micu_admits['curr_ward'] != 'CC6D') &
(earlier_ed_micu_admits['curr_ward'] != 'CC7D')]
df13b.ix[row_index, 'MEAN_ED_TIME_24HRS'] = earlier_ed_admits['ED_TIME'].mean()
df13b.ix[row_index, 'MEDIAN_ED_TIME_24HRS'] = earlier_ed_admits['ED_TIME'].median()
df13b.ix[row_index, 'COUNT_ED_ICU_ADMITS_24HRS'] = len(earlier_ed_admits.index)
df13b.ix[row_index, 'COUNT_ED_CC6D_ADMITS_24HRS'] = len(earlier_ed_cc6d_admits.index)
df13b.ix[row_index, 'COUNT_ED_CC7D_ADMITS_24HRS'] = len(earlier_ed_cc7d_admits.index)
df13b.ix[row_index, 'COUNT_ED_CCXD_ADMITS_24HRS'] = len(earlier_ed_ccxd_admits.index)
df13b.ix[row_index, 'COUNT_ED_MICU_ADMITS_24HRS_COMBINED'] = len(earlier_ed_micu_admits.index)
df13b.ix[row_index, 'COUNT_ED_MICU_BOARDER_ADMITS_24HRS_COMBINED'] = len(earlier_ed_micu_boarder_admits.index)
# In[23]:
# For each patient in our study population, find the average ED wait time for all patients
# who needed an ICU bed in the 12 hours prior to the given patient's time of ICU intime
df13b['MEAN_ED_TIME_12HRS'] = np.nan
df13b['MEDIAN_ED_TIME_12HRS'] = np.nan
df13b['COUNT_ED_ICU_ADMITS_12HRS'] = np.nan  # Number of admits from ED to any ICU
df13b['COUNT_ED_CC6D_ADMITS_12HRS'] = np.nan  # Number of admits from ED to CC6D (under care of ANY service)
df13b['COUNT_ED_CC7D_ADMITS_12HRS'] = np.nan  # Number of admits from ED to CC7D (under care of ANY service)
df13b['COUNT_ED_CCXD_ADMITS_12HRS'] = np.nan  # Number of admits from ED to EITHER CC6D or CC7D (under care of ANY service)
df13b['COUNT_ED_MICU_ADMITS_12HRS_COMBINED'] = np.nan  # Number of admits from ED to EITHER MICU-Orange or -Green TEAM
df13b['COUNT_ED_MICU_BOARDER_ADMITS_12HRS_COMBINED'] = np.nan  # Number of admits from ED to EITHER MICU-Orange or MICU-Green TEAM as a boarder
# df13b['COUNT_ED_MICU_ADMITS_12HRS_ORANGE'] = np.nan # Number of admits from ED to MICU-Orange TEAM
# df13b['COUNT_ED_MICU_ADMITS_12HRS_GREEN'] = np.nan # Number of admits from ED to MICU-Green TEAM
micu_icustay_ids = df13b.groupby('transfers.icustay_id').size()
# for row_index, row in df13b.head(100).iterrows():
for row_index, row in df13b.iterrows():
    # ED admits arriving within the 12 hours before this patient's ICU intime,
    # excluding the patient's own record.
    earlier_ed_admits = transfers_ed[(transfers_ed['intime'] < row['transfers.intime']) &
                                     ((transfers_ed['intime'] + pd.Timedelta(hours=12)) > row['transfers.intime']) &
                                     (transfers_ed['subject_id'] != row['transfers.subject_id'])]
    earlier_ed_cc6d_admits = earlier_ed_admits[earlier_ed_admits['curr_ward'] == 'CC6D']
    earlier_ed_cc7d_admits = earlier_ed_admits[earlier_ed_admits['curr_ward'] == 'CC7D']
    earlier_ed_ccxd_admits = pd.concat([earlier_ed_cc6d_admits, earlier_ed_cc7d_admits])
    earlier_ed_micu_admits = earlier_ed_admits[earlier_ed_admits['icustay_id'].isin(micu_icustay_ids.index)]
    earlier_ed_micu_boarder_admits = earlier_ed_micu_admits[(earlier_ed_micu_admits['curr_ward'] != 'CC6D') &
                                                            (earlier_ed_micu_admits['curr_ward'] != 'CC7D')]
    # .loc replaces the deprecated DataFrame.ix indexer (removed in pandas 1.0);
    # row_index comes from iterrows() and is a label, so .loc is the correct form.
    df13b.loc[row_index, 'MEAN_ED_TIME_12HRS'] = earlier_ed_admits['ED_TIME'].mean()
    df13b.loc[row_index, 'MEDIAN_ED_TIME_12HRS'] = earlier_ed_admits['ED_TIME'].median()
    df13b.loc[row_index, 'COUNT_ED_ICU_ADMITS_12HRS'] = len(earlier_ed_admits.index)
    df13b.loc[row_index, 'COUNT_ED_CC6D_ADMITS_12HRS'] = len(earlier_ed_cc6d_admits.index)
    df13b.loc[row_index, 'COUNT_ED_CC7D_ADMITS_12HRS'] = len(earlier_ed_cc7d_admits.index)
    df13b.loc[row_index, 'COUNT_ED_CCXD_ADMITS_12HRS'] = len(earlier_ed_ccxd_admits.index)
    df13b.loc[row_index, 'COUNT_ED_MICU_ADMITS_12HRS_COMBINED'] = len(earlier_ed_micu_admits.index)
    df13b.loc[row_index, 'COUNT_ED_MICU_BOARDER_ADMITS_12HRS_COMBINED'] = len(earlier_ed_micu_boarder_admits.index)
# In[24]:
# For each patient in our study population, find the average ED wait time for all patients
# who needed an ICU bed in the 6 hours prior to the given patient's time of ICU intime
# (the original comment said "12 hours" -- a copy/paste leftover; this section uses 6).
df13b['MEAN_ED_TIME_6HRS'] = np.nan
df13b['MEDIAN_ED_TIME_6HRS'] = np.nan
df13b['COUNT_ED_ICU_ADMITS_6HRS'] = np.nan  # Number of admits from ED to any ICU
df13b['COUNT_ED_CC6D_ADMITS_6HRS'] = np.nan  # Number of admits from ED to CC6D (under care of ANY service)
df13b['COUNT_ED_CC7D_ADMITS_6HRS'] = np.nan  # Number of admits from ED to CC7D (under care of ANY service)
df13b['COUNT_ED_CCXD_ADMITS_6HRS'] = np.nan  # Number of admits from ED to EITHER CC6D or CC7D (under care of ANY service)
df13b['COUNT_ED_MICU_ADMITS_6HRS_COMBINED'] = np.nan  # Number of admits from ED to EITHER MICU-Orange or -Green TEAM
df13b['COUNT_ED_MICU_BOARDER_ADMITS_6HRS_COMBINED'] = np.nan  # Number of admits from ED to EITHER MICU-Orange or MICU-Green TEAM as a boarder
# df13b['COUNT_ED_MICU_ADMITS_6HRS_ORANGE'] = np.nan # Number of admits from ED to MICU-Orange TEAM
# df13b['COUNT_ED_MICU_ADMITS_6HRS_GREEN'] = np.nan # Number of admits from ED to MICU-Green TEAM
micu_icustay_ids = df13b.groupby('transfers.icustay_id').size()
# for row_index, row in df13b.head(100).iterrows():
for row_index, row in df13b.iterrows():
    # ED admits arriving within the 6 hours before this patient's ICU intime,
    # excluding the patient's own record.
    earlier_ed_admits = transfers_ed[(transfers_ed['intime'] < row['transfers.intime']) &
                                     ((transfers_ed['intime'] + pd.Timedelta(hours=6)) > row['transfers.intime']) &
                                     (transfers_ed['subject_id'] != row['transfers.subject_id'])]
    earlier_ed_cc6d_admits = earlier_ed_admits[earlier_ed_admits['curr_ward'] == 'CC6D']
    earlier_ed_cc7d_admits = earlier_ed_admits[earlier_ed_admits['curr_ward'] == 'CC7D']
    earlier_ed_ccxd_admits = pd.concat([earlier_ed_cc6d_admits, earlier_ed_cc7d_admits])
    earlier_ed_micu_admits = earlier_ed_admits[earlier_ed_admits['icustay_id'].isin(micu_icustay_ids.index)]
    earlier_ed_micu_boarder_admits = earlier_ed_micu_admits[(earlier_ed_micu_admits['curr_ward'] != 'CC6D') &
                                                            (earlier_ed_micu_admits['curr_ward'] != 'CC7D')]
    # .loc replaces the deprecated DataFrame.ix indexer (removed in pandas 1.0).
    df13b.loc[row_index, 'MEAN_ED_TIME_6HRS'] = earlier_ed_admits['ED_TIME'].mean()
    df13b.loc[row_index, 'MEDIAN_ED_TIME_6HRS'] = earlier_ed_admits['ED_TIME'].median()
    df13b.loc[row_index, 'COUNT_ED_ICU_ADMITS_6HRS'] = len(earlier_ed_admits.index)
    df13b.loc[row_index, 'COUNT_ED_CC6D_ADMITS_6HRS'] = len(earlier_ed_cc6d_admits.index)
    df13b.loc[row_index, 'COUNT_ED_CC7D_ADMITS_6HRS'] = len(earlier_ed_cc7d_admits.index)
    df13b.loc[row_index, 'COUNT_ED_CCXD_ADMITS_6HRS'] = len(earlier_ed_ccxd_admits.index)
    df13b.loc[row_index, 'COUNT_ED_MICU_ADMITS_6HRS_COMBINED'] = len(earlier_ed_micu_admits.index)
    df13b.loc[row_index, 'COUNT_ED_MICU_BOARDER_ADMITS_6HRS_COMBINED'] = len(earlier_ed_micu_boarder_admits.index)
# In[126]:
# Store df13b
# mimic_common.df_to_csv('df13bi.csv', df13b)
# Load df13b from stored CSV file (if we don't want to have to re-generate it)
# NOTE(review): hard-coded local path and positional parse_dates column indices --
# both break silently if the CSV layout changes; verify column positions.
df13b = pd.read_csv('~/dev/data/mimic3_local_storage/df13bi.csv', parse_dates=[8, 15, 20, 112, 113, 115, 116, 117, 118, 120, 121, 122, 125, 126])
# In[127]:
# TO BE MOVED HIGHER UP
# Determine the number of ICU free days (a LOS proxy used to account for the fact that death shortens LOS)
# For each horizon (in days): patients who died before the horizon get 0 free
# days; survivors get the horizon minus total ICU LOS converted to days,
# floored at 0. icustay_los_total appears to be in hours (divided by 24) --
# TODO confirm its units.
# Label-based .loc assignment replaces the deprecated chained
# `df.column[mask] = value` pattern, which can silently write to a copy.
for _horizon in (28, 21, 35):
    _col = 'icu_free_days_%d' % _horizon
    df13b[_col] = np.nan
    df13b.loc[df13b['days_survived'] < _horizon, _col] = 0
    df13b.loc[df13b['days_survived'] >= _horizon, _col] = _horizon - (df13b['icustay_los_total'].astype(int)/(24))
    df13b.loc[df13b[_col] < 0, _col] = 0
# In[128]:
# TO BE MOVED HIGHER UP
# Generate different combined remaining beds measures
# Combined census = this team's census plus the sibling ("other") team's census.
df13b['west_initial_team_census'] = df13b['initial_team_census'] + df13b['other_initial_team_census']
# The east+west measure additionally includes the MSICU team's census.
df13b['eastwest_initial_team_census'] = df13b['initial_team_census'] + df13b['other_initial_team_census'] + df13b['msicu_initial_team_census']
# In[129]:
# Calculate LOS in days prior to ICU stay
# The timedelta becomes nanoseconds when cast to int, hence the ns -> days divisor.
df13b['los_days_prior_to_icu'] = (df13b['icustays.intime'] - df13b['admissions.admittime']).astype(int)/(1000000000*60*60*24)
# In[ ]:
# Load the clinical notes table; only metadata columns are needed
# (CATEGORY/DESCRIPTION for filtering, CHARTDATE for time matching).
notes = pd.read_csv('~/Downloads/MIMIC/data/NOTEEVENTS_DATA_TABLE.csv',
                    usecols=['ROW_ID', 'RECORD_ID', 'SUBJECT_ID', 'HADM_ID', 'CHARTDATE', 'CATEGORY', 'DESCRIPTION'],
                    parse_dates=[4])
# In[ ]:
# Split out ECG, echo and radiology reports; chest X-rays are the subset of
# radiology reports whose description is one of the chest-film types.
ecg = notes[(notes['CATEGORY'] == 'ECG')]
echo = notes[(notes['CATEGORY'] == 'Echo')]
radiology = notes[(notes['CATEGORY'] == 'Radiology')]
cxr = radiology[(radiology['DESCRIPTION'] == 'CHEST (PORTABLE AP)') |
                (radiology['DESCRIPTION'] == 'CHEST (PA & LAT)') |
                (radiology['DESCRIPTION'] == 'CHEST (PRE-OP PA & LAT)') |
                (radiology['DESCRIPTION'] == 'CHEST (SINGLE VIEW)') ]
# cxr_by_hadm = cxr.groupby('HADM_ID').size()
# ecg_by_hadm = ecg.groupby('HADM_ID').size()
# echo_by_hadm = echo.groupby('HADM_ID').size()
# In[ ]:
# Store ecg, echo, cxr, and radiology dataframes
mimic_common.df_to_csv('ecg.csv', ecg)
mimic_common.df_to_csv('echo.csv', echo)
mimic_common.df_to_csv('cxr.csv', cxr)
mimic_common.df_to_csv('radiology.csv', radiology)
# Load the dataframes from stored CSV files (if we don't want to have to re-generate them)
# ecg = pd.read_csv('~/dev/data/mimic3_local_storage/ecg.csv', parse_dates=[4])
# echo = pd.read_csv('~/dev/data/mimic3_local_storage/echo.csv', parse_dates=[4])
# cxr = pd.read_csv('~/dev/data/mimic3_local_storage/cxr.csv', parse_dates=[4])
# radiology = pd.read_csv('~/dev/data/mimic3_local_storage/radiology.csv', parse_dates=[4])
# In[ ]:
# NOTE(review): plain assignment aliases df13 -- the .loc writes below also
# mutate df13. Use df13.copy() if that is unintended.
df14 = df13
# For each patient under the care of the MICU-Orange or MICU-Green service, calculate the number of
# chest X-rays, echocardiograms, and ECGs ordered for each patient during their ICU stay
df14['cxr_count'] = np.nan
df14['echo_count'] = np.nan
df14['ecg_count'] = np.nan
df14['total_orders_count'] = np.nan
for row_index, row in df14.iterrows():
    # Since CHARTDATE only records the DAY (does not include HH:MM:SS) of a study,
    # we will add 24 hours to CHARTDATE before comparing it to transfers.intime
    cxr_count = cxr[(cxr['SUBJECT_ID'] == row['transfers.subject_id']) &
                    ((cxr['CHARTDATE'] + pd.Timedelta(hours=24)) > row['icustays.intime']) &
                    (cxr['CHARTDATE'] < row['icustays.outtime'])]
    echo_count = echo[(echo['SUBJECT_ID'] == row['transfers.subject_id']) &
                      ((echo['CHARTDATE'] + pd.Timedelta(hours=24)) > row['icustays.intime']) &
                      (echo['CHARTDATE'] < row['icustays.outtime'])]
    ecg_count = ecg[(ecg['SUBJECT_ID'] == row['transfers.subject_id']) &
                    ((ecg['CHARTDATE'] + pd.Timedelta(hours=24)) > row['icustays.intime']) &
                    (ecg['CHARTDATE'] < row['icustays.outtime'])]
    # Create a new dataframe by concatenating the cxr, echo and ecg dataframes
    total_orders_count = pd.concat([cxr_count, echo_count, ecg_count])
    # Store the counts in their respective columns.
    # .loc replaces the deprecated DataFrame.ix indexer (removed in pandas 1.0).
    df14.loc[row_index, 'cxr_count'] = len(cxr_count.index)
    df14.loc[row_index, 'echo_count'] = len(echo_count.index)
    df14.loc[row_index, 'ecg_count'] = len(ecg_count.index)
    df14.loc[row_index, 'total_orders_count'] = len(total_orders_count.index)
# In[ ]:
# Store df14 (pc for 'post-change' with respect to change to using 'initial_' variables)
# mimic_common.df_to_csv('df14_pc.csv', df14)
# Load df14 from stored CSV file (if we don't want to have to re-generate it)
# df14 = pd.read_csv('~/dev/data/mimic3_local_storage/df14_pc.csv', parse_dates=[8, 15, 20, 39, 40, 42, 43, 44, 45, 47, 48, 49, 52, 53])
# df14 = pd.read_csv('~/dev/data/mimic3_local_storage/df14_pc.csv', parse_dates=[8, 15, 20, 112, 113, 115, 116, 117, 118, 120, 121, 122, 125, 126])
# In[130]:
# DO NOT NEED UNLESS SKIPPING THE RADIOLOGY DATA MERGE
# NOTE(review): this overwrites the df14 built by the radiology loop above;
# keep only one of the two paths active.
df14 = df13b
# In[131]:
# Add the primary ICD-9-CM diagnosis for each admission
# Load the ICD diagnoses table
diagnoses = mimic_common.load_table(mimic_schema.diagnoses_icd_schema)
mimic_schema.diagnoses_icd_schema.add_prefix(diagnoses)
# Filter the diagnoses dataframe, keeping only those rows where SEQ_NUM == 1 (i.e. primary diagnoses)
primary_diagnoses = diagnoses[diagnoses['diagnoses_icd.seq_num'] == 1]
# Merge the primary diagnoses into our main data
df15 = left_join(df14, primary_diagnoses, left_on='transfers.hadm_id', right_on='diagnoses_icd.hadm_id')
# Convert the diagnoses from integers to strings
df15['diagnoses_icd.icd9_code'] = df15['diagnoses_icd.icd9_code'].astype(str)
# In[135]:
# Determine the AHRQ Clinical Classification System (CCS) categories for each diagnosis
# New dataframe from CSV file containing ICD-9-CM to CCS mapping
ccs = pd.read_csv('~/chatto-transform/ccs_dx.csv')
# Left join on the two dataframes
df16 = left_join(df15, ccs, left_on='diagnoses_icd.icd9_code', right_on='icd_code')
# In[133]:
# 71% of all cases are admissions from the ED
# df16
# df16[(df16['transfers.eventtype'] == 'admit')]
# df16[(df16['transfers.eventtype'] == 'transfer')]
# In[134]:
# DO NOT NEED UNLESS SKIPPING THE ICD-9 DIAGNOSIS MERGE
# NOTE(review): this overwrites the df16 produced by the CCS merge above --
# leave disabled unless the diagnosis merge was skipped.
df16 = df14
# In[136]:
# The data is now in a format such that...
# 1. ICU stays occupying multiple rows but with no time as a boarder are identical
# - Therefore all but the first grouped row should be deleted (after sorting by transfers.intime ASC)
# 2. ICU stays occupying multiple rows with all time spent as a boarder are identical
# - Therefore all but the first grouped row should be deleted (after sorting by transfers.intime ASC)
# 3. ICU stays occupying multiple rows that have SOME but NOT ALL time as a boarder are identical
# - Therefore all but the first grouped row should be deleted (after sorting by transfers.intime ASC)
# NB: Identical refers to the following fields...
# icustay_boarder_ever
# icustay_los_boarder
# | |
run_target=self.ndb_device_0.run_target),
common.PublishEventType.DEVICE_NOTE_EVENT),
mock.call(
api_messages.NoteEvent(
note=device_note_collection_msg.notes[1],
hostname=self.ndb_device_1.hostname,
lab_name=self.ndb_device_1.lab_name,
run_target=self.ndb_device_1.run_target),
common.PublishEventType.DEVICE_NOTE_EVENT),
mock.call(
api_messages.NoteEvent(
note=device_note_collection_msg.notes[2],
hostname=self.ndb_device_2.hostname,
lab_name=self.ndb_device_2.lab_name,
run_target=self.ndb_device_2.run_target),
common.PublishEventType.DEVICE_NOTE_EVENT),
])
@mock.patch.object(note_manager, 'PublishMessage')
def testBatchUpdateNotes_ExistingNoteAndPredefinedMessage(
self, mock_publish_device_note_message):
"""Tests updating notes with the same content and PredefinedMessage."""
existing_entities = [
datastore_entities.Note(
hostname=self.ndb_device_0.hostname,
device_serial=self.ndb_device_0.device_serial,
type=common.NoteType.DEVICE_NOTE),
datastore_entities.PredefinedMessage(
type=common.PredefinedMessageType.DEVICE_OFFLINE_REASON,
content='offline_reason-1',
lab_name='lab-1',
used_count=2),
datastore_entities.PredefinedMessage(
type=common.PredefinedMessageType.DEVICE_RECOVERY_ACTION,
content='recovery_action-1',
lab_name='lab-1',
used_count=3),
]
keys = ndb.put_multi(existing_entities)
api_request = {
'user':
'user-1',
'message':
'message-1',
'offline_reason_id':
str(keys[1].id()),
'recovery_action_id':
str(keys[2].id()),
'event_time':
self.TIMESTAMP.isoformat(),
'notes': [
{
'id': str(keys[0].id()),
},
{
'device_serial': self.ndb_device_1.device_serial,
'hostname': self.ndb_device_1.hostname,
},
{
'device_serial': self.ndb_device_2.device_serial,
'hostname': self.ndb_device_2.hostname,
},
],
}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.BatchUpdateNotesWithPredefinedMessage',
api_request)
self.assertEqual('200 OK', api_response.status)
device_note_collection_msg = protojson.decode_message(
api_messages.NoteCollection, api_response.body)
note_msgs = device_note_collection_msg.notes
self.assertEqual(3, len(note_msgs))
self.assertEqual(self.ndb_device_0.device_serial,
note_msgs[0].device_serial)
self.assertEqual(self.ndb_device_1.device_serial,
note_msgs[1].device_serial)
self.assertEqual(self.ndb_device_2.device_serial,
note_msgs[2].device_serial)
self.assertEqual(self.ndb_device_0.hostname, note_msgs[0].hostname)
self.assertEqual(self.ndb_device_1.hostname, note_msgs[1].hostname)
self.assertEqual(self.ndb_device_2.hostname, note_msgs[2].hostname)
self.assertEqual('message-1', note_msgs[0].message)
self.assertEqual('message-1', note_msgs[1].message)
self.assertEqual('message-1', note_msgs[2].message)
self.assertEqual('user-1', note_msgs[0].user)
self.assertEqual('user-1', note_msgs[1].user)
self.assertEqual('user-1', note_msgs[2].user)
self.assertEqual('offline_reason-1', note_msgs[0].offline_reason)
self.assertEqual('offline_reason-1', note_msgs[1].offline_reason)
self.assertEqual('offline_reason-1', note_msgs[2].offline_reason)
self.assertEqual('recovery_action-1', note_msgs[0].recovery_action)
self.assertEqual('recovery_action-1', note_msgs[1].recovery_action)
self.assertEqual('recovery_action-1', note_msgs[2].recovery_action)
self.assertEqual(self.TIMESTAMP, note_msgs[0].event_time)
self.assertEqual(self.TIMESTAMP, note_msgs[1].event_time)
self.assertEqual(self.TIMESTAMP, note_msgs[2].event_time)
# Side Effect: Assert each PredefinedMessage is created only once.
offline_reasons = list(
datastore_entities.PredefinedMessage.query()
.filter(datastore_entities.PredefinedMessage.lab_name == 'lab-1')
.filter(datastore_entities.PredefinedMessage.type ==
common.PredefinedMessageType.DEVICE_OFFLINE_REASON)
.fetch())
self.assertEqual(1, len(offline_reasons))
self.assertEqual(5, offline_reasons[0].used_count)
self.assertEqual('offline_reason-1', offline_reasons[0].content)
recovery_actions = list(
datastore_entities.PredefinedMessage.query()
.filter(datastore_entities.PredefinedMessage.lab_name == 'lab-1')
.filter(datastore_entities.PredefinedMessage.type ==
common.PredefinedMessageType.DEVICE_RECOVERY_ACTION)
.fetch())
self.assertEqual(1, len(recovery_actions))
self.assertEqual(6, recovery_actions[0].used_count)
self.assertEqual('recovery_action-1', recovery_actions[0].content)
# Side Effect: Assert DeviceInfoHistory is written into datastore.
histories = list(
datastore_entities.DeviceInfoHistory.query(
datastore_entities.DeviceInfoHistory.device_serial ==
self.ndb_device_0.device_serial).fetch())
self.assertEqual(0, len(histories))
histories = list(
datastore_entities.DeviceInfoHistory.query(
datastore_entities.DeviceInfoHistory.device_serial ==
self.ndb_device_1.device_serial).fetch())
self.assertEqual(1, len(histories))
self.assertEqual(
int(device_note_collection_msg.notes[1].id),
histories[0].extra_info['device_note_id'])
histories = list(
datastore_entities.DeviceInfoHistory.query(
datastore_entities.DeviceInfoHistory.device_serial ==
self.ndb_device_2.device_serial).fetch())
self.assertEqual(1, len(histories))
self.assertEqual(
int(device_note_collection_msg.notes[2].id),
histories[0].extra_info['device_note_id'])
# Side Effect: Assert device note event is published.
mock_publish_device_note_message.assert_has_calls([
mock.call(
api_messages.NoteEvent(
note=device_note_collection_msg.notes[0],
hostname=self.ndb_device_0.hostname,
lab_name=self.ndb_device_0.lab_name,
run_target=self.ndb_device_0.run_target),
common.PublishEventType.DEVICE_NOTE_EVENT),
mock.call(
api_messages.NoteEvent(
note=device_note_collection_msg.notes[1],
hostname=self.ndb_device_1.hostname,
lab_name=self.ndb_device_1.lab_name,
run_target=self.ndb_device_1.run_target),
common.PublishEventType.DEVICE_NOTE_EVENT),
mock.call(
api_messages.NoteEvent(
note=device_note_collection_msg.notes[2],
hostname=self.ndb_device_2.hostname,
lab_name=self.ndb_device_2.lab_name,
run_target=self.ndb_device_2.run_target),
common.PublishEventType.DEVICE_NOTE_EVENT),
])
  def testBatchUpdateNotes_InvalidPredefinedMessages(self):
    """Tests batch updating notes with PredefinedMessage ids that don't exist.

    Only ids 111 (offline reason) and 222 (recovery action) are seeded; using
    an unknown id for either field must fail the whole request with 400.
    """
    offline_reason = 'offline-reason'
    recovery_action = 'recovery-action'
    lab_name = 'lab-name'
    existing_entities = [
        datastore_entities.PredefinedMessage(
            key=ndb.Key(datastore_entities.PredefinedMessage, 111),
            lab_name=lab_name,
            type=api_messages.PredefinedMessageType.DEVICE_OFFLINE_REASON,
            content=offline_reason,
            used_count=2),
        datastore_entities.PredefinedMessage(
            key=ndb.Key(datastore_entities.PredefinedMessage, 222),
            lab_name=lab_name,
            type=api_messages.PredefinedMessageType.DEVICE_RECOVERY_ACTION,
            content=recovery_action,
            used_count=5),
    ]
    ndb.put_multi(existing_entities)
    # Invalid recovery action: id 444 was never created.
    api_request = {
        'user': 'user-1',
        'message': 'message-1',
        'offline_reason_id': '111',
        'recovery_action_id': '444',
        'notes': [
            {
                'device_serial': self.ndb_device_0.device_serial,
                'hostname': self.ndb_device_0.hostname,
            },
            {
                'device_serial': self.ndb_device_1.device_serial,
                'hostname': self.ndb_device_1.hostname,
            },
            {
                'device_serial': self.ndb_device_2.device_serial,
                'hostname': self.ndb_device_2.hostname,
            },
        ],
    }
    api_response = self.testapp.post_json(
        '/_ah/api/ClusterDeviceApi.BatchUpdateNotesWithPredefinedMessage',
        api_request,
        expect_errors=True)
    self.assertEqual('400 Bad Request', api_response.status)
    # Invalid offline reason: id 333 was never created.
    api_request = {
        'user': 'user-1',
        'message': 'message-1',
        'offline_reason_id': '333',
        'recovery_action_id': '222',
        'notes': [
            {
                'device_serial': self.ndb_device_0.device_serial,
                'hostname': self.ndb_device_0.hostname,
            },
            {
                'device_serial': self.ndb_device_1.device_serial,
                'hostname': self.ndb_device_1.hostname,
            },
            {
                'device_serial': self.ndb_device_2.device_serial,
                'hostname': self.ndb_device_2.hostname,
            },
        ],
    }
    api_response = self.testapp.post_json(
        '/_ah/api/ClusterDeviceApi.BatchUpdateNotesWithPredefinedMessage',
        api_request,
        expect_errors=True)
    self.assertEqual('400 Bad Request', api_response.status)
def testGetDevice_includeHistory(self):
"""Tests GetDevice including history when they are available."""
api_request = {
'device_serial': self.ndb_device_0.device_serial,
'include_history': True
}
api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.GetDevice',
api_request)
device = protojson.decode_message(api_messages.DeviceInfo,
api_response.body)
self.assertEqual('200 OK', api_response.status)
self.assertEqual(self.ndb_device_0.device_serial, device.device_serial)
self.assertEqual(self.ndb_device_0.hostname, device.hostname)
self.assertEqual(self.ndb_device_0.battery_level, device.battery_level)
self.assertEqual(self.ndb_device_0.hidden, device.hidden)
self.assertEqual(2, len(device.history))
# history will be sorted with newest first
self.assertEqual(self.device_history_1.timestamp,
device.history[0].timestamp)
self.assertEqual(self.device_history_1.state, device.history[0].state)
self.assertEqual(self.device_history_0.timestamp,
device.history[1].timestamp)
self.assertEqual(self.device_history_0.state, device.history[1].state)
def testGetDevice_includeHistoryNoneAvailable(self):
"""Tests GetDevice including history when none available."""
api_request = {
'device_serial': self.ndb_device_1.device_serial,
'include_history': True
}
api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.GetDevice',
api_request)
device = protojson.decode_message(api_messages.DeviceInfo,
api_response.body)
self.assertEqual('200 OK', api_response.status)
self.assertEqual(self.ndb_device_1.device_serial, device.device_serial)
self.assertEqual(self.ndb_device_1.hostname, device.hostname)
self.assertEqual(self.ndb_device_1.battery_level, device.battery_level)
self.assertEqual(self.ndb_device_1.hidden, device.hidden)
self.assertEqual(0, len(device.history))
def testGetDevice_includeNotesAndHistory(self):
"""Tests GetDevice including notes and history."""
api_request = {
'device_serial': self.ndb_device_0.device_serial,
'include_notes': True,
'include_history': True
}
api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.GetDevice',
api_request)
device = protojson.decode_message(api_messages.DeviceInfo,
api_response.body)
self.assertEqual('200 OK', api_response.status)
self.assertEqual(1, len(device.notes))
self.assertEqual(2, len(device.history))
def testRemove(self):
"""Tests Remove."""
# Check that the existing device is not set to hidden
api_request = {'device_serial': self.ndb_device_0.device_serial}
api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.GetDevice',
api_request)
device = protojson.decode_message(api_messages.DeviceInfo,
api_response.body)
self.assertFalse(device.hidden)
# Call Remove
api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.Remove',
api_request)
device = protojson.decode_message(api_messages.DeviceInfo,
api_response.body)
# Verify API response
self.assertEqual('200 OK', api_response.status)
self.assertTrue(device.hidden)
# Verify by retrieving the device
api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.GetDevice',
api_request)
device = protojson.decode_message(api_messages.DeviceInfo,
api_response.body)
self.assertTrue(device.hidden)
def testRemove_withHostname(self):
"""Tests Remove."""
# Check that the existing device is not set to hidden
api_request = {
'device_serial': self.ndb_device_0.device_serial,
'hostname': self.ndb_device_0.hostname
}
api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.GetDevice',
api_request)
device = protojson.decode_message(api_messages.DeviceInfo,
api_response.body)
self.assertFalse(device.hidden)
# Call Remove
api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.Remove',
api_request)
device = protojson.decode_message(api_messages.DeviceInfo,
api_response.body)
# Verify API response
self.assertEqual('200 OK', api_response.status)
self.assertTrue(device.hidden)
# Verify by retrieving the device
api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.GetDevice',
api_request)
device = protojson.decode_message(api_messages.DeviceInfo,
api_response.body)
self.assertTrue(device.hidden)
def testRemove_missingDevice(self):
"""Test Remove with an invalid device."""
api_request = {'device_serial': 'some-fake-serial'}
api_response = self.testapp.post_json(
'/_ah/api/ClusterDeviceApi.Remove', api_request, expect_errors=True)
self.assertEqual('404 Not Found', api_response.status)
def testRestore(self):
"""Tests Restore."""
# Check that the existing device is set to hidden
api_request = {'device_serial': self.ndb_device_2.device_serial}
api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.GetDevice',
api_request)
device = protojson.decode_message(api_messages.DeviceInfo,
api_response.body)
self.assertTrue(device.hidden)
# Call Restore
api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.Restore',
api_request)
device = protojson.decode_message(api_messages.DeviceInfo,
api_response.body)
# Verify API response
self.assertEqual('200 OK', api_response.status)
self.assertFalse(device.hidden)
# Verify by retrieving the device
api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.GetDevice',
api_request)
device = protojson.decode_message(api_messages.DeviceInfo,
api_response.body)
self.assertFalse(device.hidden)
def testRestore_withHostname(self):
"""Tests Restore."""
# Check that the existing device is set to hidden
api_request = {
'device_serial': self.ndb_device_2.device_serial,
'hostname': self.ndb_device_2.hostname,
}
api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.GetDevice',
api_request)
device = protojson.decode_message(api_messages.DeviceInfo,
api_response.body)
self.assertTrue(device.hidden)
# Call Restore
api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.Restore',
api_request)
device = protojson.decode_message(api_messages.DeviceInfo,
api_response.body)
# Verify API response
self.assertEqual('200 OK', api_response.status)
self.assertFalse(device.hidden)
# Verify by retrieving the device
api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.GetDevice',
api_request)
device = protojson.decode_message(api_messages.DeviceInfo,
api_response.body)
self.assertFalse(device.hidden)
  def testRestore_missingDevice(self):
    """Tests Restore with an invalid device."""
    # Restoring a serial that was never registered yields a 404.
    api_request = {'device_serial': 'some-fake-serial'}
    api_response = self.testapp.post_json(
        '/_ah/api/ClusterDeviceApi.Restore', api_request, expect_errors=True)
    self.assertEqual('404 Not Found', api_response.status)
def testListDevices_ignoreOfflineDevices(self):
"""Test list ignoring offline devices."""
# Should list all 4 devices when excluding unavailable devices
api_request = {'include_offline_devices': False}
self._assertDeviceCount(api_request, 4)
# Set Device 4 state to Gone
self._setDeviceState(self.ndb_device_4.device_serial,
common.DeviceState.GONE)
# Should only list 3 devices when excluding unavailable devices
api_request = {'include_offline_devices': False}
self._assertDeviceCount(api_request, 3)
# Set Device 3 state to Fastboot
self._setDeviceState(self.ndb_device_3.device_serial,
common.DeviceState.FASTBOOT)
# Should list 2 devices when excluding unavailable devices
api_request = {'include_offline_devices': False}
self._assertDeviceCount(api_request, 2)
# Should list 4 devices when including unavailable devices
api_request = {'include_offline_devices': True}
self._assertDeviceCount(api_request, 4)
  def testListDeviceNotes(self):
    """Tests ListNotes returning the newest page of notes for one device."""
    note_entities = [
        datastore_entities.Note(
            type=common.NoteType.DEVICE_NOTE,
            device_serial='device_1',
            user='user1',
            timestamp=datetime.datetime(1928, 1, 1),
            message='message_1',
            offline_reason='offline_reason_1',
            recovery_action='recovery_action_1'),
        datastore_entities.Note(
            type=common.NoteType.DEVICE_NOTE,
            device_serial='device_1',
            user='user2',
            timestamp=datetime.datetime(1918, 1, 1),
            message='message_2',
            offline_reason='offline_reason_2',
            recovery_action='recovery_action_2'),
        datastore_entities.Note(
            type=common.NoteType.DEVICE_NOTE,
            device_serial='device_1',
            user='user3',
            timestamp=datetime.datetime(1988, 1, 1),
            message='message_3',
            offline_reason='offline_reason_3',
            recovery_action='recovery_action_3'),
        datastore_entities.Note(
            type=common.NoteType.DEVICE_NOTE,
            device_serial='device_2',
            user='user4',
            timestamp=datetime.datetime(2008, 1, 1),
            message='message_4',
            offline_reason='offline_reason_4',
            recovery_action='recovery_action_4'),
    ]
    ndb.put_multi(note_entities)
    # The result will be sorted by timestamp in descending order.
    api_request = {
        'device_serial': 'device_1',
        'count': 2,
    }
    api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.ListNotes',
                                          api_request)
    device_note_collection_msg = protojson.decode_message(
        api_messages.NoteCollection, api_response.body)
    # A third device_1 note exists, so more pages are available.
    self.assertTrue(device_note_collection_msg.more)
    self.assertIsNotNone(device_note_collection_msg.next_cursor)
    note_msgs = device_note_collection_msg.notes
    self.assertEqual(2, len(note_msgs))
    # Newest first: note_entities[2] (1988) then note_entities[0] (1928).
    # note_entities[3] belongs to device_2 and is excluded.
    self.assertEqual(note_msgs[0].device_serial, note_entities[2].device_serial)
    self.assertEqual(note_msgs[0].user, note_entities[2].user)
    self.assertEqual(note_msgs[0].timestamp,
                     note_entities[2].timestamp)
    self.assertEqual(note_msgs[0].message, note_entities[2].message)
    self.assertEqual(note_msgs[0].offline_reason,
                     note_entities[2].offline_reason)
    self.assertEqual(note_msgs[0].recovery_action,
                     note_entities[2].recovery_action)
    self.assertEqual(note_msgs[1].device_serial, note_entities[0].device_serial)
    self.assertEqual(note_msgs[1].user, note_entities[0].user)
    self.assertEqual(note_msgs[1].timestamp,
                     note_entities[0].timestamp)
    self.assertEqual(note_msgs[1].message, note_entities[0].message)
    self.assertEqual(note_msgs[1].offline_reason,
                     note_entities[0].offline_reason)
    self.assertEqual(note_msgs[1].recovery_action,
                     note_entities[0].recovery_action)
  def testListDeviceNotes_withCursorAndOffsetAndBackwards(self):
    """Tests ListNotes pagination: next_cursor, prev_cursor and backwards."""
    note_entities = [
        datastore_entities.Note(
            type=common.NoteType.DEVICE_NOTE,
            device_serial='device_1',
            user='user1',
            timestamp=datetime.datetime(1928, 1, 1),
            message='message_1',
            offline_reason='offline_reason_1',
            recovery_action='recovery_action_1'),
        datastore_entities.Note(
            type=common.NoteType.DEVICE_NOTE,
            device_serial='device_1',
            user='user2',
            timestamp=datetime.datetime(1918, 1, 1),
            message='message_2',
            offline_reason='offline_reason_2',
            recovery_action='recovery_action_2'),
        datastore_entities.Note(
            type=common.NoteType.DEVICE_NOTE,
            device_serial='device_1',
            user='user3',
            timestamp=datetime.datetime(1988, 1, 1),
            message='message_3',
            offline_reason='offline_reason_3',
            recovery_action='recovery_action_3'),
        datastore_entities.Note(
            type=common.NoteType.DEVICE_NOTE,
            device_serial='device_2',
            user='user4',
            timestamp=datetime.datetime(2008, 1, 1),
            message='message_4',
            offline_reason='offline_reason_4',
            recovery_action='recovery_action_4'),
    ]
    ndb.put_multi(note_entities)
    # The result will be sorted by timestamp in descending order.
    api_request = {
        'device_serial': 'device_1',
        'count': 2,
    }
    api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.ListNotes',
                                          api_request)
    device_note_collection_msg = protojson.decode_message(
        api_messages.NoteCollection, api_response.body)
    self.assertIsNotNone(device_note_collection_msg.next_cursor)
    note_msgs = device_note_collection_msg.notes
    # First page, newest first: note_entities[2] (1988), note_entities[0]
    # (1928); note_entities[3] belongs to device_2 and is excluded.
    self.assertEqual(2, len(note_msgs))
    self.assertEqual(note_msgs[0].device_serial, note_entities[2].device_serial)
    self.assertEqual(note_msgs[0].user, note_entities[2].user)
    self.assertEqual(note_msgs[0].timestamp, note_entities[2].timestamp)
    self.assertEqual(note_msgs[0].message, note_entities[2].message)
    self.assertEqual(note_msgs[0].offline_reason,
                     note_entities[2].offline_reason)
    self.assertEqual(note_msgs[0].recovery_action,
                     note_entities[2].recovery_action)
    self.assertEqual(note_msgs[1].device_serial, note_entities[0].device_serial)
    self.assertEqual(note_msgs[1].user, note_entities[0].user)
    self.assertEqual(note_msgs[1].timestamp,
                     note_entities[0].timestamp)
    self.assertEqual(note_msgs[1].message, note_entities[0].message)
    self.assertEqual(note_msgs[1].offline_reason,
                     note_entities[0].offline_reason)
    self.assertEqual(note_msgs[1].recovery_action,
                     note_entities[0].recovery_action)
    # fetch next page
    api_request = {
        'device_serial': 'device_1',
        'count': 2,
        'cursor': device_note_collection_msg.next_cursor,
    }
    api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.ListNotes',
                                          api_request)
    device_note_collection_msg = protojson.decode_message(
        api_messages.NoteCollection, api_response.body)
    self.assertIsNotNone(device_note_collection_msg.prev_cursor)
    note_msgs = device_note_collection_msg.notes
    # Second page holds only the oldest device_1 note (1918).
    self.assertEqual(1, len(note_msgs))
    self.assertEqual(note_msgs[0].device_serial, note_entities[1].device_serial)
    self.assertEqual(note_msgs[0].user, note_entities[1].user)
    self.assertEqual(note_msgs[0].timestamp,
                     note_entities[1].timestamp)
    self.assertEqual(note_msgs[0].message, note_entities[1].message)
    self.assertEqual(note_msgs[0].offline_reason,
                     note_entities[1].offline_reason)
    self.assertEqual(note_msgs[0].recovery_action,
                     note_entities[1].recovery_action)
    # fetch previous page (same as first page)
    api_request = {
        'device_serial': 'device_1',
        'count': 2,
        'cursor': device_note_collection_msg.prev_cursor,
        'backwards': True,
    }
    api_response = self.testapp.post_json('/_ah/api/ClusterDeviceApi.ListNotes',
                                          api_request)
    device_note_collection_msg = protojson.decode_message(
        api_messages.NoteCollection, api_response.body)
    note_msgs = device_note_collection_msg.notes
    self.assertEqual(2, len(note_msgs))
    self.assertEqual(note_msgs[0].device_serial, note_entities[2].device_serial)
    self.assertEqual(note_msgs[0].user, note_entities[2].user)
    self.assertEqual(note_msgs[0].timestamp,
                     note_entities[2].timestamp)
    self.assertEqual(note_msgs[0].message, note_entities[2].message)
    self.assertEqual(note_msgs[0].offline_reason,
                     note_entities[2].offline_reason)
    self.assertEqual(note_msgs[0].recovery_action,
                     note_entities[2].recovery_action)
    self.assertEqual(note_msgs[1].device_serial, note_entities[0].device_serial)
    self.assertEqual(note_msgs[1].user, note_entities[0].user)
    self.assertEqual(note_msgs[1].timestamp,
                     note_entities[0].timestamp)
    self.assertEqual(note_msgs[1].message, note_entities[0].message)
    self.assertEqual(note_msgs[1].offline_reason,
                     note_entities[0].offline_reason)
    self.assertEqual(note_msgs[1].recovery_action,
                     note_entities[0].recovery_action)
def testBatchGetDeviceNotes(self):
note_entities = [
datastore_entities.Note(
type=common.NoteType.DEVICE_NOTE,
device_serial='device_1',
user='user1',
timestamp=datetime.datetime(1928, 1, 1),
message='message_1',
offline_reason='offline_reason_1',
recovery_action='recovery_action_1'),
datastore_entities.Note(
type=common.NoteType.DEVICE_NOTE,
device_serial='device_1',
user='user2',
timestamp=datetime.datetime(1918, 1, 1),
message='message_2',
offline_reason='offline_reason_2',
recovery_action='recovery_action_2'),
datastore_entities.Note(
type=common.NoteType.DEVICE_NOTE,
device_serial='device_1',
user='user3',
timestamp=datetime.datetime(1988, 1, 1),
message='message_3',
offline_reason='offline_reason_3',
recovery_action='recovery_action_3'),
datastore_entities.Note(
type=common.NoteType.DEVICE_NOTE,
device_serial='device_2',
user='user4',
timestamp=datetime.datetime(2008, 1, 1),
message='message_4',
offline_reason='offline_reason_4',
recovery_action='recovery_action_4'),
]
keys = ndb.put_multi(note_entities)
# note4 will not be included in response because device_serial is | |
# SECUREAUTH LABS. Copyright 2020 SecureAuth Corporation. All rights reserved.
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Authors:
# <NAME> <<EMAIL>> / Positive Technologies (https://www.ptsecurity.com/)
#
# Error codes taken from:
# [MS-OXCDATA]
# http://www.eventid.net/display-eventid-2115-source-MSExchangeDSAccess-eventno-4469-phase-1.htm
#
# MAPI properties taken from:
# https://gist.github.com/mohemiv/76c265ac92ca026a10b7756899b5f8d5 (MIT)
#
# Maps MAPI/LDAP HRESULT values to their symbolic error names.
ERROR_MESSAGES = {
    0x80004002: "MAPI_E_INTERFACE_NO_SUPPORT",
    0x80004005: "MAPI_E_CALL_FAILED",
    0x80040102: "MAPI_E_NO_SUPPORT",
    0x80040103: "MAPI_E_BAD_CHARWIDTH",
    0x80040105: "MAPI_E_STRING_TOO_LONG",
    0x80040106: "MAPI_E_UNKNOWN_FLAGS",
    0x80040107: "MAPI_E_INVALID_ENTRYID",
    0x80040108: "MAPI_E_INVALID_OBJECT",
    0x80040109: "MAPI_E_OBJECT_CHANGED",
    0x8004010A: "MAPI_E_OBJECT_DELETED",
    0x8004010B: "MAPI_E_BUSY",
    0x8004010D: "MAPI_E_NOT_ENOUGH_DISK",
    0x8004010E: "MAPI_E_NOT_ENOUGH_RESOURCES",
    0x8004010F: "MAPI_E_NOT_FOUND",
    0x80040110: "MAPI_E_VERSION",
    0x80040111: "MAPI_E_LOGON_FAILED",
    0x80040112: "MAPI_E_SESSION_LIMIT",
    0x80040113: "MAPI_E_USER_CANCEL",
    0x80040114: "MAPI_E_UNABLE_TO_ABORT",
    0x80040115: "MAPI_E_NETWORK_ERROR",
    0x80040116: "MAPI_E_DISK_ERROR",
    0x80040117: "MAPI_E_TOO_COMPLEX",
    0x80040118: "MAPI_E_BAD_COLUMN",
    0x80040119: "MAPI_E_EXTENDED_ERROR",
    0x8004011A: "MAPI_E_COMPUTED",
    0x8004011B: "MAPI_E_CORRUPT_DATA",
    0x8004011C: "MAPI_E_UNCONFIGURED",
    0x8004011D: "MAPI_E_FAILONEPROVIDER",
    0x8004011E: "MAPI_E_UNKNOWN_CPID",
    0x8004011F: "MAPI_E_UNKNOWN_LCID",
    0x80040120: "MAPI_E_PASSWORD_CHANGE_REQUIRED",
    0x80040121: "MAPI_E_PASSWORD_EXPIRED",
    0x80040122: "MAPI_E_INVALID_WORKSTATION_ACCOUNT",
    0x80040123: "MAPI_E_INVALID_ACCESS_TIME",
    0x80040124: "MAPI_E_ACCOUNT_DISABLED",
    0x80040200: "MAPI_E_END_OF_SESSION",
    0x80040201: "MAPI_E_UNKNOWN_ENTRYID",
    0x80040202: "MAPI_E_MISSING_REQUIRED_COLUMN",
    0x00040203: "MAPI_W_NO_SERVICE",
    0x80040301: "MAPI_E_BAD_VALUE",
    0x80040302: "MAPI_E_INVALID_TYPE",
    0x80040303: "MAPI_E_TYPE_NO_SUPPORT",
    0x80040304: "MAPI_E_UNEXPECTED_TYPE",
    0x80040305: "MAPI_E_TOO_BIG",
    0x80040306: "MAPI_E_DECLINE_COPY",
    0x80040307: "MAPI_E_UNEXPECTED_ID",
    0x00040380: "MAPI_W_ERRORS_RETURNED",
    0x80040400: "MAPI_E_UNABLE_TO_COMPLETE",
    0x80040401: "MAPI_E_TIMEOUT",
    0x80040402: "MAPI_E_TABLE_EMPTY",
    0x80040403: "MAPI_E_TABLE_TOO_BIG",
    0x80040405: "MAPI_E_INVALID_BOOKMARK",
    0x00040481: "MAPI_W_POSITION_CHANGED",
    0x00040482: "MAPI_W_APPROX_COUNT",
    0x80040500: "MAPI_E_WAIT",
    0x80040501: "MAPI_E_CANCEL",
    0x80040502: "MAPI_E_NOT_ME",
    0x00040580: "MAPI_W_CANCEL_MESSAGE",
    0x80040600: "MAPI_E_CORRUPT_STORE",
    0x80040601: "MAPI_E_NOT_IN_QUEUE",
    0x80040602: "MAPI_E_NO_SUPPRESS",
    0x80040604: "MAPI_E_COLLISION",
    0x80040605: "MAPI_E_NOT_INITIALIZED",
    0x80040606: "MAPI_E_NON_STANDARD",
    0x80040607: "MAPI_E_NO_RECIPIENTS",
    0x80040608: "MAPI_E_SUBMITTED",
    0x80040609: "MAPI_E_HAS_FOLDERS",
    0x8004060A: "MAPI_E_HAS_MESAGES",
    0x8004060B: "MAPI_E_FOLDER_CYCLE",
    0x8004060D: "MAPI_E_LOCKID_LIMIT",
    0x00040680: "MAPI_W_PARTIAL_COMPLETION",
    0x80040700: "MAPI_E_AMBIGUOUS_RECIP",
    0x80040800: "SYNC_E_OBJECT_DELETED",
    0x80040801: "SYNC_E_IGNORE",
    0x80040802: "SYNC_E_CONFLICT",
    0x80040803: "SYNC_E_NO_PARENT",
    0x80040804: "SYNC_E_CYCLE_DETECTED",
    0x80040805: "SYNC_E_UNSYNCHRONIZED",
    0x00040820: "SYNC_W_PROGRESS",
    0x00040821: "SYNC_W_CLIENT_CHANGE_NEWER",
    0x80040900: "MAPI_E_NAMED_PROP_QUOTA_EXCEEDED",
    0x80040FFF: "MAPI_E_NOT_IMPLEMENTED",
    0x80070005: "MAPI_E_NO_ACCESS",
    0x8007000E: "MAPI_E_NOT_ENOUGH_MEMORY",
    0x80070057: "MAPI_E_INVALID_PARAMETER",
    0x80040920: "LDAP_NO_SUCH_OBJECT",
    0x80040951: "LDAP_SERVER_DOWN",
    0x80040952: "LDAP_LOCAL_ERROR",
}
# Individual MAPI/LDAP error constants, mirroring the ERROR_MESSAGES table
# above so callers can compare return codes symbolically.
MAPI_E_INTERFACE_NO_SUPPORT = 0x80004002
MAPI_E_CALL_FAILED = 0x80004005
MAPI_E_NO_SUPPORT = 0x80040102
MAPI_E_BAD_CHARWIDTH = 0x80040103
MAPI_E_STRING_TOO_LONG = 0x80040105
MAPI_E_UNKNOWN_FLAGS = 0x80040106
MAPI_E_INVALID_ENTRYID = 0x80040107
MAPI_E_INVALID_OBJECT = 0x80040108
MAPI_E_OBJECT_CHANGED = 0x80040109
MAPI_E_OBJECT_DELETED = 0x8004010A
MAPI_E_BUSY = 0x8004010B
MAPI_E_NOT_ENOUGH_DISK = 0x8004010D
MAPI_E_NOT_ENOUGH_RESOURCES = 0x8004010E
MAPI_E_NOT_FOUND = 0x8004010F
MAPI_E_VERSION = 0x80040110
MAPI_E_LOGON_FAILED = 0x80040111
MAPI_E_SESSION_LIMIT = 0x80040112
MAPI_E_USER_CANCEL = 0x80040113
MAPI_E_UNABLE_TO_ABORT = 0x80040114
MAPI_E_NETWORK_ERROR = 0x80040115
MAPI_E_DISK_ERROR = 0x80040116
MAPI_E_TOO_COMPLEX = 0x80040117
MAPI_E_BAD_COLUMN = 0x80040118
MAPI_E_EXTENDED_ERROR = 0x80040119
MAPI_E_COMPUTED = 0x8004011A
MAPI_E_CORRUPT_DATA = 0x8004011B
MAPI_E_UNCONFIGURED = 0x8004011C
MAPI_E_FAILONEPROVIDER = 0x8004011D
MAPI_E_UNKNOWN_CPID = 0x8004011E
MAPI_E_UNKNOWN_LCID = 0x8004011F
# The two values below were corrupted placeholders ("0x<PASSWORD>"); restored
# from the ERROR_MESSAGES table above (0x80040120 / 0x80040121).
MAPI_E_PASSWORD_CHANGE_REQUIRED = 0x80040120
MAPI_E_PASSWORD_EXPIRED = 0x80040121
MAPI_E_INVALID_WORKSTATION_ACCOUNT = 0x80040122
MAPI_E_INVALID_ACCESS_TIME = 0x80040123
MAPI_E_ACCOUNT_DISABLED = 0x80040124
MAPI_E_END_OF_SESSION = 0x80040200
MAPI_E_UNKNOWN_ENTRYID = 0x80040201
MAPI_E_MISSING_REQUIRED_COLUMN = 0x80040202
MAPI_W_NO_SERVICE = 0x00040203
MAPI_E_BAD_VALUE = 0x80040301
MAPI_E_INVALID_TYPE = 0x80040302
MAPI_E_TYPE_NO_SUPPORT = 0x80040303
MAPI_E_UNEXPECTED_TYPE = 0x80040304
MAPI_E_TOO_BIG = 0x80040305
MAPI_E_DECLINE_COPY = 0x80040306
MAPI_E_UNEXPECTED_ID = 0x80040307
MAPI_W_ERRORS_RETURNED = 0x00040380
MAPI_E_UNABLE_TO_COMPLETE = 0x80040400
MAPI_E_TIMEOUT = 0x80040401
MAPI_E_TABLE_EMPTY = 0x80040402
MAPI_E_TABLE_TOO_BIG = 0x80040403
MAPI_E_INVALID_BOOKMARK = 0x80040405
MAPI_W_POSITION_CHANGED = 0x00040481
MAPI_W_APPROX_COUNT = 0x00040482
MAPI_E_WAIT = 0x80040500
MAPI_E_CANCEL = 0x80040501
MAPI_E_NOT_ME = 0x80040502
MAPI_W_CANCEL_MESSAGE = 0x00040580
MAPI_E_CORRUPT_STORE = 0x80040600
MAPI_E_NOT_IN_QUEUE = 0x80040601
MAPI_E_NO_SUPPRESS = 0x80040602
MAPI_E_COLLISION = 0x80040604
MAPI_E_NOT_INITIALIZED = 0x80040605
MAPI_E_NON_STANDARD = 0x80040606
MAPI_E_NO_RECIPIENTS = 0x80040607
MAPI_E_SUBMITTED = 0x80040608
MAPI_E_HAS_FOLDERS = 0x80040609
MAPI_E_HAS_MESAGES = 0x8004060A
MAPI_E_FOLDER_CYCLE = 0x8004060B
MAPI_E_LOCKID_LIMIT = 0x8004060D
MAPI_W_PARTIAL_COMPLETION = 0x00040680
MAPI_E_AMBIGUOUS_RECIP = 0x80040700
SYNC_E_OBJECT_DELETED = 0x80040800
SYNC_E_IGNORE = 0x80040801
SYNC_E_CONFLICT = 0x80040802
SYNC_E_NO_PARENT = 0x80040803
SYNC_E_CYCLE_DETECTED = 0x80040804
SYNC_E_UNSYNCHRONIZED = 0x80040805
SYNC_W_PROGRESS = 0x00040820
SYNC_W_CLIENT_CHANGE_NEWER = 0x00040821
MAPI_E_NAMED_PROP_QUOTA_EXCEEDED = 0x80040900
MAPI_E_NOT_IMPLEMENTED = 0x80040FFF
MAPI_E_NO_ACCESS = 0x80070005
MAPI_E_NOT_ENOUGH_MEMORY = 0x8007000E
MAPI_E_INVALID_PARAMETER = 0x80070057
LDAP_NO_SUCH_OBJECT = 0x80040920
LDAP_SERVER_DOWN = 0x80040951
LDAP_LOCAL_ERROR = 0x80040952
# PR_DISPLAY_TYPE
# Display-type values for address book entries.
# For address book contents tables
DT_MAILUSER = 0x00000000
DT_DISTLIST = 0x00000001
DT_FORUM = 0x00000002
DT_AGENT = 0x00000003
DT_ORGANIZATION = 0x00000004
DT_PRIVATE_DISTLIST = 0x00000005
DT_REMOTE_MAILUSER = 0x00000006
# For address book hierarchy tables
DT_MODIFIABLE = 0x00010000
DT_GLOBAL = 0x00020000
DT_LOCAL = 0x00030000
DT_WAN = 0x00040000
DT_NOT_SPECIFIC = 0x00050000
# For folder hierarchy tables *
DT_FOLDER = 0x01000000
DT_FOLDER_LINK = 0x02000000
DT_FOLDER_SPECIAL = 0x04000000
# Reverse lookup: PR_DISPLAY_TYPE value -> symbolic name.
PR_DISPLAY_TYPE_VALUES = {
    0x00000000: "DT_MAILUSER",
    0x00000001: "DT_DISTLIST",
    0x00000002: "DT_FORUM",
    0x00000003: "DT_AGENT",
    0x00000004: "DT_ORGANIZATION",
    0x00000005: "DT_PRIVATE_DISTLIST",
    0x00000006: "DT_REMOTE_MAILUSER",
    0x00010000: "DT_MODIFIABLE",
    0x00020000: "DT_GLOBAL",
    0x00030000: "DT_LOCAL",
    0x00040000: "DT_WAN",
    0x00050000: "DT_NOT_SPECIFIC",
    0x01000000: "DT_FOLDER",
    0x02000000: "DT_FOLDER_LINK",
    0x04000000: "DT_FOLDER_SPECIAL"
}
# PR_OBJECT_TYPE
# MAPI object type identifiers.
MAPI_STORE = 0x1
MAPI_ADDRBOOK = 0x2
MAPI_FOLDER = 0x3
MAPI_ABCONT = 0x4
MAPI_MESSAGE = 0x5
MAPI_MAILUSER = 0x6
MAPI_ATTACH = 0x7
MAPI_DISTLIST = 0x8
MAPI_PROFSECT = 0x9
MAPI_STATUS = 0xA
MAPI_SESSION = 0xB
MAPI_FORMINFO = 0xC
# Reverse lookup: PR_OBJECT_TYPE value -> symbolic name.
PR_OBJECT_TYPE_VALUES = {
    0x1: "MAPI_STORE",
    0x2: "MAPI_ADDRBOOK",
    0x3: "MAPI_FOLDER",
    0x4: "MAPI_ABCONT",
    0x5: "MAPI_MESSAGE",
    0x6: "MAPI_MAILUSER",
    0x7: "MAPI_ATTACH",
    0x8: "MAPI_DISTLIST",
    0x9: "MAPI_PROFSECT",
    0xA: "MAPI_STATUS",
    0xB: "MAPI_SESSION",
    0xC: "MAPI_FORMINFO"
}
# PR_CONTAINER_FLAGS
# Bit flags describing address book container capabilities.
AB_RECIPIENTS = 0x00000001
AB_SUBCONTAINERS = 0x00000002
AB_MODIFIABLE = 0x00000004
AB_UNMODIFIABLE = 0x00000008
AB_FIND_ON_OPEN = 0x00000010
AB_NOT_DEFAULT = 0x00000020
AB_CONF_ROOMS = 0x00000200
# Reverse lookup: PR_CONTAINER_FLAGS bit -> symbolic name.
PR_CONTAINER_FLAGS_VALUES = {
    0x00000001: "AB_RECIPIENTS",
    0x00000002: "AB_SUBCONTAINERS",
    0x00000004: "AB_MODIFIABLE",
    0x00000008: "AB_UNMODIFIABLE",
    0x00000010: "AB_FIND_ON_OPEN",
    0x00000020: "AB_NOT_DEFAULT",
    0x00000200: "AB_CONF_ROOMS"
}
MAPI_PROPERTIES = {
# Field_1: (Field_2, Field_3, Field_4, Field_5, Field_6, Field_7, Field_8),
#
# Field_1 is PropertyId
# Field_2 is PropertyType (unicode when possible)
# Field_3 is Active Directory LDAP-Display-Name
# Field_4 is Active Directory CN
# Field_5:
# 1 when Is-Member-Of-Partial-Attribute-Set is TRUE
# 2 when Is-Member-Of-Partial-Attribute-Set is FALSE
# 3 when Is-Member-Of-Partial-Attribute-Set does not exist
# 4 when it's not an Active Directory property
#
# Field_6 is MS-OXPROPS Canonical Name
# Field_7 is MS-OXPROPS First Alternate Name (usually the shortest one which starts from PR_)
# Field_8 is internal Exchange name
0x0806: (0x101f, "msExchResourceSearchProperties", "ms-Exch-Resource-Search-Properties", 1, None, None, None),
0x0807: (0x0003, "msExchResourceCapacity", "ms-Exch-Resource-Capacity", 1, "PidTagAddressBookRoomCapacity", "PR_EMS_AB_ROOM_CAPACITY", None),
0x0808: (0x101f, "msExchResourceMetaData", "ms-Exch-Resource-Meta-Data", 1, None, None, None),
0x0809: (0x001f, "msExchResourceDisplay", "ms-Exch-Resource-Display", 1, "PidTagAddressBookRoomDescription", "PR_EMS_AB_ROOM_DESCRIPTION", None),
0x3004: (0x001f, "info", "Comment", 1, "PidTagComment", "PR_COMMENT", "Comment"),
0x3007: (0x0040, "whenCreated", "When-Created", 1, "PidTagCreationTime", "PR_CREATION_TIME", "CreationTime"),
0x3008: (0x0040, "whenChanged", "When-Changed", 1, "PidTagLastModificationTime", "PR_LAST_MODIFICATION_TIME", "LastModificationTime"),
0x3905: (0x0003, "msExchRecipientDisplayType", "ms-Exch-Recipient-Display-Type", 1, "PidTagDisplayTypeEx", "PR_DISPLAY_TYPE_EX", "DisplayTypeEx"),
0x39fe: (0x001f, "mail", "E-mail-Addresses", 1, "PidTagSmtpAddress", "PR_SMTP_ADDRESS", "SmtpAddress"),
0x39ff: (0x001f, "displayNamePrintable", "Display-Name-Printable", 1, "PidTagAddressBookDisplayNamePrintable", "PR_EMS_AB_DISPLAY_NAME_PRINTABLE", "SimpleDisplayName"),
0x3a00: (0x001f, "mailNickname", "ms-Exch-Mail-Nickname", 1, "PidTagAccount", "PR_ACCOUNT", "Account"),
0x3a06: (0x001f, "givenName", "Given-Name", 1, "PidTagGivenName", "PR_GIVEN_NAME", "GivenName"),
0x3a08: (0x001f, "telephoneNumber", "Telephone-Number", 1, "PidTagBusinessTelephoneNumber", "PR_BUSINESS_TELEPHONE_NUMBER", "BusinessTelephoneNumber"),
0x3a09: (0x001f, "homePhone", "Phone-Home-Primary", 1, "PidTagHomeTelephoneNumber", "PR_HOME_TELEPHONE_NUMBER", "HomeTelephoneNumber"),
0x3a0a: (0x001f, "initials", "Initials", 1, "PidTagInitials", "PR_INITIALS", "Initials"),
0x3a0f: (0x001f, "cn", "Common-Name", 1, "PidTagMessageHandlingSystemCommonName", "PR_MHS_COMMON_NAME", "MhsCommonName"),
0x3a11: (0x001f, "sn", "Surname", 1, "PidTagSurname", "PR_SURNAME", "Surname"),
0x3a16: (0x001f, "company", "Company", 1, "PidTagCompanyName", "PR_COMPANY_NAME", "CompanyName"),
0x3a17: (0x001f, "title", "Title", 1, "PidTagTitle", "PR_TITLE", "Title"),
0x3a18: (0x001f, "department", "Department", 1, "PidTagDepartmentName", "PR_DEPARTMENT_NAME", "DepartmentName"),
0x3a19: (0x001f, "physicalDeliveryOfficeName", "Physical-Delivery-Office-Name", 1, "PidTagOfficeLocation", "PR_OFFICE_LOCATION", "OfficeLocation"),
0x3a1b: (0x101f, "otherTelephone", "Phone-Office-Other", 1, "PidTagBusiness2TelephoneNumbers", "PR_BUSINESS2_TELEPHONE_NUMBER_A_MV", "Business2TelephoneNumber"),
0x3a1c: (0x001f, "mobile", "Phone-Mobile-Primary", 1, "PidTagMobileTelephoneNumber", "PR_MOBILE_TELEPHONE_NUMBER", "MobileTelephoneNumber"),
0x3a21: (0x001f, "pager", "Phone-Pager-Primary", 1, "PidTagPagerTelephoneNumber", "PR_PAGER_TELEPHONE_NUMBER", "PagerTelephoneNumber"),
0x3a22: (0x0102, "userCert", "User-Cert", 1, "PidTagUserCertificate", "PR_USER_CERTIFICATE", "UserCertificate"),
0x3a23: (0x001f, "facsimileTelephoneNumber", "Facsimile-Telephone-Number", 1, "PidTagPrimaryFaxNumber", "PR_PRIMARY_FAX_NUMBER", "PrimaryFaxNumber"),
0x3a26: (0x001f, "co", "Text-Country", 1, "PidTagCountry", "PR_COUNTRY", "Country"),
0x3a27: (0x001f, "l", "Locality-Name", 1, "PidTagLocality", "PR_LOCALITY", "Locality"),
0x3a28: (0x001f, "st", "State-Or-Province-Name", 1, "PidTagStateOrProvince", "PR_STATE_OR_PROVINCE", "StateOrProvince"),
0x3a29: (0x001f, "streetAddress", "Address", 1, "PidTagStreetAddress", "PR_STREET_ADDRESS", "StreetAddress"),
0x3a2a: (0x001f, "postalCode", "Postal-Code", 1, "PidTagPostalCode", "PR_POSTAL_CODE", "PostalCode"),
0x3a2b: (0x101f, "postOfficeBox", "Post-Office-Box", 1, "PidTagPostOfficeBox", "PR_POST_OFFICE_BOX", "PostOfficeBox"),
0x3a2c: (0x1102, "telexNumber", "Telex-Number", 3, "PidTagTelexNumber", "PR_TELEX_NUMBER", "TelexNumber"),
0x3a2e: (0x001f, "telephoneAssistant", "ms-Exch-Telephone-Assistant", 1, "PidTagAssistantTelephoneNumber", "PR_ASSISTANT_TELEPHONE_NUMBER", "AssistantTelephoneNumber"),
0x3a2f: (0x101f, "otherHomePhone", "Phone-Home-Other", 1, "PidTagHome2TelephoneNumbers", "PR_HOME2_TELEPHONE_NUMBER_A_MV", "Home2TelephoneNumber"),
0x3a30: (0x001f, "msExchAssistantName", "ms-Exch-Assistant-Name", 1, "PidTagAssistant", "PR_ASSISTANT", "Assistant"),
0x3a40: (0x000b, "mAPIRecipient", "ms-Exch-MAPI-Recipient", 1, "PidTagSendRichInfo", "PR_SEND_RICH_INFO", "SendRichInfo"),
0x3a5d: (0x001f, "homePostalAddress", "Address-Home", 1, "PidTagHomeAddressStreet", "PR_HOME_ADDRESS_STREET", "HomeAddressStreet"),
0x3a70: (0x1102, "userSMIMECertificate", "User-SMIME-Certificate", 1, "PidTagUserX509Certificate", "PR_USER_X509_CERTIFICATE", "UserSMimeCertificate"),
0x3a71: (0x0003, "internetEncoding", "ms-Exch-Internet-Encoding", 1, "PidTagSendInternetEncoding", "PR_SEND_INTERNET_ENCODING", "SendInternetEncoding"),
0x8003: (0x1102, "cACertificate", "CA-Certificate", 1, None, None, None),
0x8004: (0x001f, "folderPathname", "ms-Exch-Folder-Pathname", 1, "PidTagAddressBookFolderPathname", "PR_EMS_AB_FOLDER_PATHNAME", None),
0x8005: (0x000d, "manager", "Manager", 1, "PidTagAddressBookManagerDistinguishedName", "PR_EMS_AB_MANAGER_T", None),
0x8006: (0x000d, "homeMDB", "ms-Exch-Home-MDB", 1, "PidTagAddressBookHomeMessageDatabase", "PR_EMS_AB_HOME_MDB", "HomeMdb"),
0x8007: (0x001f, "homeMTA", "ms-Exch-Home-MTA", 1, "PidLidContactItemData", "dispidContactItemData", None),
0x8008: (0x000d, "memberOf", "Is-Member-Of-DL", 3, "PidTagAddressBookIsMemberOfDistributionList", "PR_EMS_AB_IS_MEMBER_OF_DL", "MemberOf"),
0x8009: (0x000d, "member", "Member", 1, "PidTagAddressBookMember", "PR_EMS_AB_MEMBER", "Members"),
0x800a: (0x001f, "autoReplyMessage", "ms-Exch-AutoReply-Message", 2, None, None, None),
0x800b: (0x000b, "autoReply", "ms-Exch-AutoReply", 1, None, None, None),
0x800c: (0x000d, "managedBy", "Managed-By", 1, "PidTagAddressBookOwner", "PR_EMS_AB_OWNER_O", "ManagedBy"),
0x800d: (0x001f, "kMServer", "ms-Exch-KM-Server", 2, None, None, None),
0x800e: (0x000d, "directReports", "Reports", 3, "PidTagAddressBookReports", "PR_EMS_AB_REPORTS", None),
0x800f: (0x101f, "proxyAddresses", "Proxy-Addresses", 1, "PidTagAddressBookProxyAddresses", "PR_EMS_AB_PROXY_ADDRESSES", "ProxyAddresses"),
0x8010: (0x0102, "helpData32", "Help-Data32", 3, "PidLidDepartment", "dispidDepartment", "TemplateInfoHelpFileContents"),
0x8011: (0x001f, "targetAddress", "ms-Exch-Target-Address", 1, "PidTagAddressBookTargetAddress", "PR_EMS_AB_TARGET_ADDRESS", None),
0x8014: (0x000d, "homeMDBBL", "ms-Exch-Home-MDB-BL", 1, None, None, None),
0x8015: (0x000d, "publicDelegates", "ms-Exch-Public-Delegates", 1, "PidTagAddressBookPublicDelegates", "PR_EMS_AB_PUBLIC_DELEGATES", "GrantSendOnBehalfTo"),
0x8016: (0x0102, "certificateRevocationList", "Certificate-Revocation-List", 3, None, None, None),
0x8017: (0x0102, "addressEntryDisplayTable", "Address-Entry-Display-Table", 3, None, None, "TemplateInfoTemplate"),
0x8018: (0x0102, "addressSyntax", "Address-Syntax", 3, None, None, "TemplateInfoScript"),
0x8023: (0x0102, "businessRoles", "ms-Exch-Business-Roles", 2, "PidLidContactCharacterSet", "dispidContactCharSet", None),
0x8024: (0x000d, "managedObjects", "Managed-Objects", 3, "PidTagAddressBookOwnerBackLink", "PR_EMS_AB_OWNER_BL_O", None),
0x8025: (0x1102, "crossCertificatePair", "Cross-Certificate-Pair", 3, "PidLidAutoLog", "dispidAutoLog", None),
0x8026: (0x1102, "authorityRevocationList", "Authority-Revocation-List", 3, "PidLidFileUnderList", "dispidFileUnderList", None),
0x8027: (0x0102, "objectSid", "Object-Sid", 1, None, None, None),
0x8028: (0x0040, "expirationTime", "ms-Exch-Expiration-Time", 2, "PidLidAddressBookProviderEmailList", "dispidABPEmailList", None),
0x8029: (0x0003, "uSNChanged", "USN-Changed", 1, "PidLidAddressBookProviderArrayType", "dispidABPArrayType", None),
0x802d: (0x001f, "extensionAttribute1", "ms-Exch-Extension-Attribute-1", 1, "PidTagAddressBookExtensionAttribute1", "PR_EMS_AB_EXTENSION_ATTRIBUTE_1", None),
0x802e: (0x001f, "extensionAttribute2", "ms-Exch-Extension-Attribute-2", 1, "PidTagAddressBookExtensionAttribute2", "PR_EMS_AB_EXTENSION_ATTRIBUTE_2", None),
0x802f: (0x001f, "extensionAttribute3", "ms-Exch-Extension-Attribute-3", 1, "PidTagAddressBookExtensionAttribute3", "PR_EMS_AB_EXTENSION_ATTRIBUTE_3", None),
0x8030: (0x001f, "extensionAttribute4", "ms-Exch-Extension-Attribute-4", 1, "PidTagAddressBookExtensionAttribute4", "PR_EMS_AB_EXTENSION_ATTRIBUTE_4", None),
0x8031: (0x001f, "extensionAttribute5", "ms-Exch-Extension-Attribute-5", 1, "PidTagAddressBookExtensionAttribute5", "PR_EMS_AB_EXTENSION_ATTRIBUTE_5", None),
0x8032: (0x001f, "extensionAttribute6", "ms-Exch-Extension-Attribute-6", 1, "PidTagAddressBookExtensionAttribute6", "PR_EMS_AB_EXTENSION_ATTRIBUTE_6", None),
0x8033: (0x001f, "extensionAttribute7", "ms-Exch-Extension-Attribute-7", 1, "PidTagAddressBookExtensionAttribute7", "PR_EMS_AB_EXTENSION_ATTRIBUTE_7", None),
0x8034: (0x001f, "extensionAttribute8", "ms-Exch-Extension-Attribute-8", 1, "PidTagAddressBookExtensionAttribute8", "PR_EMS_AB_EXTENSION_ATTRIBUTE_8", None),
0x8035: (0x001f, "extensionAttribute9", "ms-Exch-Extension-Attribute-9", 1, "PidTagAddressBookExtensionAttribute9", "PR_EMS_AB_EXTENSION_ATTRIBUTE_9", None),
0x8036: (0x001f, "extensionAttribute10", "ms-Exch-Extension-Attribute-10", 1, "PidTagAddressBookExtensionAttribute10", "PR_EMS_AB_EXTENSION_ATTRIBUTE_10", None),
0x8037: (0x1102, "securityProtocol", "ms-Exch-Security-Protocol", 1, | |
<gh_stars>10-100
import logging
import os
import argparse
import random
from tqdm import tqdm, trange
import json
import re
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from Bert_tokenization import BertTokenizer
from Bert_modeling import BertForMultipleChoice
from Bert_optimization import BertAdam
from file_utils import PYTORCH_PRETRAINED_BERT_CACHE
# Configure root logging once at import time: timestamped, level- and
# logger-name-tagged records at INFO level.
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt = '%m/%d/%Y %H:%M:%S',
                    level = logging.INFO)
# Module-level logger used throughout this script.
logger = logging.getLogger(__name__)
class Baidu_dev_example(object):
    """A single multiple-choice example from the Baidu reading-comprehension dev set.

    Each example carries a question id, the question type, the question text,
    exactly 20 candidate answer strings, and (optionally) the index of the
    best candidate (highest RougeL) as the label.
    """

    def __init__(self,
                 qas_id,
                 type,
                 question,
                 choice_0, choice_1, choice_2, choice_3, choice_4,
                 choice_5, choice_6, choice_7, choice_8, choice_9,
                 choice_10, choice_11, choice_12, choice_13, choice_14,
                 choice_15, choice_16, choice_17, choice_18, choice_19,
                 label=None):
        # Unique question id from the source corpus.
        self.qas_id = qas_id
        # Question type string; the parameter shadows the builtin `type` but
        # is kept as-is so existing keyword callers (type=...) keep working.
        self.type = type
        self.question = question
        # All 20 candidate answers, in choice order.
        self.choices = [
            choice_0, choice_1, choice_2, choice_3, choice_4,
            choice_5, choice_6, choice_7, choice_8, choice_9,
            choice_10, choice_11, choice_12, choice_13, choice_14,
            choice_15, choice_16, choice_17, choice_18, choice_19,
        ]
        # Index of the gold choice, or None at prediction time.
        self.label = label

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        # Fix: emit a uniform "choice_i: value" format for every choice.
        # (The original hand-written list omitted the space after the colon
        # from choice_2 onward.)
        parts = [
            f"qas_id: {self.qas_id}",
            f"type: {self.type}",
            f"question: {self.question}",
        ]
        parts.extend(f"choice_{i}: {choice}" for i, choice in enumerate(self.choices))
        if self.label is not None:
            parts.append(f"label: {self.label}")
        return ", ".join(parts)
class InputFeatures(object):
    """Tensor-ready features for one example: per-choice ids/mask/segments plus label."""

    def __init__(self, example_id, choices_features, label):
        self.example_id = example_id
        # `choices_features` arrives as (tokens, input_ids, input_mask,
        # segment_ids) tuples; keep only the numeric fields, keyed by name.
        self.choices_features = [
            dict(input_ids=ids, input_mask=mask, segment_ids=segments)
            for _tokens, ids, mask, segments in choices_features
        ]
        self.label = label
def clean_space(text):
    """Remove stray spaces that follow CJK characters/punctuation or digits.

    Runs of latin letters (which legitimately contain spaces) are matched as a
    whole and survive stripping, so "hello world" is untouched while
    "中文 ," collapses to "中文,".
    """
    pattern = re.compile(u'[\u4e00-\u9fa5。\.,,::《》、\(\)()]{1} +(?<![a-zA-Z])|\d+ +| +\d+|[a-z A-Z]+')
    fragments = pattern.findall(text)
    # Replace longer fragments first so shorter ones cannot clobber them.
    for fragment in sorted(fragments, key=len, reverse=True):
        if fragment == u' ':
            continue
        text = text.replace(fragment, fragment.strip())
    return text
def read_baidu_dev_example(input_file, is_training=True):
    """Parse a jsonlines dev file into a list of Baidu_dev_example objects.

    Every line must supply question_id / question / question_type plus an
    "answers" list of 20 candidates, each carrying `text` and a `RougeL`
    score; the highest-RougeL candidate's index becomes the label.
    """
    examples = []
    with open(input_file, "r", encoding='utf-8') as reader:
        for line in reader:
            record = json.loads(line)
            texts, scores = [], []
            for answer in record['answers']:
                texts.append(clean_space(answer['text']))
                scores.append(answer['RougeL'])
            # Expand the 20 candidates into the choice_0..choice_19 keywords.
            choice_kwargs = {'choice_%d' % i: texts[i] for i in range(20)}
            examples.append(Baidu_dev_example(
                qas_id=record['question_id'],
                type=record['question_type'],
                question=record['question'],
                label=scores.index(max(scores)),
                **choice_kwargs
            ))
    return examples
def convert_examples_to_features(examples, tokenizer, max_seq_length,
                                 is_training):
    """Turn multiple-choice examples into per-choice BERT input features.

    For every example, each candidate answer is paired with the question-type
    and question tokens to form one sequence:

        [CLS] type + question tokens [SEP] choice tokens [SEP]

    The model later scores each such sequence and a softmax over the scores
    selects the winning choice.
    """
    features = []
    for example in examples:
        prefix_tokens = tokenizer.tokenize(example.type) + tokenizer.tokenize(example.question)
        choices_features = []
        for choice in example.choices:
            # Work on a copy so truncation never mutates the shared prefix.
            first_segment = prefix_tokens[:]
            second_segment = tokenizer.tokenize(choice)
            # Reserve room for [CLS] and the two [SEP] markers.
            _truncate_seq_pair(first_segment, second_segment, max_seq_length - 3)
            tokens = ["[CLS]"] + first_segment + ["[SEP]"] + second_segment + ["[SEP]"]
            segment_ids = [0] * (len(first_segment) + 2) + [1] * (len(second_segment) + 1)
            input_ids = tokenizer.convert_tokens_to_ids(tokens)
            input_mask = [1] * len(input_ids)
            # Zero-pad every field out to the fixed sequence length.
            pad = [0] * (max_seq_length - len(input_ids))
            input_ids = input_ids + pad
            input_mask = input_mask + pad
            segment_ids = segment_ids + pad
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            choices_features.append((tokens, input_ids, input_mask, segment_ids))
        features.append(
            InputFeatures(
                example_id=example.qas_id,
                choices_features=choices_features,
                label=example.label,
            )
        )
    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def accuracy(out, labels):
    """Count the rows of `out` whose argmax matches the corresponding label."""
    return (np.argmax(out, axis=1) == labels).sum()
def select_field(features, field):
    """Extract one named field (e.g. 'input_ids') from every choice of every feature."""
    rows = []
    for feature in features:
        rows.append([choice[field] for choice in feature.choices_features])
    return rows
def warmup_linear(x, warmup=0.002):
    """LR multiplier: ramp up as x/warmup during warmup, then decay as 1 - x."""
    return x / warmup if x < warmup else 1.0 - x
def model_eval(model, device, input_examples, do_train=False):
    """Evaluate `model` on batches of (input_ids, input_mask, segment_ids, label_ids).

    Averages the multiple-choice loss over batches and accuracy over examples,
    logs the metrics (tagged as train or eval results depending on
    `do_train`), and returns ``(accuracy, metrics_dict)``.
    """
    model.eval()
    total_loss, total_correct = 0, 0
    batch_count, example_count = 0, 0
    for input_ids, input_mask, segment_ids, label_ids in input_examples:
        # Move the whole batch onto the evaluation device.
        input_ids = input_ids.to(device)
        input_mask = input_mask.to(device)
        segment_ids = segment_ids.to(device)
        label_ids = label_ids.to(device)
        with torch.no_grad():
            # One forward pass with labels for the loss, one without for logits.
            batch_loss = model(input_ids, segment_ids, input_mask, label_ids)
            logits = model(input_ids, segment_ids, input_mask)
        total_loss += batch_loss.mean().item()
        total_correct += accuracy(logits.detach().cpu().numpy(), label_ids.to('cpu').numpy())
        example_count += input_ids.size(0)
        batch_count += 1
    eval_loss = total_loss / batch_count
    eval_accuracy = total_correct / example_count
    if do_train:
        result = {'train_loss': eval_loss,
                  'train_accuracy': eval_accuracy}
        logger.info("***** Train results *****")
    else:
        result = {'eval_loss': eval_loss,
                  'eval_accuracy': eval_accuracy}
        logger.info("***** Eval results *****")
    for key in sorted(result.keys()):
        logger.info("  %s = %s", key, str(result[key]))
    return eval_accuracy, result
def model_save(model, output_dir, name):
    """Serialize the model's state dict to ``output_dir/name``.

    Unwraps ``model.module`` first so DataParallel/DistributedDataParallel
    wrappers save the bare model's weights.
    """
    target = getattr(model, 'module', model)
    torch.save(target.state_dict(), os.path.join(output_dir, name))
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .json files (or other data files) for the task.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints will be written.")
## Other parameters
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
default=False,
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
default=False,
action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case",
default=False,
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=32,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10%% of training.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--local_rank",
type=int,
default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed',
type=int,
default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16',
default=False,
action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--loss_scale',
type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
args = parser.parse_args()
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
n_gpu = 1
# Initializes the distributed backend which will take care of sychronizing nodes/GPUs
torch.distributed.init_process_group(backend='nccl')
logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
device, n_gpu, bool(args.local_rank != -1), args.fp16))
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = int(args.train_batch_size // args.gradient_accumulation_steps)
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
# if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
# raise ValueError("Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
| |
<filename>treedlib/treedlib/templates.py
from itertools import chain
import re
import lxml.etree as et
from collections import defaultdict
# NODESET:
# ===========
class NodeSet:
    """
    NodeSet objects are functions f : 2^T -> 2^T
    ---------------
    Composed lazily: each wrapper only extends the stored xpath query, which
    is executed later to select the subtree / node set that indicator
    features operate over.
    """
    def __init__(self, label='NODESET', xpath='//*', psort=None):
        self.label = label
        self.xpath = xpath
        # Node attribute to sort on after the xpath query has run
        self.psort = psort

    def __repr__(self):
        return '<{0}, xpath="{1}">'.format(self.label, self.xpath)
class Mention(NodeSet):
    """Selects the nodes tagged as candidate mention number `cid`."""
    def __init__(self, cid=0):
        self.label = 'MENTION'
        # The {cid} placeholder is filled in later (see Indicator.apply)
        self.xpath = "//*[{%s}]" % cid
class LeftSiblings(NodeSet):
    """Selects up to `w` siblings immediately preceding the wrapped node set."""
    def __init__(self, ns, w=1):
        # Start from the wrapped set's attributes, then specialize
        self.__dict__.update(ns.__dict__)
        self.label = 'LEFT-OF-%s' % ns.label
        self.xpath = '{0}[1]/preceding-sibling::*[position() <= {1}]'.format(ns.xpath, w)
class RightSiblings(NodeSet):
    """Selects up to `w` siblings immediately following the wrapped node set."""
    def __init__(self, ns, w=1):
        # Start from the wrapped set's attributes, then specialize
        self.__dict__.update(ns.__dict__)
        self.label = 'RIGHT-OF-%s' % ns.label
        self.xpath = '{0}[1]/following-sibling::*[position() <= {1}]'.format(ns.xpath, w)
# TODO: These should be "Descendants" / "Ancestors"...
class Children(NodeSet):
    """Selects the direct children of the first node in the wrapped set."""
    def __init__(self, ns):
        # Start from the wrapped set's attributes, then specialize
        self.__dict__.update(ns.__dict__)
        self.label = 'CHILDREN-OF-%s' % ns.label
        self.xpath = '%s[1]/*' % ns.xpath
class Parents(NodeSet):
    """Selects up to `num_parents` ancestors of the first node in the wrapped set."""
    def __init__(self, ns, num_parents=1):
        # Start from the wrapped set's attributes, then specialize
        self.__dict__.update(ns.__dict__)
        self.label = 'PARENTS-OF-%s' % ns.label
        self.xpath = '%s[1]/ancestor::*[position()<%s]' % (ns.xpath, num_parents + 1)
class Between(NodeSet):
    """
    Gets the nodes between two node sets
    Note: this requires some ugly xpath... could change this to non-xpath method
    """
    def __init__(self, ns1, ns2):
        # NOTE: inherits attributes from ns1 (the *first* set) only
        self.__dict__.update(ns1.__dict__) # inherit *FIRST* child object's attributes
        self.label = 'BETWEEN-%s-and-%s' % (ns1.label, ns2.label)
        # Walks up from ns1 to the lowest common ancestor of both sets, then
        # keeps that ancestor's descendants lying on a path to either set.
        # The count(a | b) = count(b) construction is the XPath 1.0 idiom for
        # set membership/intersection tests.
        self.xpath = "{0}[1]/ancestor-or-self::*[count(. | {1}[1]/ancestor-or-self::*) = count({1}[1]/ancestor-or-self::*)][1]/descendant-or-self::*[((count(.{0}) = count({0})) or (count(.{1}) = count({1})))]".format(ns1.xpath, ns2.xpath)
class SeqBetween(NodeSet):
    """
    Selects the nodes lying between the two mentions in *sentence*
    (not dependency-tree) order.
    """
    def __init__(self, seq_attrib='word_idx'):
        # TODO: Extend to take in pair of NodeSets?
        self.xpath = '//*'
        self.label = 'SEQ-BETWEEN'
        # The actual between-filtering happens in Indicator.apply using this
        # attribute; psort asks for a post-xpath re-sort by sentence position.
        self.seq_attrib = seq_attrib
        self.psort = seq_attrib
class Filter(NodeSet):
    """
    Restricts the wrapped node set by one attribute value, using either a
    prefix (starts-with) or an exact match — e.g. coarse POS filtering.
    """
    def __init__(self, ns, filter_attr, filter_by, starts_with=True):
        # Start from the wrapped set's attributes, then specialize
        self.__dict__.update(ns.__dict__)
        self.label = 'FILTER-BY(%s=%s):%s' % (filter_attr, filter_by, ns.label)
        if starts_with:
            predicate = "[starts-with(@%s, '%s')]" % (filter_attr, filter_by)
        else:
            predicate = "[@%s='%s']" % (filter_attr, filter_by)
        self.xpath = ns.xpath + predicate
# INDICATOR:
# ===========
def compile_dict_sub(brown_clusters_path=None, user_dicts=()):
    """Build a single word -> DICT_LABEL substitution map.

    :param brown_clusters_path: optional path to a tsv file of
        ``word<TAB>cluster_id`` lines; matched words map to ``BC-<cluster_id>``
    :param user_dicts: iterable of (DICT_LABEL, set_of_words) tuples
    :return: dict mapping word -> label; earlier user dicts take priority over
        later ones, and all user dicts take priority over Brown clusters
    """
    dict_sub = {}
    # User dictionaries: the first label to claim a word wins.
    for dict_label, words in user_dicts:
        for word in words:
            if word not in dict_sub:
                dict_sub[word] = dict_label
    # Brown clusters: only fill in words not already claimed, so that user
    # dictionaries keep priority.  (The original unconditionally overwrote
    # user-dict entries here, contradicting its own docstring.)
    if brown_clusters_path is not None:
        # Text mode so the '\t' split works on str under both py2 and py3.
        with open(brown_clusters_path, 'r') as f:
            for line in f:
                word, cluster_id = line.rstrip().split('\t')
                if word not in dict_sub:
                    dict_sub[word] = 'BC-%s' % cluster_id
    return dict_sub
class Indicator:
    """
    Indicator objects are functions f : 2^T -> {0,1}^F
    ---------------
    An Indicator pairs a NodeSet with one or more node attributes and emits
    string indicator features computed from those attributes over the nodes
    the NodeSet selects.
    """
    def __init__(self, ns, attribs):
        self.ns = ns
        self.attribs = attribs

    def apply(self, root, cids, cid_attrib='word_idx', feat_label=True, inv_tag=True, stopwords=None, dict_sub={}):
        """
        Apply the feature template to the xml tree provided
        A list of lists of candidate mention ids are passed in, as well as a cid_attrib
        These identify the candidate mentions referred to by index in Mention
        For example, cids=[[1,2]], cid_attrib='word_idx' will have mention 0 as the set of
        nodes that have word index 1 and 2
        """
        # Sub in the candidate mention identifiers provided
        m = [" or ".join("@%s='%s'" % (cid_attrib, c) for c in cid) for cid in cids]
        xpath = self.ns.xpath.format(*m)

        # INV tag if binary relation with the mentions in reversed sentence order
        inv = 'INV_' if inv_tag and len(cids) == 2 and cids[0][0] > cids[1][0] else ''

        # Get nodes
        nodes = root.xpath(xpath)

        # Filter stopwords.  Fix: materialize as a list — py2's filter()
        # returned a list, but py3's lazy iterator would break the len() and
        # sort below.
        if stopwords is not None and len(stopwords) > 0:
            nodes = [n for n in nodes
                     if n.get('word') not in stopwords and n.get('lemma') not in stopwords]

        # Perform seq filter here (same list-materialization fix as above)
        if hasattr(self.ns, 'seq_attrib') and self.ns.seq_attrib is not None:
            seqa = self.ns.seq_attrib
            b = (cids[0][-1], cids[-1][0]) if cids[0][-1] < cids[-1][0] else (cids[-1][-1], cids[0][0])
            nodes = [n for n in nodes
                     if n.get(seqa) is not None and b[0] < int(n.get(seqa)) < b[1]]

        # If sort specified, perform here
        if hasattr(self.ns, 'psort') and self.ns.psort is not None:
            nodes = sorted(nodes, key=lambda n: int(n.get(self.ns.psort)))

        # Specifically handle single attrib or multiple attribs per node here
        try:
            attribs = re.split(r'\s*,\s*', self.attribs)
            res = ['|'.join(str(node.get(a)) for a in attribs) for node in nodes]
            label = '%s%s:%s' % (inv, '|'.join(attribs).upper(), self.ns.label)
            # Check each result value against a dictionary which maps string -> DICT_NAME,
            # and replace with the value "DICT_NAME"
            # NOTE: Only apply to word/lemma indicators for now
            if len(attribs) == 1 and attribs[0] in ('word', 'lemma') and len(dict_sub) > 0:
                res = [dict_sub.get(a, a) for a in res]
        except AttributeError:
            # attribs was not a string: fall back to yielding the nodes themselves
            res = nodes
            label = '%s%s' % (inv, self.ns.label)

        # Only yield if non-zero result set; process through _get_features fn
        if len(res) > 0:
            for feat in self._get_features(res):
                if feat_label:
                    yield '%s[%s]' % (label, feat)
                else:
                    yield feat

    def _get_features(self, res):
        """
        Given a result set of attribute values, return a set of strings representing the features
        This should be the default method to replace for Indicator objects
        """
        return [' '.join(res)]

    def print_apply(self, root, cids, cid_attrib='word_idx', feat_label=True, dict_sub={}, stopwords=None):
        """Print each generated feature.  Fix: print() call is py2/py3-compatible."""
        for feat in self.apply(root, cids, cid_attrib, feat_label=feat_label, dict_sub=dict_sub, stopwords=stopwords):
            print(feat)

    def result_set(self, root, cids, cid_attrib='word_idx', feat_label=False, dict_sub={}, stopwords=None):
        """Get results as a set- mostly for use in DSR applications"""
        res = set()
        for feat in self.apply(root, cids, cid_attrib=cid_attrib, feat_label=feat_label, dict_sub=dict_sub, stopwords=stopwords):
            res.add(feat)
        return res

    def __repr__(self):
        return '<%s:%s:%s, xpath="%s">' % (self.__class__.__name__, self.attribs, self.ns.label, self.ns.xpath)
class Ngrams(Indicator):
    """
    Indicator features over the ngrams of a result set.
    An int `ng` selects ngrams of *exactly* that length; a list/tuple selects
    every length in that range, *inclusive*.
    """
    def __init__(self, ns, attribs, ng):
        self.ns = ns
        self.attribs = attribs
        valid_int = type(ng) == int and ng > 0
        valid_range = type(ng) in [list, tuple] and ng[0] > 0
        if valid_int or valid_range:
            self.ng = ng
        else:
            raise ValueError("Improper ngram range: %s" % ng)

    def _get_features(self, res):
        # `sizes` holds ngram lengths minus one (the number of extra tokens)
        if type(self.ng) == int:
            sizes = [self.ng - 1]
        else:
            sizes = range(self.ng[0] - 1, min(len(res), self.ng[1]))
        return chain.from_iterable(
            [' '.join(res[start:start + size + 1]) for start in range(len(res) - size)]
            for size in sizes)
class RightNgrams(Indicator):
    """Indicator features for every prefix ngram (all ngrams starting at position 0)."""
    def _get_features(self, res):
        prefixes = []
        for end in range(1, len(res) + 1):
            prefixes.append(' '.join(res[:end]))
        return prefixes
class LeftNgrams(Indicator):
    """Return all the ngrams which end at the last position (the suffixes of the result set)."""
    def _get_features(self, res):
        return [' '.join(res[l:]) for l in range(len(res))]
class Regexp(Indicator):
    """
    Yields a single indicator feature when the regular expression matches the
    concatenation (joined by `sep`) of the result-set strings.
    """
    def __init__(self, ns, attribs, rgx, rgx_label, sep=' '):
        self.ns = ns
        self.attribs = attribs
        self.rgx = rgx
        self.rgx_label = rgx_label
        self.sep = sep
        # Put candidate nodes into sentence order before joining
        self.psort = 'word_idx'

    def _get_features(self, res):
        if re.search(self.rgx, self.sep.join(res)) is not None:
            yield 'RGX:%s' % self.rgx_label
class LengthBin(Indicator):
"""
Return indicator features for the length (size) of the node set
binned according to provided values
bins should be a list of INTS
"""
def __init__(self, ns, bin_divs):
self.ns = ns
self.bins = []
for i,d in enumerate(bin_divs):
if i == 0:
self.bins.append((0,d-1))
else:
self.bins.append((bin_divs[i-1],d-1))
def _get_features(self, res):
lbin = None
l = len(res)
for b in self.bins:
if l >= b[0] and l <= b[1]:
lbin = b
break
if lbin is None:
lbin = (self.bins[-1][1]+1, 'inf')
yield | |
exception to be raised from expects
:return: The HTTP response
:rtype: :class:`aiohttp.ClientResponse`
:raises: :class:`.UnhandledProviderError` Raised if expects is defined
:raises: :class:`.WaterButlerError` Raised if invalid HTTP method is provided
"""
kwargs['headers'] = self.build_headers(**kwargs.get('headers', {}))
no_auth_header = kwargs.pop('no_auth_header', False)
if no_auth_header:
kwargs['headers'].pop('Authorization')
retry = _retry = kwargs.pop('retry', 2)
expects = kwargs.pop('expects', None)
throws = kwargs.pop('throws', exceptions.UnhandledProviderError)
byte_range = kwargs.pop('range', None)
if byte_range:
kwargs['headers']['Range'] = self._build_range_header(byte_range)
connector = kwargs.pop('connector', None)
session = self.get_or_create_session(connector=connector)
method = method.upper()
while retry >= 0:
# Don't overwrite the callable ``url`` so that signed URLs are refreshed for every retry
non_callable_url = url() if callable(url) else url
try:
self.provider_metrics.incr('requests.count')
# TODO: use a `dict` to select methods with either `lambda` or `functools.partial`
if method == 'GET':
response = await session.get(non_callable_url,
timeout=wb_settings.AIOHTTP_TIMEOUT,
*args, **kwargs)
elif method == 'PUT':
response = await session.put(non_callable_url,
timeout=wb_settings.AIOHTTP_TIMEOUT,
*args, **kwargs)
elif method == 'POST':
response = await session.post(non_callable_url,
timeout=wb_settings.AIOHTTP_TIMEOUT,
*args, **kwargs)
elif method == 'HEAD':
response = await session.head(non_callable_url, *args, **kwargs)
elif method == 'DELETE':
response = await session.delete(non_callable_url, **kwargs)
elif method == 'PATCH':
response = await session.patch(non_callable_url, *args, **kwargs)
elif method == 'OPTIONS':
response = await session.options(non_callable_url, *args, **kwargs)
elif method in wb_settings.WEBDAV_METHODS:
# `aiohttp.ClientSession` only has functions available for native HTTP methods.
# For WebDAV (a protocol that extends HTTP) ones, WB lets the `ClientSession`
# instance call `_request()` directly and then wraps the return object with
# `aiohttp.client._RequestContextManager`.
response = await _RequestContextManager(
session._request(method, url, *args, **kwargs)
)
else:
raise exceptions.WaterButlerError('Unsupported HTTP method ...')
self.provider_metrics.incr('requests.tally.ok')
if expects and response.status not in expects:
unexpected = await exceptions.exception_from_response(response,
error=throws, **kwargs)
raise unexpected
return response
except throws as e:
self.provider_metrics.incr('requests.tally.nok')
if retry <= 0 or e.code not in self._retry_on:
raise
await asyncio.sleep((1 + _retry - retry) * 2)
retry -= 1
    def request(self, *args, **kwargs):
        """Wrap :meth:`make_request` in a ``RequestHandlerContext``.

        All arguments are forwarded unchanged to :meth:`make_request`.
        NOTE(review): presumably the context manager releases the response on
        exit — confirm against ``RequestHandlerContext``.
        """
        return RequestHandlerContext(self.make_request(*args, **kwargs))
    async def move(self,
                   dest_provider: 'BaseProvider',
                   src_path: wb_path.WaterButlerPath,
                   dest_path: wb_path.WaterButlerPath,
                   rename: str=None,
                   conflict: str='replace',
                   handle_naming: bool=True) -> typing.Tuple[wb_metadata.BaseMetadata, bool]:
        """Moves a file or folder from the current provider to the specified one
        Performs a copy and then a delete.
        Calls :func:`BaseProvider.intra_move` if possible.
        :param dest_provider: ( :class:`.BaseProvider` ) The provider to move to
        :param src_path: ( :class:`.WaterButlerPath` ) Path to where the resource can be found
        :param dest_path: ( :class:`.WaterButlerPath` ) Path to where the resource will be moved
        :param rename: ( :class:`str` ) The desired name of the resulting path, may be incremented
        :param conflict: ( :class:`str` ) What to do in the event of a name conflict, ``replace`` or ``keep``
        :param handle_naming: ( :class:`bool` ) If a naming conflict is detected, should it be automatically handled?
        :return: ( :class:`tuple` ) (metadata of the moved resource, whether it was newly created)
        """
        args = (dest_provider, src_path, dest_path)
        kwargs = {'rename': rename, 'conflict': conflict}
        # Record how this move was invoked, for metrics
        self.provider_metrics.add('move', {
            'got_handle_naming': handle_naming,
            'conflict': conflict,
            'got_rename': rename is not None,
        })
        if handle_naming:
            # May rename or increment dest_path to resolve a naming conflict;
            # rename/conflict are consumed here, so clear them from kwargs
            dest_path = await dest_provider.handle_naming(
                src_path,
                dest_path,
                rename=rename,
                conflict=conflict,
            )
            args = (dest_provider, src_path, dest_path)
            kwargs = {}
        # files and folders shouldn't overwrite themselves
        if (
                self.shares_storage_root(dest_provider) and
                src_path.materialized_path == dest_path.materialized_path
        ):
            raise exceptions.OverwriteSelfError(src_path)
        self.provider_metrics.add('move.can_intra_move', False)
        if self.can_intra_move(dest_provider, src_path):
            # Same storage backend: let the provider move without a download/upload cycle
            self.provider_metrics.add('move.can_intra_move', True)
            return await self.intra_move(*args)
        if src_path.is_dir:
            # Folders recurse through _folder_file_op
            meta_data, created = await self._folder_file_op(self.move, *args, **kwargs)  # type: ignore
        else:
            # Files fall back to copy-then-delete
            meta_data, created = await self.copy(*args, handle_naming=False, **kwargs)  # type: ignore
        await self.delete(src_path)
        return meta_data, created
async def copy(self,
               dest_provider: 'BaseProvider',
               src_path: wb_path.WaterButlerPath,
               dest_path: wb_path.WaterButlerPath,
               rename: str=None, conflict: str='replace',
               handle_naming: bool=True) \
        -> typing.Tuple[wb_metadata.BaseMetadata, bool]:
    """Copy a file or folder from the current provider to ``dest_provider``.

    Uses :func:`BaseProvider.intra_copy` when the two providers support it;
    otherwise falls back to a download from the source followed by an upload
    to the destination (recursing via :func:`BaseProvider._folder_file_op`
    for folders).

    :param dest_provider: ( :class:`.BaseProvider` ) The provider to copy to
    :param src_path: ( :class:`.WaterButlerPath` ) Path to where the resource can be found
    :param dest_path: ( :class:`.WaterButlerPath` ) Path to where the resource will be copied
    :param rename: ( :class:`str` ) The desired name of the resulting path, may be incremented
    :param conflict: ( :class:`str` ) What to do in the event of a name conflict, ``replace`` or ``keep``
    :param handle_naming: ( :class:`bool` ) If a naming conflict is detected, should it be automatically handled?
    :return: (metadata of the copied entity, ``True`` if newly created)
    """
    args = (dest_provider, src_path, dest_path)
    kwargs = {'rename': rename, 'conflict': conflict, 'handle_naming': handle_naming}
    self.provider_metrics.add('copy', {
        'got_handle_naming': handle_naming,
        'conflict': conflict,
        'got_rename': rename is not None,
    })
    if handle_naming:
        dest_path = await dest_provider.handle_naming(
            src_path,
            dest_path,
            rename=rename,
            conflict=conflict,
        )
        args = (dest_provider, src_path, dest_path)
        # naming has been resolved, so recursive calls must not re-handle it
        kwargs = {}
    # files and folders shouldn't overwrite themselves
    if (
        self.shares_storage_root(dest_provider) and
        src_path.materialized_path == dest_path.materialized_path
    ):
        raise exceptions.OverwriteSelfError(src_path)
    self.provider_metrics.add('copy.can_intra_copy', False)
    if self.can_intra_copy(dest_provider, src_path):
        self.provider_metrics.add('copy.can_intra_copy', True)
        return await self.intra_copy(*args)
    if src_path.is_dir:
        return await self._folder_file_op(self.copy, *args, **kwargs)  # type: ignore
    download_stream = await self.download(src_path)
    if getattr(download_stream, 'name', None):
        # some providers attach the real file name to the stream; honor it
        dest_path.rename(download_stream.name)
    return await dest_provider.upload(download_stream, dest_path)
async def _folder_file_op(self,
                          func: typing.Callable,
                          dest_provider: 'BaseProvider',
                          src_path: wb_path.WaterButlerPath,
                          dest_path: wb_path.WaterButlerPath,
                          **kwargs) -> typing.Tuple[wb_metadata.BaseFolderMetadata, bool]:
    """Recursively apply ``func`` to the contents of the folder at ``src_path``.

    Called from :func:`copy` and :func:`move` when ``src_path.is_dir``.
    Deletes any pre-existing destination folder (recording whether one
    existed for the returned ``created`` flag), recreates it, then applies
    ``func`` to every child, ``wb_settings.OP_CONCURRENCY`` requests at a
    time.

    :param func: ( coroutine ) ``self.copy`` or ``self.move``, applied per child
    :param dest_provider: ( :class:`.BaseProvider` ) Destination provider
    :param src_path: ( :class:`.WaterButlerPath` ) Source folder path
    :param dest_path: ( :class:`.WaterButlerPath` ) Destination folder path
    :return: (folder metadata with ``children`` populated, ``True`` if the
        destination folder did not previously exist)
    """
    assert src_path.is_dir, 'src_path must be a directory'
    assert asyncio.iscoroutinefunction(func), 'func must be a coroutine'
    try:
        await dest_provider.delete(dest_path)
        created = False
    except exceptions.ProviderError as e:
        # a 404 simply means there was nothing to delete
        if e.code != 404:
            raise
        created = True
    folder = await dest_provider.create_folder(dest_path, folder_precheck=False)
    dest_path = await dest_provider.revalidate_path(dest_path.parent, dest_path.name, folder=dest_path.is_dir)
    folder.children = []
    items = await self.metadata(src_path)  # type: ignore
    # Metadata returns a union, which confuses mypy
    self.provider_metrics.append('_folder_file_ops.item_counts', len(items))  # type: ignore
    # process children in batches to bound the number of concurrent requests
    for i in range(0, len(items), wb_settings.OP_CONCURRENCY):  # type: ignore
        futures = []
        for item in items[i:i + wb_settings.OP_CONCURRENCY]:  # type: ignore
            futures.append(asyncio.ensure_future(
                func(
                    dest_provider,
                    # TODO figure out a way to cut down on all the requests made here
                    (await self.revalidate_path(src_path, item.name, folder=item.is_folder)),
                    (await dest_provider.revalidate_path(dest_path, item.name, folder=item.is_folder)),
                    handle_naming=False,
                )
            ))
            if item.is_folder:
                # await subfolder recursion immediately so nested operations
                # do not multiply the concurrency bound
                await futures[-1]
        if not futures:
            continue
        done, _ = await asyncio.wait(futures, return_when=asyncio.FIRST_EXCEPTION)
        for fut in done:
            # fut.result() re-raises the first failure; [0] is the metadata
            folder.children.append(fut.result()[0])
    return folder, created
async def handle_naming(self,
                        src_path: wb_path.WaterButlerPath,
                        dest_path: wb_path.WaterButlerPath,
                        rename: str=None,
                        conflict: str='replace') -> wb_path.WaterButlerPath:
    """Resolve the final destination path for a copy/move operation.

    Handles the usual shell-like naming cases::

        cp /file.txt /folder/           -> /folder/file.txt
        cp /folder/  /folder/           -> /folder/folder/
        cp /file.txt /folder/file.txt   -> /folder/file.txt
        cp /file.txt /folder/file.txt   -> /folder/file (1).txt
        cp /file.txt /folder/doc.txt    -> /folder/doc.txt

    :param src_path: ( :class:`.WaterButlerPath` ) The object that is being copied
    :param dest_path: ( :class:`.WaterButlerPath` ) The path that is being copied to or into
    :param rename: ( :class:`str` ) The desired name of the resulting path, may be incremented
    :param conflict: ( :class:`str` ) The conflict resolution strategy, ``replace`` or ``keep``
    :rtype: :class:`.WaterButlerPath`
    """
    if src_path.is_dir and dest_path.is_file:
        # a directory can never be copied onto a file
        raise ValueError('Destination must be a directory if the source is')

    if dest_path.is_file:
        resolved = dest_path
    else:
        # copying *into* a directory: append the (possibly renamed) source name
        desired_name = rename or src_path.name
        resolved = await self.revalidate_path(
            dest_path,
            desired_name,
            folder=src_path.is_dir
        )

    resolved, _ = await self.handle_name_conflict(resolved, conflict=conflict)
    return resolved
def can_intra_copy(self,
                   other: 'BaseProvider',
                   path: wb_path.WaterButlerPath=None) -> bool:
    """Report whether a provider-native ("intra") copy to ``other`` is possible.

    The base implementation is conservative and always answers ``False``;
    providers capable of server-side copies override this hook.

    :param other: ( :class:`.BaseProvider` ) The provider to check against
    :param path: ( :class:`.WaterButlerPath` ) The path of the desired resource
    :rtype: :class:`bool`
    """
    return False
def can_intra_move(self,
                   other: 'BaseProvider',
                   path: wb_path.WaterButlerPath=None) -> bool:
    """Report whether a provider-native ("intra") move to ``other`` is possible.

    The base implementation is conservative and always answers ``False``;
    providers capable of server-side moves override this hook.

    :param other: ( :class:`.BaseProvider` ) The provider to check against
    :param path: ( :class:`.WaterButlerPath` ) The path of the desired resource
    :rtype: :class:`bool`
    """
    return False
async def intra_copy(self,
                     dest_provider: 'BaseProvider',
                     source_path: wb_path.WaterButlerPath,
                     dest_path: wb_path.WaterButlerPath) -> typing.Tuple[wb_metadata.BaseFileMetadata, bool]:
    """If the provider supports copying files and/or folders within itself by some means other
    than download/upload, then ``can_intra_copy`` should return ``True``. This method will
    implement the copy. It accepts the destination provider, a source path, and the
    destination path. Returns the metadata for the newly created file and a boolean indicating
    whether the copied entity is completely new (``True``) or overwrote a previously-existing
    file (``False``).

    :param dest_provider: ( :class:`.BaseProvider` ) a provider instance for the destination
    :param source_path: ( :class:`.WaterButlerPath` ) the Path of the entity being copied
    :param dest_path: ( :class:`.WaterButlerPath` ) the Path of the destination being copied to
    :rtype: (:class:`.BaseFileMetadata`, :class:`bool`)
    """
    raise NotImplementedError
async def intra_move(self,
dest_provider: 'BaseProvider',
src_path: wb_path.WaterButlerPath,
dest_path: wb_path.WaterButlerPath) -> typing.Tuple[wb_metadata.BaseFileMetadata, bool]:
"""If the provider supports moving files and/or folders within itself by some means other
than download/upload/delete, then ``can_intra_move`` should return ``True``. This method
will implement the move. It accepts the destination provider, a source path, and the
destination path. | |
# Source repo: luzsantamariag/terser -- file: meb/EmotionRecognition.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Dec 24 01:58:15 2019
@authors:
<NAME> (<EMAIL>)
<NAME> (<EMAIL>)
"""
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score
from tensorflow.keras.callbacks import ModelCheckpoint
import pandas as pd
import numpy as np
import os
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv1D, MaxPooling1D, Dropout
from tensorflow.keras.layers import Dense, LSTM, Flatten
from tensorflow.keras import optimizers
from tensorflow.keras.layers import GlobalAveragePooling1D
from matplotlib.ticker import MaxNLocator
import matplotlib.pyplot as plt
from tensorflow.keras.utils import plot_model
class EmotionRecognition:
"""
Recognize the affective state of the experiment participants using HR vectors.
"""
def __init__(self, X, X_train, Y_train, X_val, Y_val):
""" Default constructor
"""
self.X = X
self.X_train = X_train
self.Y_train = Y_train
self.X_val = X_val
self.Y_val = Y_val
#%%
def create_directory(self, model_dir = 'models/'):
    """
    Ensure that the model output directory exists.

    Uses ``os.makedirs(..., exist_ok=True)``, which creates intermediate
    directories as needed and avoids the check-then-create (TOCTOU) race
    that the previous ``os.path.exists`` guard had.

    Parameters
    ----------
    model_dir : TYPE, optional
        DESCRIPTION. The default is 'models/'.
    """
    os.makedirs(model_dir, exist_ok=True)
#%%
def convert_vector(self, x):
    """
    Converts a binary class matrix (or score matrix) to a class vector.

    For each row, the index of the *first* maximal entry is returned,
    which is exactly what ``np.argmax`` computes; this replaces the
    original O(rows * cols) Python loop. ``np.int`` was removed in
    NumPy 1.24, so the builtin ``int`` is used as the dtype.

    Parameters
    ----------
    x : array-like of shape (n_samples, n_classes)
        DESCRIPTION. One-hot or score matrix.

    Returns
    -------
    label_mapped : array
        DESCRIPTION. Integer vector with one class index per row.
    """
    return np.argmax(np.asarray(x), axis=1).astype(int)
#%%
def create_model(self, number_of_classes = 4, filters = 128, kernel_size = [10, 4, 1, 1],
                 pool_size = [2, 1, 1], drop_rate = 0.5, neurons = [256, 128, 64],
                 model_filename = 'best_model.hdf5', history_filename = 'history.csv',
                 batch = 32, number_epochs = 50, modelType = 0):
    """
    It creates, trains, and checkpoints an emotion recognition model based
    on DNN algorithms.

    Parameters
    ----------
    number_of_classes : int
        DESCRIPTION. Number of labels classes
    filters : int
        DESCRIPTION. The dimensionality of the output space in the convolution
    kernel_size : list
        DESCRIPTION. Specify the length of the 1D CNN window (one per conv layer)
    pool_size : list
        DESCRIPTION. Size of the max-pooling windows for the operation of the CNN layers
    drop_rate : float
        DESCRIPTION. Dropout rate, less than 1
    neurons : list
        DESCRIPTION. Specify the dimensionality of the array output space for a FCN
    model_filename : String
        DESCRIPTION. Model file name (best checkpoint destination)
    history_filename : String
        DESCRIPTION. Accuracy results from all epochs of training and testing of the model
    batch : int
        DESCRIPTION. Batch size
    number_epochs : int
        DESCRIPTION. Define the iterations number on the dataset during the training time.
    modelType : int
        DESCRIPTION. 0 = 1D CNN + Flatten, 1 = 1D CNN + LSTM, 2 = deep CNN (DCNN)

    Returns
    -------
    None.
    """
    # NOTE(review): modelType outside {0, 1, 2} leaves self.model undefined
    # and the shared head below will fail -- callers must pass a valid type.
    if modelType == 0:  # Test model 1: 1D CNN Flatten
        self.model = Sequential()
        self.model.add(Conv1D(filters, kernel_size[0], activation = 'relu',
                              input_shape = self.X.shape[1:]))
        self.model.add(MaxPooling1D(pool_size[0]))
        self.model.add(Flatten())
        self.model.add(Dense(neurons[0], kernel_initializer = 'normal', activation = 'relu'))
        self.model.add(Dropout(drop_rate))
    if modelType == 1:  # Test model 2: 1D CNN LSTM
        self.model = Sequential()
        self.model.add(Conv1D(filters, kernel_size[0], activation='relu',
                              input_shape = self.X.shape[1:]))
        self.model.add(MaxPooling1D(pool_size[0]))
        self.model.add(Dropout(drop_rate))
        self.model.add(LSTM(neurons[0] * 2, activation = 'relu'))
        self.model.add(Dropout(drop_rate))
    if modelType == 2:  # Test model 3: DCNN
        self.model = Sequential()
        self.model.add(Conv1D(filters, kernel_size[0], activation = 'relu',
                              input_shape = self.X.shape[1:]))
        self.model.add(MaxPooling1D(pool_size[0]))
        self.model.add(Dropout(drop_rate))
        self.model.add(Conv1D(filters, kernel_size[1], activation = 'relu'))
        self.model.add(MaxPooling1D(pool_size[1]))
        self.model.add(Dropout(drop_rate))
        self.model.add(Conv1D(filters, kernel_size[2], activation = 'relu'))
        self.model.add(MaxPooling1D(pool_size[2]))
        self.model.add(Dropout(drop_rate))
        self.model.add(Conv1D(filters, kernel_size[3], activation = 'relu'))
        self.model.add(GlobalAveragePooling1D())
        # fully connected stack for the DCNN variant
        self.model.add(Dense(neurons[0], kernel_initializer = 'normal',
                             activation = 'relu'))
        self.model.add(Dropout(drop_rate))
        self.model.add(Dense(neurons[1], kernel_initializer = 'normal',
                             activation = 'relu'))
        self.model.add(Dropout(drop_rate))
        self.model.add(Dense(neurons[2], kernel_initializer = 'normal',
                             activation = 'relu'))
        self.model.add(Dropout(drop_rate))
    # Shared softmax classification head + training loop for every model type.
    self.model.add(Dense(number_of_classes, kernel_initializer = 'normal',
                         activation = 'softmax'))
    # `lr` is a deprecated alias removed in newer Keras releases; use
    # `learning_rate` instead (same behavior on TF 2.x).
    optimize = optimizers.Adam(learning_rate=0.001)
    self.model.compile(loss = 'categorical_crossentropy',
                       optimizer = optimize, metrics = ['accuracy'])
    self.checkpointer = ModelCheckpoint(filepath = model_filename,
                                        monitor = 'val_accuracy', verbose = 1,
                                        save_best_only = True, mode='max')
    self.hist = self.model.fit(self.X_train, self.Y_train,
                               validation_data = (self.X_val, self.Y_val),
                               batch_size = batch, epochs = number_epochs,
                               verbose = 2, shuffle = True,
                               callbacks = [self.checkpointer])
    # persist the per-epoch metrics for later analysis
    pd.DataFrame(self.hist.history).to_csv(path_or_buf = history_filename)
#%%
def run_model(self, number_of_classes = 4, filters = 128, kernel_sizes = [10, 4, 1, 1],
              pool_sizes = [2, 1, 1], drop_rate = 0.5, neurons = [256, 128, 64],
              model_dir ='models/', model_filename = 'model.hdf5',
              history_filename = 'history.csv', batch = 32, number_epochs = 50,
              pred_prefix = 'preds_', confusion_prefix = 'result_conf',
              targetClass = ['HVHA', 'HVLA', 'LVLA', 'LVHA'],
              emotionLabel = {0: 'HVHA',1: 'HVLA', 2:'LVLA', 3: 'LVHA'},
              emotionMinAverage = 0.1, modelType = 0):
    """
    End-to-end driver for emotion recognition with deep learning algorithms.

    Orchestrates, in order: create_directory -> create_model (build + train)
    -> prediction (accuracy/F1/confusion matrix) -> plotMetrics ->
    predictionAverageCheck.

    Parameters
    ----------
    number_of_classes, filters, kernel_sizes, pool_sizes, drop_rate,
    neurons, batch, number_epochs, modelType
        DESCRIPTION. Forwarded to :meth:`create_model`; see its docstring.
    model_dir : String
        DESCRIPTION. Path of model subdirectory.
    model_filename : String
        DESCRIPTION. Model file name
    history_filename : String
        DESCRIPTION. Accuracy results from all epochs of training and testing of the model
    pred_prefix : String
        DESCRIPTION. The default is 'preds_'. Prediction output filename prefix.
    confusion_prefix : String
        DESCRIPTION. The default is 'result_conf'. Confusion-matrix filename prefix.
    targetClass : list
        DESCRIPTION. Label class names, index-aligned with the confusion matrix.
    emotionLabel : dictionary
        DESCRIPTION. Mapping from class index to label name.
    emotionMinAverage : float
        DESCRIPTION. Minimum average prediction for emotion classes
    modelType : int
        DESCRIPTION. DNN model selector.

    NOTE(review): the mutable default arguments (lists/dicts) are shared
    across calls; safe only while callers never mutate them.

    Returns
    -------
    None.
    """
    self.create_directory(model_dir)
    self.create_model(number_of_classes, filters, kernel_sizes,
                      pool_sizes, drop_rate, neurons,
                      os.path.join(model_dir, model_filename),
                      os.path.join(model_dir, history_filename),
                      batch, number_epochs, modelType)
    # prediction receives path *prefixes*; it appends the score and '.csv'
    self.prediction(os.path.join(model_dir, pred_prefix),
                    os.path.join(model_dir, confusion_prefix))
    self.plotMetrics(targetClass, model_dir, modelType)
    # predictionAverageCheck re-reads the history CSV from model_dir
    self.predictionAverageCheck(emotionMinAverage, emotionLabel,
                                targetClass, history_filename, model_dir)
#%%
def prediction(self, pred_prefix = 'preds_', confusion_prefix = 'result_conf'):
    """
    Verify the accuracy of the deep learning model for emotion recognition.

    Evaluates ``self.model`` on the validation split, prints accuracy and
    micro-averaged F1, and writes both the predicted labels and the
    confusion matrix to CSV files named ``<prefix><score>.csv``.

    Parameters
    ----------
    pred_prefix : String
        DESCRIPTION. The default is 'preds_'. Filename prefix of the prediction results
    confusion_prefix : String
        DESCRIPTION. The default is 'result_conf'. Confusion matrix data

    Returns
    -------
    None.
    """
    self.predictions = self.model.predict(self.X_val)
    # convert the one-hot matrices to label vectors once and reuse them
    # (the original recomputed convert_vector four times)
    y_true = self.convert_vector(self.Y_val)
    y_pred = self.convert_vector(self.predictions)
    self.score = accuracy_score(y_true, y_pred)
    print('Last epoch\'s validation score is ', self.score)
    f1 = f1_score(y_true, y_pred, pos_label = 1, average = 'micro')
    print('The f1-score of classifier on test data is ', f1)
    df = pd.DataFrame(y_pred)
    filename = pred_prefix + str(format(self.score, '.4f')) + '.csv'
    df.to_csv(path_or_buf = filename, index = None, header = None)
    filename = confusion_prefix + str(format(self.score, '.4f'))
    filename += '.csv'
    self.matrix = confusion_matrix(y_true, y_pred)
    pd.DataFrame(self.matrix).to_csv(
        path_or_buf = filename, index = None, header = None)
#%%
def predictionAverageCheck(self, emotionMinAverage, emotionLabel,
                           targetClass, history_filename, model_dir):
    """
    Verify the predicted average (per-class recall) for emotion recognition.

    Reads back the training-history CSV, stores the mean train/validation
    accuracies, computes each class's diagonal/row-total ratio from
    ``self.matrix``, and keeps in ``self.emotionLabelTemp`` only the labels
    whose ratio reaches ``emotionMinAverage``.

    Parameters
    ----------
    emotionMinAverage : float
        DESCRIPTION. Minimum average prediction for emotion classes
    emotionLabel : dict
        DESCRIPTION. Mapping from class index to emotion label.
    targetClass : list
        DESCRIPTION. Emotion labels, index-aligned with the confusion matrix.
    history_filename : String
        DESCRIPTION. Accuracy results from all epochs of training and testing of the model
    model_dir : String
        DESCRIPTION. Emotion recognition models subdirectory

    Returns
    -------
    None.
    """
    predHeader = {0: 'Num', 1: 'val_loss', 2: 'val_accuracy', 3: 'loss', 4: 'accuracy'}
    self.predResult = pd.read_csv(model_dir + history_filename, engine='python', header = None)
    self.predResult.rename(index = str, columns = predHeader, inplace = True)
    # with header=None the real header row is data; drop it
    self.predResult = self.predResult.drop(self.predResult.index[0])
    # `np.float` was removed in NumPy 1.24; the builtin float is equivalent.
    self.testAccMean = round(np.mean((self.predResult['val_accuracy'].values).astype(float)), 2)
    self.trainAccMean = round(np.mean((self.predResult['accuracy'].values).astype(float)), 2)
    temp = []
    self.emotionPrediction = []
    for i in range(len(self.matrix)):  # 2D array
        # Per-class ratio = diagonal / FULL row total. The original divided
        # by a *running* total, which overestimated every class whose
        # diagonal entry was not in the last column.
        total = float(sum(self.matrix[i]))
        pred = round(float(self.matrix[i][i]) / total, 2)
        self.emotionPrediction.append(pred)
        # collect label keys whose prediction average is below threshold
        for k, v in emotionLabel.items():
            if targetClass[i] == v and pred < emotionMinAverage:
                temp.append(k)
                break
    # keep only the labels that were not flagged as below-threshold
    self.emotionLabelTemp = {}
    for key, value in emotionLabel.items():
        if key not in temp:
            self.emotionLabelTemp[key] = value
#%%
def plotMetrics(self, targetClass, model_dir, modelType):
"""
Precision result plots of emotion recognition.
Parameters
----------
targetClass : list
DESCRIPTION. The default is ['feliz','divertido','alegre','contento','satisfecho',...]. Emotion labels.
model_dir : string
DESCRIPTION. Emotion recognition models subdirectory
Returns
-------
None.
"""
if modelType == 0:
name = '1D CNN Fatten'
elif modelType == 1:
name = '1D CNN LSTM'
else: | |
self.x_max_2 + offset + self.spacing,
y_line)
term_indx += 1
def _add_top_channels(self):
    """
    Creates the vertical (CHANY) channels that terminate on the top side.

    NOTE(review): the original docstring said "horizontal channels", but
    this method iterates ``self.chany_t`` (top-side CHANY elements).
    Channels that pass straight through (listed in ``self.ft_top``) are
    skipped; only terminating channels get an elbow drawn here.
    """
    term_indx = 0
    for ele in self.chany_t:
        chan = int(ele.attrib["index"])
        # skip channels that feed through to the opposite side
        if not chan in [int(e.attrib["index"]) for e in self.ft_top]:
            # Add connecting Vertical line
            x_line = self.x_min_0 - term_indx*self.scale - self.spacing
            y_line = self.y_max_1 + term_indx*self.scale + self.spacing
            self.dwgShapes.add(shapes.Line(start=(x_line, self.y_min_0),
                                           end=(x_line, self.y_max_4),
                                           marker_start=self.marker_terminate.get_funciri(),
                                           marker_end=self.marker_blue.get_funciri(),
                                           class_="channel lr_chan"))
            # Add connecting horizontal line
            self.dwgShapes.add(shapes.Line(start=(x_line, y_line),
                                           end=(self.x_max_0, y_line),
                                           marker_start=self.marker_terminate.get_funciri(),
                                           marker_end=self.marker_terminate.get_funciri(),
                                           class_="channel lr_chan"))
            # dot marking the hard connection at the elbow
            self._add_short_at(x_line, y_line)
            # Add Text
            self.dwgText.add(Text(ele.attrib["index"],
                                  transform="scale(1,-1)",
                                  class_="lr_text",
                                  insert=(x_line, -1*self.y_max_4)))
            # Add Switches
            for switch in ele.getchildren():
                sw_type = switch.attrib["type"]
                side = switch.attrib["side"]            # currently unused here
                index = int(switch.attrib["index"])
                grid_side = switch.attrib.get("grid_side", "")  # currently unused here
                offset = index*self.scale
                if sw_type == "CHANX":
                    self._add_switch_at(
                        x_line, self.x_min_0 + offset + self.spacing)
                elif sw_type == "CHANY":
                    # NOTE(review): a y-based coordinate is passed as the x
                    # argument of _add_switch_at -- looks suspicious; confirm
                    # against the rendered output.
                    self._add_switch_at(
                        self.y_min_0 + offset + self.spacing, y_line)
                elif sw_type == "OPIN":
                    self._add_switch_at(
                        x_line,
                        self.y_max_2 + offset + self.spacing)
            term_indx += 1
def _add_bottom_channels(self):
    """
    Creates the vertical (CHANY) channels that terminate on the bottom side.

    Mirror of ``_add_top_channels`` for ``self.chany_b``; feed-through
    channels (listed in ``self.ft_bottom``) are skipped.
    """
    term_indx = 0
    for ele in self.chany_b:
        chan = int(ele.attrib["index"])
        # skip channels that feed through to the opposite side
        if not chan in [int(e.attrib["index"]) for e in self.ft_bottom]:
            # Add connecting Vertical line
            x_line = self.x_max_0 + term_indx*self.scale + self.spacing
            y_line = self.y_min_1 - term_indx*self.scale - self.spacing
            self.dwgShapes.add(shapes.Line(start=(x_line, self.y_max_0),
                                           end=(x_line, self.y_min_4),
                                           marker_start=self.marker_terminate.get_funciri(),
                                           marker_end=self.marker_blue.get_funciri(),
                                           class_="channel lr_chan"))
            # Add connecting horizontal line
            self.dwgShapes.add(shapes.Line(start=(x_line, y_line),
                                           end=(self.x_min_0, y_line),
                                           marker_start=self.marker_terminate.get_funciri(),
                                           marker_end=self.marker_terminate.get_funciri(),
                                           class_="channel lr_chan"))
            # dot marking the hard connection at the elbow
            self._add_short_at(x_line, y_line)
            # Add Text
            self.dwgText.add(Text(ele.attrib["index"],
                                  transform="scale(1,-1)",
                                  class_="lr_text",
                                  insert=(x_line, -1*self.y_min_4)))
            # Add Switches
            for switch in ele.getchildren():
                sw_type = switch.attrib["type"]
                side = switch.attrib["side"]            # currently unused here
                index = int(switch.attrib["index"])
                grid_side = switch.attrib.get("grid_side", "")  # currently unused here
                offset = index*self.scale
                if sw_type == "CHANX":
                    self._add_switch_at(
                        x_line, self.x_min_0 + offset + self.spacing)
                elif sw_type == "CHANY":
                    # NOTE(review): a y-based coordinate is passed as the x
                    # argument of _add_switch_at -- confirm against output.
                    self._add_switch_at(
                        self.y_min_0 + offset + self.spacing, y_line)
                elif sw_type == "OPIN":
                    self._add_switch_at(
                        x_line,
                        self.y_min_2 - offset - self.spacing)
            term_indx += 1
def _add_channels(self):
    """
    Adds the driver lines for all four sides.

    ``pass_through`` maps a driver pin ("<side><index>") to the index of
    the channel it feeds through to on the opposite side; driver lines for
    pass-through pins extend to the far boundary and also record their
    coordinate in the per-side ``chan*_out_map`` lookup tables used later
    by ``_add_ipins``.

    NOTE(review): membership in ``pass_through`` is tested with a truthy
    ``.get(..., None)``, so a pass-through index of 0 would be treated as
    absent -- confirm index 0 cannot pass through.
    """
    pass_through = {"%s%d" % (ele[0].attrib["side"], int(ele[0].attrib["index"])): int(ele.attrib["index"])
                    for ele in self.ft_left+self.ft_right+self.ft_top+self.ft_bottom}
    visited_pins = list()
    for ele in (self.chanx_drivers+self.chany_drivers):
        index = int(ele.attrib["index"])
        side = ele.attrib["side"]
        offset_x = self.x_min_0+self.spacing + index*self.scale
        offset_y = self.y_min_0+self.spacing + index*self.scale
        curr_pin = "%s%d" % (side, index)
        if curr_pin in visited_pins:
            continue
        # Create side specific parameters
        if side == "left":
            marker = self.marker_blue
            class_name = "lr"
            offset = offset_x
            start = (self.x_min_4, offset)
            end = (self.x_max_4 if pass_through.get(curr_pin, None) else self.x_max_2,
                   offset)
            if pass_through.get(curr_pin, None):
                self.chanx_r_out_map[pass_through[curr_pin]] = offset
            self.chanx_l_out_map[index] = offset
        elif side == "right":
            marker = self.marker_red
            class_name = "rl"
            offset = offset_x
            start = (self.x_max_4, offset)
            end = (self.x_min_4 if pass_through.get(curr_pin, None) else self.x_min_2,
                   offset)
            if pass_through.get(curr_pin, None):
                self.chanx_l_out_map[pass_through[curr_pin]] = offset
            self.chanx_r_out_map[index] = offset
        elif side == "top":
            marker = self.marker_blue
            class_name = "tb"
            offset = offset_y
            start = (offset, self.y_max_4)
            end = (offset,
                   self.y_min_4 if pass_through.get(curr_pin, None) else self.y_min_2)
            if pass_through.get(curr_pin, None):
                self.chany_b_out_map[pass_through[curr_pin]] = offset
            self.chany_t_out_map[index] = offset
        elif side == "bottom":
            marker = self.marker_red
            class_name = "bt"
            offset = offset_y
            start = (offset, self.y_min_4)
            end = (offset,
                   self.y_max_4 if pass_through.get(curr_pin, None) else self.y_max_2)
            if pass_through.get(curr_pin, None):
                self.chany_t_out_map[pass_through[curr_pin]] = offset
            self.chany_b_out_map[index] = offset
        self.dwgShapes.add(shapes.Line(start=start, end=end,
                                       marker_start=marker.get_funciri(),
                                       marker_end=marker.get_funciri(),
                                       class_=f"channel {class_name}_chan"))
        # input-pin label at the line's origin
        self.dwgText.add(Text(index,
                              transform="scale(1,-1)",
                              class_=f"in_pin {class_name}_text",
                              insert=(start[0], -1*start[-1])))
        if pass_through.get(curr_pin, None):
            # output-pin label at the far end for pass-through channels
            self.dwgText.add(Text(pass_through[curr_pin],
                                  transform="scale(1,-1)",
                                  class_=f"out_pin {class_name}_text",
                                  insert=(end[0], -1*end[-1])))
        visited_pins.append(curr_pin)
def _add_ipins(self, side="left", channel_map=None):
    """
    Draw input-pin (IPIN) stubs with their switch dots.

    :param side: selects which IPIN collections to draw ("left" ->
        top+bottom pins, anything else -> right+left pins).
    :param channel_map: optional ``(side, index) -> channel_slot`` mapping
        used to position each pin; defaults to identity on the index.

    NOTE(review): the ``side`` parameter is immediately shadowed by the
    per-element ``side`` attribute inside the loop, and the comparisons use
    substring membership (``side in "top"``), which matches e.g. "t" and
    "op" as well -- behavior is preserved here, but both look fragile.
    """
    channel_map = channel_map or (lambda side, x: x)
    if side == "left":
        ipins = self.ipin_t + self.ipin_b
    else:
        ipins = self.ipin_r + self.ipin_l
    for ele in ipins:
        index = int(ele.attrib["index"])
        side = ele.attrib["side"]
        marker = self.marker_red
        offset = self.spacing + channel_map(side, index)*self.scale
        if side in "top":
            start = (self.x_min_3 - offset, self.y_min_2)
            end = (self.x_min_3 - offset, self.y_max_3)
        elif side in "bottom":
            start = (self.x_min_4 + offset, self.y_max_2)
            end = (self.x_min_4 + offset, self.y_min_3)
        elif side in "left":
            start = (self.x_max_2, self.y_max_3 + offset)
            end = (self.x_min_3, self.y_max_3 + offset)
        elif side in "right":
            start = (self.x_min_2, self.y_max_4 - offset)
            end = (self.x_max_3, self.y_max_4 - offset)
        self.dwgShapes.add(shapes.Line(start=start, end=end,
                                       marker_start=marker.get_funciri(),
                                       marker_end=marker.get_funciri(),
                                       class_="channel"))
        self.dwgText.add(Text(index,
                              transform="scale(1,-1)",
                              class_=f"OPIN",
                              insert=(end[0], -1*end[-1])))
        # Add Switches
        for switch in ele.getchildren():
            index = int(switch.attrib["index"])
            # look up the driver-line coordinate recorded by _add_channels
            offset = self.chanx_l_out_map[index] if side in ['top', 'bottom'] \
                else self.chany_t_out_map[index]
            if side in ["top", "bottom"]:
                self._add_switch_at(
                    start[0], offset)
            elif side in ["left", "right"]:
                self._add_switch_at(
                    offset, start[1])
def _add_opins(self):
    """
    Draw output-pin (OPIN) stubs for all four sides with index labels.

    ``grid_side`` records which neighboring grid tile the pin belongs to
    and selects both the line span and its direction.
    """
    for ele in self.opin_l + self.opin_r + self.opin_t + self.opin_b:
        index = int(ele.attrib["index"])
        side = ele.attrib["side"]
        grid_side = ele.attrib["grid_side"]
        offset = self.spacing + (index)*self.scale
        if side in ["left", "right"]:
            start = (self.x_min_2-offset if side == "left" else self.x_max_2+offset,
                     self.y_min_4 if grid_side == "top" else self.y_min_1)
            end = (self.x_min_2-offset if side == "left" else self.x_max_2+offset,
                   self.y_max_1 if grid_side == "top" else self.y_max_4)
            if grid_side == "bottom":
                # flip so the arrow marker points toward the grid tile
                start, end = end, start
            marker = self.marker_red
        elif side in ["top", "bottom"]:
            start = (self.x_max_4 if grid_side == "left" else self.x_min_4,
                     self.y_min_2-offset if side == "bottom" else self.y_max_2+offset)
            end = (self.x_min_2 if grid_side == "left" else self.x_max_2,
                   self.y_min_2-offset if side == "bottom" else self.y_max_2+offset)
            marker = self.marker_red
        self.dwgShapes.add(shapes.Line(start=start, end=end,
                                       marker_start=marker.get_funciri(),
                                       marker_end=marker.get_funciri(),
                                       class_="channel"))
        self.dwgText.add(Text(index,
                              transform="scale(1,-1)",
                              class_=f"OPIN",
                              insert=(start[0], -1*start[-1])))
def _add_switch_at(self, x, y):
    """Draw a switch dot (CSS class ``switch``) at ``(x, y)``."""
    self.switches.add(shapes.Circle(center=(x, y), r=10, class_="switch"))
def _add_short_at(self, x, y):
    """Draw a hard-connection dot (CSS class ``short``) at ``(x, y)``."""
    self.switches.add(shapes.Circle(center=(x, y), r=10, class_="short"))
def add_partitions(self):
    """
    This function creates initial floorplan for rendering switch boxes

    ┌───────────────────────────────┐
    │                               │
    │ ┌───────────────────────────┐ │
    │ │                           │ │
    │ │ ┌──────────────────────┐  │ │
    │ │ │                      │  │ │
    │ │ │ ┌──────────────────┐ │  │ │
    │ │ │ │                  │ │  │ │
    │ │ │ │Channel_CrossOver │ │  │ │
    │ │ │ │                  │ │  │ │
    │ │ │ └──────────────────┘ │  │ │
    │ │ │  Driver_Channels_1   │  │ │
    │ │ └──────────────────────┘  │ │
    │ │     Driver_Channels_2     │ │
    │ └───────────────────────────┘ │
    │            OPINs_1            │
    └───────────────────────────────┘

    ``Channel_CrossOver`` and ``Driver_Channels_1`` section always exists.
    ``Driver_Channels_2`` on specific site can be skipped if there are no
    channels drivers in that direction

    Side effects: sets the nested bounding boxes
    ``(x_min_N, y_min_N, x_max_N, y_max_N)`` for N in 0..4 (0 = innermost
    crossover region, 4 = outermost) and adds the background rectangles.
    """
    min_terminating = min(self.ft_left_len, self.ft_right_len,
                          self.ft_top_len, self.ft_bottom_len)
    # Width, Height Calculation for the innermost crossover region (0)
    width = self.chanx_len*self.scale + 2*self.spacing
    height = self.chany_len*self.scale + 2*self.spacing
    # ring 1: grows by the number of terminating (non-feed-through) channels
    width1 = width + 2*(self.chanx_l_len-min_terminating)*self.scale
    width1 += 2*self.spacing
    height1 = height + 2*(self.chany_t_len-min_terminating)*self.scale
    height1 += 2*self.spacing
    # ring 2
    width2 = width1 + 2*(self.chanx_l_len-min_terminating)*self.scale \
        + 2*self.spacing
    height2 = height1 + 2*(self.chany_t_len-min_terminating)*self.scale \
        + 2*self.spacing
    # ring 3
    width3 = width2 + 2*(self.chanx_l_len-min_terminating)*self.scale
    width3 += 2*self.spacing
    height3 = height2 + 2*(self.chany_t_len-min_terminating)*self.scale
    height3 += 2*self.spacing
    # ring 4: outermost, reserves room for the IPIN stubs
    width4 = width3 + 2*(self.ipin_l_len+self.ipin_r_len)*self.scale
    width4 += 4*self.spacing
    height4 = height3 + 2*(self.ipin_t_len+self.ipin_b_len)*self.scale
    height4 += 4*self.spacing
    # each ring is centered on region 0, then ring 4 is shifted to account
    # for asymmetric IPIN counts
    insert_pt = -0.5*(width4-width), -0.5*(height4-height)
    self.x_min_4, self.y_min_4 = insert_pt
    self.x_min_4 += (self.ipin_r_len-self.ipin_l_len)*self.scale
    self.y_min_4 += (self.ipin_t_len-self.ipin_b_len)*self.scale
    self.x_max_4, self.y_max_4 = self.x_min_4+width4, self.y_min_4+height4
    insert_pt = -0.5*(width3-width), -0.5*(height3-height)
    self.x_min_3, self.y_min_3 = insert_pt
    self.x_max_3, self.y_max_3 = self.x_min_3+width3, self.y_min_3+height3
    insert_pt = -0.5*(width2-width), -0.5*(height2-height)
    self.x_min_2, self.y_min_2 = insert_pt
    self.x_max_2, self.y_max_2 = self.x_min_2+width2, self.y_min_2+height2
    insert_pt = -0.5*(width1-width), -0.5*(height1-height)
    self.x_min_1, self.y_min_1 = insert_pt
    self.x_max_1, self.y_max_1 = self.x_min_1+width1, self.y_min_1+height1
    self.x_min_0, self.y_min_0 = 0, 0
    self.x_max_0, self.y_max_0 = width, height
    # cross-shaped outline: ring-3 width by ring-4 height and vice versa
    self.region.add(shapes.Rect(insert=(self.x_min_3, self.y_min_4),
                                size=(width3, height4),
                                class_="boundry"))
    self.region.add(shapes.Rect(insert=(self.x_min_4, self.y_min_3),
                                size=(width4, height3),
                                class_="boundry"))
    self.region.add(shapes.Rect(insert=(self.x_min_3, self.y_min_3),
                                size=(width3, height3),
                                class_="region4"))
    self.region.add(shapes.Rect(insert=(self.x_min_2, self.y_min_2),
                                size=(width2, height2),
                                class_="region3"))
    self.region.add(shapes.Rect(insert=(self.x_min_1, self.y_min_1),
                                size=(width1, height1),
                                class_="region2"))
    self.region.add(shapes.Rect(insert=(0, 0),
                                size=(width, height),
                                class_="region1"))
def _add_origin_marker(self):
    """Draw a small cross at the drawing origin as a visual reference."""
    arm = self.scale
    for line_start, line_end in (((0, arm), (0, -arm)),
                                 ((arm, 0), (-arm, 0))):
        self.dwgbg.add(shapes.Line(start=line_start,
                                   end=line_end,
                                   class_="origin"))
def _create_arrowhead(self, hex_color):
    """Register an arrow-head line marker in ``hex_color`` and return it."""
    marker = self.dwg.marker(refX="30", refY="30",
                             viewBox="0 0 120 120",
                             markerUnits="strokeWidth",
                             markerWidth="8", markerHeight="10", orient="auto")
    # filled triangle pointing along the line direction
    triangle = self.dwg.path(d="M 0 0 L 60 30 L 0 60 z", fill=hex_color)
    marker.add(triangle)
    self.dwg.defs.add(marker)
    return marker
def _create_termination(self, hex_color):
    """Register a bar-shaped line-termination marker in ``hex_color``."""
    marker = self.dwg.marker(refX="0", refY="0",
                             viewBox="-10 -30 20 60",
                             markerUnits="strokeWidth",
                             markerWidth="4", markerHeight="10", orient="auto")
    # short perpendicular bar centered on the line end
    bar = shapes.Rect(insert=(-5, -15), height="30",
                      width="10", fill=hex_color)
    marker.add(bar)
    self.dwg.defs.add(marker)
    return marker
def _add_stylehseet(self):
'''
Adds custom stylesheet to the SVG image
'''
self.dwg.defs.add(self.dwg.style("""
text{font-family: LATO; font-weight: 800; font-size: 25px;}
svg{outline: 1px solid grey; outline-offset: -2px;}
.module_boundary{fill:#f4f0e6}
.origin{stroke: red; stroke-width: 1;}
.channel{stroke: grey; stroke-width: 4;}
.switch{stroke: black; fill:blue; stroke-width: 0;}
.short{stroke: black; fill:black; stroke-width: 0;}
.inpin{stroke: red;stroke-width: 4;}
.outpin{stroke: green;stroke-width: 4;}
.rl_chan{stroke: red;}
.lr_chan{stroke: blue;}
.lr_text{fill: blue;}
.rl_text{fill: red;}
.bt_chan{stroke: red;}
.bt_text{fill: red;}
.tb_chan{stroke: blue;}
.tb_text{fill: blue;}
.region1{fill: #CCE7D4;}
.region2{fill: #F8AC92;}
.region3{fill: #C4E7EB;}
.region4{fill: #F5F3C9;}
.boundry{stroke: red;stroke-width: 10;fill: none;opacity: 10%;}
.OPIN{fill: green;}
.left_pin{
fill:blue;
text-anchor: start;
transform: translate(5px, 00px) scale(1,-1);}
.right_pin{
fill:blue;
text-anchor: end;
| |
#!/usr/bin/env python
"""
Enumerate subgraphs & get amons
"""
import aqml.cheminfo as ci
import aqml.cheminfo.math as cim
import aqml.cheminfo.graph as cg
import aqml.cheminfo.openbabel.amon_f as cioaf
from aqml.cheminfo.rw.ctab import write_ctab
import networkx as nx
#import aqml.cheminfo.fortran.famon as fm
from itertools import chain, product
import numpy as np
import os, re, copy
#from rdkit import Chem
import openbabel as ob
import pybel as pb
# Map atomic number -> SMILES element symbol for the supported heavy atoms.
# NOTE: the original preceded this with `global dic_smiles`, which is a
# no-op at module scope; the assignment alone creates the module global.
dic_smiles = {6: 'C', 7: 'N', 8: 'O', 14: 'Si', 15: 'P', 16: 'S'}
class vars(object):
    """Plain container for the molecular-graph arrays shared by subclasses.

    Holds: bosr (reference bond orders), zs (atomic numbers), chgs (formal
    charges), tvs (target valences), g (connectivity matrix), coords
    (Cartesian coordinates).

    NOTE(review): the class name shadows the builtin ``vars``; renaming it
    would require coordinated changes at every use site, so it is kept.
    """

    def __init__(self, bosr, zs, chgs, tvs, g, coords):
        field_names = ('bosr', 'zs', 'chgs', 'tvs', 'g', 'coords')
        for field_name, field_value in zip(field_names,
                                           (bosr, zs, chgs, tvs, g, coords)):
            setattr(self, field_name, field_value)
class MG(vars):
def __init__(self, bosr, zs, chgs, tvs, g, coords, use_bosr=True):
    """Initialise molecular-graph state.

    ``use_bosr`` controls whether update_m() enforces the reference
    bond orders in ``bosr``; everything else is stored by the ``vars``
    base class.
    """
    self.use_bosr = use_bosr
    # Delegate storage of the shared arrays to the base class.
    vars.__init__(self, bosr, zs, chgs, tvs, g, coords)
def update_m(self, once=True, debug=False):
g = self.g
chgs = self.chgs
bosr = self.bosr
tvs = self.tvs # `tvs has been modified according to `chgs
zs = self.zs
na = len(zs)
ias = np.arange(na)
bom = copy.deepcopy(g) # set bom as `g
cns = g.sum(axis=0)
vs = bom.sum(axis=0)
dvs = tvs - vs
# print ' vs = ', vs
# print ' tvs = ', tvs
# print ' cns = ', cns
# 1) for =O, =S
ias_fringe = ias[ np.logical_and(dvs == 1, \
np.logical_or(self.zs == 8, self.zs == 16) ) ]
for ia in ias_fringe:
jas = ias[ g[ia] > 0 ]
ja = jas[0]
bom[ia, ja] = bom[ja,ia] = 2
vs = bom.sum(axis=0)
dvs = tvs - vs
# if iprt: print ' *** dvs1 = ', dvs
# 2) for #N
ias_fringe = ias[ np.logical_and(dvs == 2, self.zs == 7) ]
for ia in ias_fringe:
jas = ias[ g[ia] > 0 ]
ja = jas[0]
bom[ia, ja] = bom[ja,ia] = 3
vs = bom.sum(axis=0)
dvs = tvs - vs
# 3) for $C (quadruple bond)
ias_fringe = ias[ np.logical_and(dvs == 3, self.zs == 6) ]
for ia in ias_fringe:
jas = ias[ g[ia] > 0 ]
ja = jas[0]
bom[ia, ja] = bom[ja,ia] = 4
vs = bom.sum(axis=0)
dvs = tvs - vs
# print ' -- dvs = ', dvs
## 4) fix special cases, where we consider
# the middle N has a valence of 5
## -N-N-N --> -N=N#N
## >C-N-N --> >C=N#N
# ==============================
# now it's not necessary to do this
# as step 2) and step below suffice
## 5) fix special cases
## -C(O)O --> -C(=O)O
# ==============================
# necessary only for charged species
# 6) fix cases like >C-C-C< or =C-C-C< or -N-C-C< (dvs=[1,2,1])
# >C-C-C-C< (dvs=[1,2,2,1])
# >C-C-C-C-C< (dvs=[1,2,2,2,1])
# cases like >C-C(-X)-C-C-C(-X)-C< must be excluded (note
# that [1,2,2,1] is a subset of `dvs for all atoms)
filt1 = (dvs == 1)
zs1 = zs[filt1]
ias1 = ias[filt1]; na1 = len(zs1)
filt2 = (dvs == 2)
zs2 = zs[filt2]
ias2 = ias[filt2]; na2 = len(zs2)
if na2 > 0:
g2 = bom[ias2,:][:,ias2]
for ias2c_raw in cg.find_cliques(g2):
ias2c = [ ias2[ja] for ja in ias2c_raw ]
# store the index of atom with dv=1, which
# is connected to atom with dv=2
ias1c = []
for ia2 in ias2c:
for ia1 in ias1:
if bom[ia1,ia2] == 1:
ias1c.append( ia1 )
# now sort atoms to form a Line
na1c = len(ias1c)
if na1c == 2:
# now sort atoms to form a linear chain !
iasc = [ ias1c[0], ]; icnt = 0
ias_compl = copy.copy( ias2c )
while ias_compl:
for ial in ias_compl:
if bom[ial, iasc[icnt]] == 1:
iasc.append( ial )
icnt += 1
ias_compl.remove( ial )
iasc.append( ias1c[1] )
nac = len(iasc)
# now check if the two end atoms along this Line
# are not connected to another atom with `dv=1,
# e.g., >C-C(-X)-C-C-C(-X)-C<
icontinue = True
for iax in ias1:
if iax not in iasc:
if np.any([bom[iax,iac] == 1 for iac in iasc ]):
icontinue = False
if icontinue:
for iac in range(nac-1):
ka1 = iasc[iac]; ka2 = iasc[iac+1]
bom[ka1,ka2] = bom[ka2,ka1] = 2
# now update `dvs
vs = bom.sum(axis=0)
dvs = tvs - vs
# if iprt: print ' ***** dvs = ', dvs
combs = [ ]; bos = [ ]
nclique = 0
iok = True # assume valences are all ok for atoms
for dv in [2, 1]:
# for C_sp2, dv = 4-3 = 1, N_sp2, dv = 3-2 = 1;
# for C_sp, dv = 4-2 = 2
# for O_sp2, dv = 2-1 = 1
BO = {2: 3, 1: 2}[dv]
# atoms to be re-bonded by double/triple bonds
ias_dv = ias[ dvs == dv ]
na_dv = len(ias_dv)
if na_dv > 0:
g2 = g[ias_dv,:][:,ias_dv]
cliques_dv = cg.find_cliques(g2)
# print ' ***** dv, cliques_dv = ', dv, cliques_dv
# nclique_dv = len(cliques_dv)
# nclique += nclique_dv
for cliques_i in cliques_dv:
nc = len(cliques_i)
# e.g., for 'cccNccncc', cliques = [ [0,1,2],
# [4,5,6,7,8], ], nc = 2
# for clique_1, there are C_2^1 (where 2 is
# the num_bonds) possibilities,
# i.e., c=cc, cc=c; for clique_2, there are
# 2 possiblities as well.
# Thus, we'll have 2*2 = 4 possible combs,
nc_i = len(cliques_i)
if nc_i%2 == 1: iok = False; continue
# ifound = True
# if nc_i == 1:
# ifound = False
if nc_i == 2:
# use relative indices; later I will convert
# them to absolute ones
ipss_dv_0 = [ [[0,1],], ]
else: # nc_i >= _dv:
g2c = g2[cliques_i,:][:,cliques_i]
#print ' g2c = ', g2c
#ne = (g2c > 0).sum()/2
#nring = ne + 1 - nc_i
#
# special case: 4-1-2-3
# but atomic indices are 1,2,3,4
# consequently, g2c = [[0 1 0 1]
# [1 0 1 0]
# [0 1 0 0]
# [1 0 0 0]]
# while the program `find_double_bonds will
# try to find double bonds in a conjugate systems
# sequentially, i.e., the resulting `ipss_dv_0 is [[0,1], ]
# which should be instead [ [4,1],[2,3] ]
#
# Thus, `once should be set to False when calling
# `find_double_bonds
ipss_dv = find_double_bonds(g2c, once=False, irad=False)
n_ = len(ipss_dv)
if n_ == 0:
# e.g., edges = [[3,1],[3,2],[3,4]] and dvs = [1,1,1,1]
iok = False; continue
nmax_ = 2
# print '\n *** dv, cliques_i = ', dv, cliques_i
# print ' *** g2c = ', g2c
# print ' *** nc_i, n_ = ', nc_i, n_
for i_ in range(n_):
ias_ = ipss_dv[i_]
ni_ = len(np.ravel(ias_))
# print ' *** i_, ni_, ias_ = ', i_, ni_, ias_
if ni_ >= nmax_:
nmax_ = ni_
i_chosen = i_
# print ' *** ', i_chosen, ipss_dv[ i_chosen ]
if once:
# choose only 1 from all options
ipss_dv_0 = [ ipss_dv[ i_chosen ], ]
else:
ipss_dv_0 = ipss_dv
#if ifound:
map_i = list( np.array(ias_dv, np.int)[cliques_i] )
bonds_dv_i = [ ]
for iips in ipss_dv_0:
cisU = [ [ map_i[jas[0]], map_i[jas[1]] ] \
for jas in iips ]
bonds_dv_i.append( cisU )
combs.append( bonds_dv_i )
bos.append( BO )
_boms = []
if iok:
if len(combs) > 0: # nclique >= 1:
for bs in cim.products(combs):
bom_i = copy.copy(bom)
for i,bsi in enumerate(bs):
for bi in bsi:
ia1, ia2 = bi
bom_i[ia1,ia2] = bom_i[ia2,ia1] = bos[i]
_boms.append(bom_i)
else:
_boms.append( bom )
cans = []; ms = []
for bom in _boms:
# note that the order of calling `get_bos() and `accommodate_chgs()
# matters as `bosr was obtained based on modified `bom, i.e., all
# pairs of charges (the relevant two atoms are bonded) were eliminated
bos = cioaf.get_bos(bom)
# now restore charges for case, e.g., NN bond in C=N#N
bom_U = cioaf.accommodate_chgs(chgs, bom)
# for query molecule like -C=CC#CC=C-, one possible amon
# is >C-C-C-C< with dvs = [1,2,2,1] ==> >C=C=C=C<, but
# apparently this is not acceptable!!
if self.use_bosr:
if np.any(bos[zs>1] != bosr):
#print ' bosr = ', bosr, ', bosr0 = ', bos[zs>1]
continue
isotopes = []
zsmv = [7,15,16,17]
vsn = [3,3,2,1]
print ' -- zs = ', zs
zsc = np.intersect1d(zs, zsmv)
if zsc.shape[0] > 0:
| |
# repository: turkeydonkey/nzmath3
import unittest
from nzmath.matrix import *
import nzmath.vector as vector
import nzmath.rational as rational
import nzmath.poly.uniutil as uniutil
# Short aliases for the nzmath types used throughout the fixtures below.
Ra = rational.Rational
Poly = uniutil.polynomial
Int = rational.theIntegerRing
# sub test: pull in the finite-field matrix tests from whichever package
# layout is available (top-level test package, bundled nzmath.test, or a
# sibling module when run from inside the test directory).
# Only ImportError is caught -- the original bare `except:` would also have
# hidden real errors (e.g. a SyntaxError) inside the imported module.
try:
    from test.testMatrixFiniteField import *
except ImportError:
    try:
        from nzmath.test.testMatrixFiniteField import *
    except ImportError:
        from .testMatrixFiniteField import *
## for RingMatrix
# Rectangular integer matrices; a5 has univariate polynomial entries.
a1 = createMatrix(1, 2, [3, 2])
a2 = Matrix(1, 2, [5, -6])
a3 = createMatrix(3, 2, [7, 8]+[3, -2]+[0, 10])
a4 = Matrix(3, 2, [21, -12]+[1, -1]+[0, 0])
a5 = createMatrix(1, 2, [Poly({0:3, 1:5}, Int), Poly({1:2}, Int)])
## for RingSquareMatrix
# Square integer matrices (b6 has a zero row, i.e. singular; b7 is unit
# lower triangular).
b1 = createMatrix(2, 2, [1, 2]+[3, 4])
b2 = Matrix(2, 2, [0, -1]+[1, -2])
b3 = createMatrix(3, 3, [0, 1, 2]+[5, 4, 6]+[7, 9, 8])
b4 = Matrix(3, 3, [1, 2, 3]+[0, 5, -2]+[7, 1, 9])
b5 = createMatrix(3, 3, [1, 3, 2, 4, 6, 5, 6, 8, 9])
b6 = createMatrix(3, 3, [1, 2, 4, 0, 3, 5, 0, 0, 0])
b7 = createMatrix(3, 3, [1, 0, 0, 9, 1, 0, 5, 6, 1])
b8 = Matrix(3, 3, [3, 15, 12]+[2,7,5]+[1,-4,-2])
## for FieldMatrix
# Matrices over the rationals (c2 is rank 3, used for kernel/image tests).
c1 = createMatrix(1, 2, [Ra(3), Ra(2)])
c2 = createMatrix(4, 5, \
[Ra(0), 0, 1, 2, -1]+[0, 0, 5, 12, -2]+[0, 0, 1, 3, -1]+[0, 0, 1, 2, 0])
c3 = createMatrix(3, 2, [Ra(1), 2]+[2, 5]+[6, 7])
## for FieldSquareMatrix
# Square rational matrices (d2 is singular: row3 = row1 + row2; d7 has
# non-integer rational entries).
d1 = createMatrix(2, 2, [Ra(1), Ra(2)]+[Ra(3), Ra(4)])
d2 = createMatrix(3, 3, [Ra(1), 2, 3]+[4, 5, 6]+[5, 7, 9])
d3 = Matrix(3, 3, \
[Ra(1), Ra(2), Ra(3)]+[Ra(0), Ra(5), Ra(-2)]+[7, 1, 9])
d4 = createMatrix(6, 6, \
[Ra(4), 2, 5, 0, 2, 1]+[5, 1, 2, 5, 1, 1]+[90, 7, 54, 8, 4, 6]+\
[7, 5, 0, 8, 2, 5]+[8, 2, 6, 5, -4, 2]+[4, 1, 5, 6, 3, 1])
d5 = createMatrix(4, 4, \
[Ra(2), -1, 0, 0]+[-1, 2, -1, 0]+[0, -1, 2, -1]+[0, 0, -1, 2])
d6 = createMatrix(4, 4, \
[Ra(1), 2, 3, 4]+[2, 3, 4, 5]+[3, 4, 5, 6]+[4, 5, 6, 7])
d7 = Matrix(3, 3, \
[Ra(1, 2), Ra(2, 3), Ra(1, 5)]+[Ra(3, 2), Ra(1, 3), Ra(2, 5)]+[Ra(-1, 2), Ra(4, 3), Ra(3, 5)])
## other objects
# Vectors used as right-hand sides / multiplication operands.
v1 = vector.Vector([1, 4])
v2 = vector.Vector([8])
v3 = vector.Vector([0, 0, 1])
class MatrixTest(unittest.TestCase):
    """Tests for generic Matrix construction, element access and mapping,
    driven by the module-level fixtures a1..a5, b1..b5."""

    def testInit(self):
        # The constructor accepts rows-as-lists, columns-as-tuples, or
        # columns as Vector objects; all three must build the same matrix.
        lst_lst = Matrix(3, 2, [[21, -12], [1, -1], [0, 0]])
        self.assertEqual(a4, lst_lst)
        lst_tuple = Matrix(3, 2, [(21, 1, 0), (-12, -1, 0)])
        self.assertEqual(a4, lst_tuple)
        lst_vect = Matrix(3, 2, [vector.Vector([21, 1, 0]), vector.Vector([-12, -1, 0])])
        self.assertEqual(a4, lst_vect)

    def testGetitem(self):
        # Indexing is 1-based: m[i, j] is an entry, m[j] is a column vector.
        self.assertEqual(2, a1[1, 2])
        self.assertEqual(-2, b2[2, 2])
        self.assertRaises(IndexError, a1.__getitem__, "wrong")
        self.assertEqual(vector.Vector([21, 1, 0]), a4[1])

    def testEqual(self):
        self.assertTrue(a1 == Matrix(1, 2, [3, 2]))
        # == must return a plain bool, not some matrix-like object.
        self.assertTrue(isinstance(a1 == a1, bool))

    def testNonZero(self):
        # A zero matrix is falsy.
        self.assertTrue(not zeroMatrix(2, 3))

    def testContains(self):
        self.assertTrue(5 in a2)

    def testCall(self):
        # Calling a matrix of polynomials evaluates each entry at the argument:
        # [3+5x, 2x] at x=2 -> [13, 4].
        call = createMatrix(1, 2, [13, 4])
        self.assertEqual(call, a5(2))

    def testMap(self):
        # map() applies a function entry-wise.
        pow_two = createMatrix(1, 2, [9, 4])
        self.assertEqual(pow_two, a1.map(lambda n : n ** 2))

    def testReduce(self):
        # reduce() folds all entries with the given binary function.
        self.assertEqual(-2, a3.reduce(min))

    def testGetRow(self):
        row1 = vector.Vector([3, -2])
        self.assertEqual(row1, a3.getRow(2))
        row2 = vector.Vector([1, 2])
        self.assertEqual(row2, b1.getRow(1))

    def testGetColumn(self):
        col1 = vector.Vector([-12, -1, 0])
        self.assertEqual(col1, a4.getColumn(2))
        col2 = vector.Vector([1, 3])
        self.assertEqual(col2, b1.getColumn(1))

    def testTranspose(self):
        trans = createMatrix(2, 3, [7, 3, 0]+[8, -2, 10])
        self.assertEqual(trans, a3.transpose())

    def testGetBlock(self):
        # getBlock(i, j, rows, cols): contiguous sub-block anchored at (i, j).
        block = Matrix(2, 3, [4, 6, 5, 6, 8, 9])
        self.assertEqual(block, b5.getBlock(2, 1, 2, 3))

    def testSubMatrix(self):
        # subMatrix(i, j) removes row i and column j; the list form keeps
        # only the listed rows/columns.
        sub1 = createMatrix(2, 1, [-12, 0])
        self.assertEqual(sub1, a4.subMatrix(2, 1))
        sub2 = createMatrix(2, 2, [4, 5, 6, 9])
        self.assertEqual(sub2, b5.subMatrix([2, 3], [1, 3]))
class SquareMatrixTest(unittest.TestCase):
    """Structure predicates on square matrices: triangular, diagonal,
    scalar and symmetric checks."""

    def testIsUpperTriangularMatrix(self):
        top_rows = [1, 2, 3, 4] + [0, 5, 6, 7] + [0, 0, 8, 9]
        upper = createMatrix(4, 4, top_rows + [0, 0, 0, 1])
        almost = createMatrix(4, 4, top_rows + [0, 0, 1, 1])
        assert upper.isUpperTriangularMatrix()
        assert not almost.isUpperTriangularMatrix()

    def testIsLowerTriangularMatrix(self):
        lower = createMatrix(
            4, 4, [1, 0, 0, 0] + [2, 3, 0, 0] + [4, 5, 6, 0] + [7, 8, 9, 10])
        almost = createMatrix(
            4, 4, [1, 0, 0, 0] + [2, 3, 1, 0] + [4, 5, 6, 0] + [7, 8, 9, 10])
        assert lower.isLowerTriangularMatrix()
        assert not almost.isLowerTriangularMatrix()

    def testIsDiagonalMatrix(self):
        assert createMatrix(2, 2, [-3, 0, 0, 5]).isDiagonalMatrix()

    def testIsScalarMatrix(self):
        assert createMatrix(2, 2, [10, 0, 0, 10]).isScalarMatrix()

    def testIsSymmetricMatrix(self):
        assert createMatrix(2, 2, [2, 3, 3, 5]).isSymmetricMatrix()
class RingMatrixTest(unittest.TestCase):
    """Arithmetic tests for matrices over a ring (integer fixtures):
    +, -, *, scalar/vector product, %, negation and Hermite normal form."""

    def testAdd(self):
        sum1 = createMatrix(1, 2, [8, -4])
        self.assertEqual(sum1, a1 + a2)
        sum2 = createMatrix(2, 2, [1, 1, 4, 2])
        self.assertEqual(sum2, b1 + b2)

    def testSub(self):
        sub1 = createMatrix(1, 2, [-2, 8])
        self.assertEqual(sub1, a1 - a2)
        sub2 = createMatrix(2, 2, [1, 3, 2, 6])
        self.assertEqual(sub2, b1 - b2)

    def testMul(self):
        mul1 = createMatrix(1, 2, [2, -7])
        self.assertEqual(mul1, a1 * b2)
        mul2 = createMatrix(3, 2, [-15, -6]+[-2, -2]+[0, 0])
        self.assertEqual(mul2, a4 * b1)
        mul3 = createMatrix(3, 2, [1, -1]+[109, -64]+[156, -93])
        self.assertEqual(mul3, b3 * a4)

    def testScalarMul(self):
        mul = createMatrix(1, 2, [15, 10])
        self.assertEqual(mul, 5 * a1)

    def testVectorMul(self):
        mul = vector.Vector([9, 19])
        self.assertEqual(mul, b1 * v1)

    def testMod(self):
        # Entry-wise modulo (Python semantics: -2 % 3 == 1).
        mod1 = createMatrix(3, 2, [1, 2]+[0, 1]+[0, 1])
        self.assertEqual(mod1, a3 % 3)

    def testNeg(self):
        neg = createMatrix(2, 2, [0, 1, -1, 2])
        self.assertEqual(neg, -b2)

    def testHermiteNormalForm(self):
        # A matrix already in HNF must be returned unchanged.
        already = createMatrix(4, 3, [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
        h = already.hermiteNormalForm()
        self.assertEqual(h, already)
        # Rank-deficient input keeps its shape; the dropped column is zero.
        lessrank = createMatrix(2, 3, [1, 0, 0, 0, 1, 0])
        h = lessrank.hermiteNormalForm()
        self.assertEqual(h.row, lessrank.row)
        self.assertEqual(h.column, lessrank.column)
        zerovec = vector.Vector([0, 0])
        self.assertEqual(zerovec, h.getColumn(1))
        square = createMatrix(3, 3, [1, 0, 0, 0, 1, 1, 0, 1, 1])
        h = square.hermiteNormalForm()
        self.assertEqual(h.row, square.row)
        self.assertEqual(h.column, square.column)
        hermite = createMatrix(3, 3, [0, 1, 0, 0 ,0, 1, 0, 0, 1])
        self.assertEqual(hermite, h)

    def testExtHermiteNormalForm(self):
        # exthermiteNormalForm() also returns the transform U with M*U == H.
        already = createMatrix(4, 3, [1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1])
        U_1, h_1 = already.exthermiteNormalForm()
        self.assertEqual(h_1, already)
        self.assertEqual(already * U_1, h_1)
        lessrank = createMatrix(2, 3, [1, 0, 0, 0, 1, 0])
        U_2, h_2 = lessrank.exthermiteNormalForm()
        self.assertEqual(h_2.row, lessrank.row)
        self.assertEqual(h_2.column, lessrank.column)
        self.assertEqual(lessrank * U_2, h_2)

    def testKernelAsModule(self):
        # Kernel generators must be annihilated by the matrix.
        ker_1 = a1.kernelAsModule()
        self.assertEqual(a1 * ker_1[1], vector.Vector([0]))
        #zero test
        # A full-rank square matrix has a trivial kernel -> None.
        ker_2 = b1.kernelAsModule()
        self.assertEqual(ker_2, None)
class RingSquareMatrixTest(unittest.TestCase):
    """Tests for square matrices over a ring: powers, structure predicates,
    trace/determinant/cofactors and (ext)Smith normal form."""

    def testPow(self):
        pow1 = createMatrix(2, 2, [7, 10, 15, 22])
        self.assertEqual(pow1, b1 ** 2)
        # Zeroth power is the identity.
        pow2 = createMatrix(2, 2, [1, 0, 0, 1])
        self.assertEqual(pow2, b2 ** 0)

    def testIsOrthogonalMatrix(self):
        orthogonal = createMatrix(2, 2, [Ra(3, 5), Ra(4, 5), Ra(-4, 5), Ra(3, 5)])
        assert orthogonal.isOrthogonalMatrix()

    def testIsAlternatingMatrix(self):
        alternate1 = createMatrix(2, 2, [0, 2, -2, 0])
        assert alternate1.isAlternatingMatrix()
        # NOTE(review): this line differs from the rest of the class -- the
        # 2-argument createMatrix form (square shorthand?) and the
        # isAntisymmetricMatrix name (alias of isAlternatingMatrix?) --
        # verify both against nzmath.matrix's API.
        alternate2 = createMatrix(2, [1, 2, -2, 0])
        assert not alternate2.isAntisymmetricMatrix()

    def testIsSingular(self):
        assert b6.isSingular()

    def testTrace(self):
        self.assertEqual(15, b4.trace())

    def testDeterminant(self):
        self.assertEqual(-2, b1.determinant())
        #sf.bug #1914349
        # Regression: determinant of an integer matrix must stay an int.
        self.assertTrue(isinstance(b3.determinant(), int))
        self.assertEqual(36, b3.determinant())

    def testCofactor(self):
        self.assertEqual(-6, b5.cofactor(1, 2))

    def testCommutator(self):
        # commutator(A, B) == A*B - B*A for these fixtures.
        commutator = createMatrix(2, 2, [5, -1, 9, -5])
        self.assertEqual(commutator, b1.commutator(b2))

    def testCharacteristicMatrix(self):
        # x*I - A, with polynomial entries over the integers.
        charMat = createMatrix(2, 2, \
        [Poly({0:-1,1:1}, Int), Poly({0:-2}, Int)]+[Poly({0:-3}, Int), Poly({0:-4,1:1}, Int)])
        self.assertEqual(charMat, b1.characteristicMatrix())

    def testCharacteristicPolynomial(self):
        assert d1.characteristicPolynomial() == d1.characteristicMatrix().determinant()

    def testAdjugateMatrix(self):
        adjugate = createMatrix(3, 3, [47, -15, -19, -14, -12, 2, -35, 13, 5])
        self.assertEqual(adjugate, b4.adjugateMatrix())
        # Fundamental identity: A * adj(A) == det(A) * I.
        assert d1 * d1.adjugateMatrix() == d1.determinant() * unitMatrix(d1.row)

    def testCofactorMatrix(self):
        cofact = d5.cofactorMatrix()
        self.assertEqual(d5.cofactor(2, 3), cofact[2, 3])

    def testSmithNormalForm(self):
        self.assertEqual([12, 1, 1], b5.smithNormalForm())
        # Singular input must raise instead of returning invariants.
        self.assertRaises(ValueError, b6.smithNormalForm)
        self.assertEqual([1, 1, 1], b7.smithNormalForm())
        self.assertEqual([9, 3, 1], b8.smithNormalForm())

    def testExtSmithNormalForm(self):
        # extsmithNormalForm() returns U, V, M with M == U * A * V.
        smith1 = Matrix(3, 3, [12, 0, 0, 0, 1, 0, 0, 0, 1])
        U_1, V_1, M_1 = b5.extsmithNormalForm()
        self.assertEqual(smith1, M_1)
        self.assertEqual(M_1, U_1 * b5 * V_1)
        smith2 = Matrix(3, 3, [9, 0, 0, 0, 3, 0, 0, 0, 1])
        U_2, V_2, M_2 = b8.extsmithNormalForm()
        self.assertEqual(smith2, M_2)
        self.assertEqual(M_2, U_2 * b8 * V_2)
class FieldMatrixTest(unittest.TestCase):
    """Tests for matrices over a field (rationals): division, kernel/image,
    rank, inverse image, solving and column echelon form."""

    def testDiv(self):
        div = createMatrix(1, 2, [1, Ra(2, 3)])
        self.assertEqual(div, c1 / 3)

    def testKernel(self):
        # c2 * kernel(c2) must be the zero matrix (falsy).
        ker = c2.kernel()
        self.assertTrue(not c2 * ker)

    def testImage(self):
        img = createMatrix(4,3,[1,2,-1]+[5,12,-2]+[1,3,-1]+[1,2,0])
        self.assertEqual(img, c2.image())

    def testRank(self):
        self.assertEqual(3, c2.rank())
        self.assertEqual(3, d3.rank())

    def testInverseImage(self):
        # inverseImage(B) returns X with A*X == B when such X exists.
        self.assertEqual(d6, d5 * d5.inverseImage(d6))
        # d2 is singular (row3 = row1 + row2), so I has no inverse image.
        self.assertRaises(NoInverseImage, d2.inverseImage, unitMatrix(3))

    def testSolve(self):
        # solve(b) returns (particular solution, basis of the kernel).
        for i in range(1, d6.column+1):
            self.assertEqual(d6[i], d5 * d5.solve(d6[i])[0])
        sol1 = c1.solve(v2)
        # Every particular + homogeneous combination must solve the system.
        for i in range(len(sol1[1])):
            self.assertEqual(v2, c1 * (sol1[0]+sol1[1][i]))
        self.assertRaises(NoInverseImage, c3.solve, v3)

    def testColumnEchelonForm(self):
        echelon = createMatrix(4, 5,\
        [Ra(0), 0, 1, 0, 0]+[0, 0, 0, 2, 3]+[0, 0, 0, 1, 0]+[0, 0, 0, 0, 1])
        self.assertEqual(echelon, c2.columnEchelonForm())
class FieldSquareMatrixTest(unittest.TestCase):
def testPow(self):
    # Negative exponent: d1 ** (-2) equals inverse(d1) squared.
    pow3 = createMatrix(2, 2, [Ra(11, 2), Ra(-5, 2), Ra(-15, 4), Ra(7, 4)])
    self.assertEqual(pow3, d1 ** (-2))
def testTriangulate(self):
    # Elimination to upper-triangular form over the rationals.
    triangle = createMatrix(3, 3, \
    [Ra(1, 1), 2, 3]+[0, 5, -2]+[0, 0, Ra(-86, 5)])
    self.assertEqual(triangle, d3.triangulate())
def testDeterminant(self):
    # Rational determinant of the non-integer rational matrix d7.
    self.assertEqual(Ra(-7, 15), d7.determinant())
def testInverse(self):
cinverse = createMatrix(3, 3)
cinverse.set([Ra(-47, 86), Ra(15, 86), Ra(19, 86)]+\
[Ra(7, 43), Ra(6, 43), Ra(-1, 43)]+[Ra(35, 86), Ra(-13, | |
# utils/surface.py
import maya.cmds as mc
import maya.OpenMaya as OpenMaya
import glTools.utils.base
import glTools.utils.curve
import glTools.utils.component
import glTools.utils.mathUtils
import glTools.utils.matrix
import glTools.utils.shape
import glTools.utils.stringUtils
import math
def isSurface(surface):
    '''
    Return True if the specified object is a nurbs surface shape, or a
    transform whose first (non-intermediate) shape is a nurbs surface.
    @param surface: Object to query
    @type surface: str
    '''
    # Non-existent objects are never surfaces
    if not mc.objExists(surface):
        return False
    # Resolve a transform to its first non-intermediate shape
    node = surface
    if mc.objectType(node) == 'transform':
        node = mc.listRelatives(node, s=True, ni=True, pa=True)[0]
    return mc.objectType(node) == 'nurbsSurface'
def getSurfaceFn(surface):
    '''
    Return an OpenMaya.MFnNurbsSurface function set attached to the
    specified nurbs surface.
    @param surface: Surface to create function class for
    @type surface: str
    '''
    # Validate input and resolve a transform to its shape node
    if not isSurface(surface):
        raise Exception('Object '+surface+' is not a valid surface!')
    shape = surface
    if mc.objectType(shape) == 'transform':
        shape = mc.listRelatives(shape, s=True, ni=True, pa=True)[0]
    # Resolve the shape name to an MDagPath via an API selection list
    selection = OpenMaya.MSelectionList()
    OpenMaya.MGlobal.getSelectionListByName(shape, selection)
    dagPath = OpenMaya.MDagPath()
    selection.getDagPath(0, dagPath)
    # Attach the function set to the DAG path
    fn = OpenMaya.MFnNurbsSurface()
    fn.setObject(dagPath)
    return fn
def chordLength(surface, param=0.0, direction='u'):
    '''
    Return the arc length of a surface isoparm at the given parameter
    and direction.
    @param surface: Surface to query
    @type surface: str
    @param param: The parameter on the surface to query length of
    @type param: float
    @param direction: Direction along the surface to measure length of
    @type direction: str
    '''
    if not isSurface(surface):
        raise Exception('Object '+surface+' is not a valid surface!')
    # Extract the isoparm as a temporary curve, measure it, then clean up
    isoCrv = mc.duplicateCurve(surface+'.'+direction+'['+str(param)+']', ch=0, rn=0, local=0)
    crvLen = mc.arclen(isoCrv[0])
    mc.delete(isoCrv)
    return crvLen
def closestPoint(surface,pos=(0,0,0)):
    '''
    Return closest point on surface to target position
    @param surface: Surface to query closest point from
    @type surface: str
    @param pos: Position to query surface from
    @type pos: tuple/list
    @return: (u, v) parameter pair of the closest point on the surface
    '''
    # Check surface
    if not isSurface(surface): raise Exception('Object '+surface+' is not a valid surface!')
    # Get point world position
    pos = glTools.utils.base.getPosition(pos)
    pt = OpenMaya.MPoint(pos[0],pos[1],pos[2],1.0)
    # Get surface function set
    surfFn = getSurfaceFn(surface)
    # MFnNurbsSurface.closestPoint() writes the parameters through double
    # pointers, so build them with MScriptUtil (Maya Python API 1.0 idiom).
    uCoord = OpenMaya.MScriptUtil()
    uCoord.createFromDouble(0.0)
    uCoordPtr = uCoord.asDoublePtr()
    vCoord = OpenMaya.MScriptUtil()
    vCoord.createFromDouble(0.0)
    vCoordPtr = vCoord.asDoublePtr()
    # get closest uCoord to edit point position
    # (positional args after the pointers: ignoreTrimBoundaries, tolerance,
    # space -- see the MFnNurbsSurface API reference)
    surfFn.closestPoint(pt,uCoordPtr,vCoordPtr,True,0.0001,OpenMaya.MSpace.kWorld)
    # Dereference the double pointers back into Python floats
    return (OpenMaya.MScriptUtil(uCoordPtr).asDouble(),OpenMaya.MScriptUtil(vCoordPtr).asDouble())
def distToSurface(surface,pos=(0,0,0)):
    '''
    Return the world-space distance from a position to the closest point
    on the specified surface.
    @param surface: Surface to measure the distance to
    @type surface: str
    @param pos: Position (or point object) to measure the distance from
    @type pos: tuple/list
    '''
    # Get point world position
    pos = glTools.utils.base.getPosition(pos)
    # Get closest point on surface (as a (u, v) pair, then world position)
    uv = closestPoint(surface,pos)
    pt = mc.pointOnSurface(surface,u=uv[0],v=uv[1],p=True)
    # Get distance to surface point
    dist = glTools.utils.mathUtils.distanceBetween(pos,pt)
    # Return Result
    return dist
def snapPtsToSurface(surface, pointList):
    '''
    Snap each given point or transform to its closest point on a surface.
    @param surface: Nurbs surface to snap points to
    @type surface: str
    @param pointList: Point to snap to the specified surface
    @type pointList: list
    '''
    # Validate surface
    if not isSurface(surface):
        raise Exception('Object '+surface+' is not a valid surface!')
    # Flatten the point/component list
    pts = mc.ls(pointList, fl=True)
    # Node types handled via snapToSurface (whole-object snap)
    transformTypes = ['transform', 'joint', 'ikHandle', 'effector']
    for pt in pts:
        if mc.objectType(pt) in transformTypes:
            # Transforms: delegate to closest-point snapToSurface()
            snapToSurface(surface, pt, 0.0, 0.0, True, snapPivot=False)
        else:
            # Components/points: move directly to the closest surface point
            pos = mc.pointPosition(pt)
            uParam, vParam = closestPoint(surface, pos)
            sPt = mc.pointOnSurface(surface, u=uParam, v=vParam, position=True)
            mc.move(sPt[0], sPt[1], sPt[2], pt, ws=True, a=True)
def locatorSurface(surface,controlPoints=None,locatorScale=0.075,prefix=''):
    '''
    Drive the control point positions of a surface with a set of control locators
    @param surface: Input surface to connect locators positions to
    @type surface: str
    @param controlPoints: List of control points to be driven by locators. If left as default (None), all control points will be connected.
    @type controlPoints: list
    @param locatorScale: Scale of the locators relative to the area of the surface
    @type locatorScale: float
    @param prefix: Name prefix for newly created nodes
    @type prefix: str
    '''
    # Check surface. Call the module-local helpers directly: this module IS
    # glTools.utils.surface and does not import itself, so the previous
    # fully-qualified glTools.utils.surface.* lookup was not guaranteed to
    # resolve at runtime.
    if not isSurface(surface):
        raise Exception('Object '+surface+' is not a valid surface!')
    if mc.objectType(surface) == 'transform':
        surface = mc.listRelatives(surface,s=True,ni=True,pa=True)[0]
    # Check prefix
    if not prefix: prefix = glTools.utils.stringUtils.stripSuffix(surface)
    # Calculate locator scale relative to the total surface area
    locatorScale *= math.sqrt(surfaceArea(surface))
    # Get Control Points (default/empty: all CVs of the surface).
    # Note: default changed from a shared mutable [] to None (equivalent
    # behaviour, both are falsy).
    if not controlPoints:
        controlPoints = glTools.utils.component.getComponentIndexList(surface)[surface]
    else:
        controlPoints = glTools.utils.component.getComponentIndexList(controlPoints)[surface]
    # Create locators and connect to control points
    locatorList = []
    for cv in controlPoints:
        # Get flattened single index (string) for naming and connection
        ind = glTools.utils.component.getSingleIndex(surface,cv)
        indStr = glTools.utils.stringUtils.stringIndex(ind,2)
        # Create locator
        loc = prefix+'_cv'+indStr+'_loc'
        loc = mc.spaceLocator(n=loc)[0]
        locatorList.append(loc)
        mc.setAttr(loc+'.localScale',locatorScale,locatorScale,locatorScale)
        # Place the locator at the control point's current position, then
        # freeze transforms so worldPosition matches the CV exactly
        pos = mc.pointPosition(surface+'.cv['+str(cv[0])+']['+str(cv[1])+']')
        mc.setAttr(loc+'.t',pos[0],pos[1],pos[2])
        mc.makeIdentity(loc,apply=True,t=1,r=1,s=1,n=0)
        # Connect locator world position to drive the control point
        mc.connectAttr(loc+'.worldPosition[0]',surface+'.controlPoints['+str(ind)+']')
    return locatorList
def surfaceArea(surface, worldSpace=True):
    '''
    Calculates the surface area of a specified nurbs surface.
    @param surface: Nurbs surface to calculate the surface area for
    @type surface: str
    @param worldSpace: Calculate the surface area in world or local space units
    @type worldSpace: bool
    '''
    # Validate the surface and resolve a transform to its shape node
    if not mc.objExists(surface):
        raise Exception('Object '+surface+' does not exist!')
    if mc.objectType(surface) == 'transform':
        surfaceShape = mc.listRelatives(surface,s=True,ni=True,pa=True)[0]
        if mc.objectType(surfaceShape) != 'nurbsSurface':
            raise Exception('Object '+surface+' is not a valid nurbs surface!')
        surface = surfaceShape
    # Attach an API function set and query the area in the requested space
    surfaceFn = getSurfaceFn(surface)
    space = OpenMaya.MSpace.kWorld if worldSpace else OpenMaya.MSpace.kObject
    return surfaceFn.area(space)
def snapToSurface(surface, obj, uValue=0.0, vValue=0.0, useClosestPoint=False, snapPivot=False):
    '''
    Snap an object (or just its pivot) to a point on a surface.
    @param surface: Surface to snap to
    @type surface: str
    @param obj: Object to move to point on surface
    @type obj: str
    @param uValue: U Paramater value of the surface to snap to
    @type uValue: float
    @param vValue: V Paramater value of the surface to snap to
    @type vValue: float
    @param useClosestPoint: Use the closest point on the surface instead of the specified uv parameter
    @type useClosestPoint: bool
    @param snapPivot: Move only the objects pivot to the surface point
    @type snapPivot: bool
    '''
    # Validate surface
    if not isSurface(surface):
        raise Exception('Object '+surface+' is not a valid surface!!')
    # Query the valid parameter ranges
    minU = mc.getAttr(surface+'.minValueU')
    maxU = mc.getAttr(surface+'.maxValueU')
    minV = mc.getAttr(surface+'.minValueV')
    maxV = mc.getAttr(surface+'.maxValueV')
    # Optionally derive (u, v) from the object's current pivot position
    if useClosestPoint:
        objPos = mc.xform(obj, q=True, ws=True, rp=True)
        uValue, vValue = closestPoint(surface, objPos)
    # Verify the parameters fall inside the surface domain
    if uValue < minU or uValue > maxU:
        raise Exception('U paramater '+str(uValue)+' is not within the U parameter range for '+surface+'!!')
    if vValue < minV or vValue > maxV:
        raise Exception('V paramater '+str(vValue)+' is not within the V parameter range for '+surface+'!!')
    # World position of the target surface point
    pnt = mc.pointPosition(surface+'.uv['+str(uValue)+']['+str(vValue)+']')
    # Snap either the pivot only, or the whole object (relative move)
    piv = mc.xform(obj, q=True, ws=True, rp=True)
    if snapPivot:
        mc.xform(obj, piv=pnt, ws=True)
    else:
        mc.move(pnt[0]-piv[0], pnt[1]-piv[1], pnt[2]-piv[2], obj, r=True, ws=True)
def orientToSurface( surface,
                     obj,
                     uValue = 0.0,
                     vValue = 0.0,
                     useClosestPoint = False,
                     tangentUAxis = 'x',
                     tangentVAxis = 'y',
                     alignTo = 'u' ):
    '''
    Orient object to a specified point on a surface.
    @param surface: Surface to orient object to
    @type surface: str
    @param obj: Object to orient
    @type obj: str
    @param uValue: U Paramater value of the surface to orient to
    @type uValue: float
    @param vValue: V Paramater value of the surface to orient to
    @type vValue: float
    @param useClosestPoint: Use the closest point on the surface instead of the specified uv parameter
    @type useClosestPoint: bool
    @param tangentUAxis: Basis axis that will be derived from the U tangent of the surface point
    @type tangentUAxis: str
    @param tangentVAxis: Basis axis that will be derived from the V tangent of the surface point
    @type tangentVAxis: str
    @param alignTo: Surface direction ('u' or 'v') whose tangent is used as the primary aim axis
    @type alignTo: str
    '''
    # Check surface
    if not isSurface(surface): raise Exception('Object '+surface+' is not a valid surface!!')
    # Check Obj is a transform-type node
    transform = ['transform','joint','ikHandle','effector']
    if not transform.count(mc.objectType(obj)):
        raise Exception('Object '+obj+' is not a valid transform!!')
    # Check uValue/vValue
    minu = mc.getAttr(surface+'.minValueU')
    maxu = mc.getAttr(surface+'.maxValueU')
    minv = mc.getAttr(surface+'.minValueV')
    maxv = mc.getAttr(surface+'.maxValueV')
    # Closest Point
    if useClosestPoint:
        pos = mc.xform(obj,q=True,ws=True,rp=True)
        (uValue,vValue) = closestPoint(surface,pos)
    # Verify surface parameter
    if uValue < minu or uValue > maxu: raise Exception('U paramater '+str(uValue)+' is not within the U parameter range for '+surface+'!!')
    # BUGFIX: report the offending V value (was str(uValue))
    if vValue < minv or vValue > maxv: raise Exception('V paramater '+str(vValue)+' is not within the V parameter range for '+surface+'!!')
    # Check object
    if not mc.objExists(obj): raise Exception('Object '+obj+' does not exist!!')
    rotateOrder = mc.getAttr(obj+'.ro')
    # Get normalized tangents at the surface point
    tanU = mc.pointOnSurface(surface,u=uValue,v=vValue,ntu=True)
    tanV = mc.pointOnSurface(surface,u=uValue,v=vValue,ntv=True)
    # Build rotation matrix: aim along the `alignTo tangent, up along the other
    aimVector = tanU
    if alignTo == 'v': aimVector = tanV
    upVector = tanV
    if alignTo == 'v': upVector = tanU
    aimAxis = tangentUAxis
    if alignTo == 'v': aimAxis = tangentVAxis
    upAxis = tangentVAxis
    if alignTo == 'v': upAxis = tangentUAxis
    mat = glTools.utils.matrix.buildRotation(aimVector,upVector,aimAxis,upAxis)
    rot = glTools.utils.matrix.getRotation(mat,rotateOrder)
    # Orient object to surface
    mc.rotate(rot[0],rot[1],rot[2],obj,a=True,ws=True)
def rebuild(surface,spansU=0,spansV=0,fullRebuildU=False,fullRebuildV=False,rebuildUfirst=True,replaceOrig=False):
'''
Do brute force surface rebuild for even parameterization
@param surface: Nurbs surface to rebuild
@type surface: str
@param spansU: Number of spans along U. If 0, keep original value.
@type spansU: int
@param spansV: Number of spans along V. If 0, keep original value.
@type spansV: int
@param replaceOrig: Replace original surface, or create new rebuilt surface.
@type replaceOrig: bool
'''
# ==========
# - Checks -
# ==========
# Check surface
if not isSurface(surface):
raise Exception('Object "'+surface+'" is not a valid surface!')
# Check spans
if not spansU: spansU = mc.getAttr(surface+'.spansU')
if not spansV: spansV = mc.getAttr(surface+'.spansV')
# =============
# - Rebuild U -
# =============
# Get V range
if rebuildUfirst:
dir = 'u'
opp = 'v'
spans = spansU
min = mc.getAttr(surface+'.minValueV')
max = mc.getAttr(surface+'.maxValueV')
else:
dir = 'v'
opp = 'u'
spans = spansV
min = mc.getAttr(surface+'.minValueU')
max = mc.getAttr(surface+'.maxValueU')
val = min + (max - min) * 0.5
# Calculate surface length
iso_crv = mc.duplicateCurve(surface+'.'+opp+'['+str(val)+']',ch=0,rn=0,local=0)[0]
iso_len = mc.arclen(iso_crv)
iso_inc = iso_len / spans
# Get spaced isoparm list
curveFn = glTools.utils.curve.getCurveFn(iso_crv)
iso_list = [surface+'.'+dir+'['+str(curveFn.findParamFromLength(iso_inc*i))+']' for i in range(spans+1)]
mc.delete(iso_crv)
# Check full rebuild
if fullRebuildV:
# Extract isoparm curves
iso_crv_list = [mc.duplicateCurve(iso,ch=False,rn=False,local=False)[0] for iso in iso_list]
# Rebuild isoparm curves
for iso_crv in iso_crv_list:
mc.rebuildCurve(iso_crv,ch=False,rpo=True,rt=0,end=1,kr=0,kcp=0,kep=1,kt=1,s=0,d=3,tol=0)
# Loft final surface
int_surface = mc.loft(iso_crv_list,ch=0,u=1,c=0,ar=1,d=3,ss=1,rn=0,po=0,rsn=True)[0]
# Delete intermediate curves
mc.delete(iso_crv_list)
else:
# Loft intermediate surface
int_surface = mc.loft(iso_list,ch=0,u=1,c=0,ar=1,d=3,ss=1,rn=0,po=0,rsn=True)[0]
# =============
# - Rebuild V -
# =============
# Get V range (intermediate surface)
if rebuildUfirst:
dir = 'u'
opp = 'v'
spans = spansV
min = mc.getAttr(int_surface+'.minValueU')
max = mc.getAttr(int_surface+'.maxValueU')
else:
dir = 'v'
opp = 'u'
spans = spansU
min = mc.getAttr(int_surface+'.minValueV')
max = mc.getAttr(int_surface+'.maxValueV')
val = min + (max - min) * 0.5
# Calculate surface length (intermediate surface)
iso_crv = mc.duplicateCurve(int_surface+'.'+opp+'['+str(val)+']',ch=0,rn=0,local=0)[0]
iso_len = mc.arclen(iso_crv)
iso_inc = iso_len / spans
# Get spaced isoparm list
curveFn = glTools.utils.curve.getCurveFn(iso_crv)
iso_list = [int_surface+'.'+dir+'['+str(curveFn.findParamFromLength(iso_inc*i))+']' for i in range(spans+1)]
mc.delete(iso_crv)
# Check full rebuild
if fullRebuildU:
# Extract isoparm curves
iso_crv_list = [mc.duplicateCurve(iso,ch=False,rn=False,local=False)[0] for iso in iso_list]
# Rebuild isoparm curves
for iso_crv in iso_crv_list:
mc.rebuildCurve(iso_crv,ch=False,rpo=True,rt=0,end=1,kr=0,kcp=0,kep=1,kt=1,s=0,d=3,tol=0)
# Loft final surface
rebuild_surface = mc.loft(iso_crv_list,ch=0,u=1,c=0,ar=1,d=3,ss=1,rn=0,po=0,rsn=True)[0]
# Delete intermediate curves
mc.delete(iso_crv_list)
else:
# Loft final surface
rebuild_surface = mc.loft(iso_list,ch=0,u=1,c=0,ar=1,d=3,ss=1,rn=0,po=0,rsn=True)[0]
# Rename rebuilt surface
rebuild_surface = mc.rename(rebuild_surface,surface+'_rebuild')
rebuild_surfaceShape = mc.listRelatives(surface,s=True,ni=True,pa=True)[0]
mc.delete(int_surface)
# Re-parameterize 0-1
mc.rebuildSurface(rebuild_surface,ch=False,rpo=True,dir=2,rt=0,end=1,kr=0,kcp=1,kc=1,tol=0,fr=0)
# Initialize return value
outputShape = rebuild_surfaceShape
# ====================
# - Replace Original -
# ====================
if | |
you cover your eyes during a scary part in a movie?",
"What is your guilty pleasure?",
"Has anyone ever walked in on you when taking a shit in the bathroom?",
"Do you pick your nose?",
"Do you sing in the shower?",
"Have you ever peed yourself?",
"What was your most embarrassing moment in public?",
"Have you ever farted loudly in public?",
"Have you ever tried to take a sexy picture of yourself?",
"Do you think {name} is cute?",
"What does your dream boy or girl look like?",
"What was the last thing you texted?",
"Do you think you'll marry your current partner?",
"How often do you wash your undergarments?",
"Have you ever tasted ear wax?",
"Have you ever farted and then blamed someone else?",
"Have you ever tasted your sweat?",
"What is the most illegal thing you have ever done?",
"Who is your favourite? Mom or Dad?",
"Would you trade in your dog for a million dollars?",
"If you were allowed to marry more than one person, would you? Who would you choose to marry?",
"Would you rather lose your sex organs forever or gain 200 pounds?",
"Would you choose to save 100 people without anyone knowing about it or not save them but have everyone praise you for it?",
"If you could only hear one song for the rest of your life, what would it be?",
"If you lost one day of your life every time you said a swear word, would you try not to do it?",
"Who in this room would be the worst person to date? Why?",
"Would you rather live with no internet or no A/C or heating?",
"If someone offered you $1 million dollars to break up with your partner, would you do it?",
"If you were reborn, what decade would you want to be born in?",
"If you could go back in time and erase one thing you said or did, what would it be?",
"Has your partner ever embarrassed you?",
"Have you ever thought about cheating on your partner?",
"If you could suddenly become invisible, what would you do?",
"Have you ever been caught checking someone out?",
"Have you ever waved at someone thinking they saw you when really they didn't? What did you do when you realized it?",
"What's the longest time you've stayed in the bathroom, and why did you stay for that long?",
"What's the most unflattering school picture of you?",
"Have you ever cried because you missed your parents so much?",
"Describe the strangest dream you've ever had. Did you like it?",
"Have you ever posted something on social media that you regret?",
"What is your biggest fear?",
"Do you pee in the shower?",
"Have you ever ding dong ditched someone?",
"The world ends next week and you can do anything you want. What would you do?",
"Would you wear your shirt inside out for a whole day if someone paid you $100?",
"What is the most childish thing that you still do?",
"How far would you go to land the guy or girl of your dreams?",
"Tell us about a time you embarrassed yourself in front of a crush.",
"Have you ever kept a library book?",
"Who is one person you pretend to like, but actually don’t?",
"What children’s movie could you watch over and over again?",
"Have you ever kissed an animal?",
"What was the last thing you ate?",
"Do you have any unusual talents?",
"Do you have any phobias?",
"Have you ever used someone else's password?",
"Have you ever ridden the bus without paying the fare?",
"Do you message people during your classes?",
"Have you ever fallen asleep during a class?",
"Have you ever bitten a toenail?",
"Have you ever stolen something?",
"Are you a hard-working student?",
"What was the best day of your life?",
"What was the strangest dream you ever had?",
"What is the most annoying thing to you (pet peeve)?",
"If you could have a superpower, what would it be?",
"Who is most important to you?",
"Do you have bad foot odor?",
"Do you have any silly nicknames?",
"When was the last time you wet the bed?",
"How many pancakes have you eaten in a single sitting?",
"Have you ever accidentally hit something with your car?",
"If you had to make out with any Disney character, who would it be?",
"Have you ever watched a movie you knew you shouldn’t?",
"Have you ever wanted to try LARP (Live Action Role-Play)?",
"What app on your phone do you waste the most time on?",
"Have you ever pretended to be sick to get out of something? If so, what was it?",
"What is the most food you’ve eaten in a single sitting?",
"Do you dance when you’re by yourself?",
"Would you have voted for or against Trump?",
"What song on the radio do you sing with every time it comes on?",
"Do you own a pair of footie pajamas?",
"Are you scared of the dark?",
"What ‘As seen on TV’ product do you secretly want to buy?",
"Do you still take bubble baths?",
"If you were home by yourself all day, what would you do?",
"How many selfies do you take a day?",
"What is something you’ve done to try to be ‘cooler’?",
"When was the last time you brushed your teeth?",
"Have you ever used self tanner?",
"What does your favorite pajamas look like?",
"Do you have a security blanket?",
"Have you ever eaten something off the floor?",
"Have you ever butt-dialed someone?",
"Do you like hanging out with your parents?",
"Have you ever got caught doing something you shouldn’t?",
"What part of your body do you love and which part do you hate?",
"Have you ever had lice?",
"Have you ever pooped your pants?",
"What was the last rate-R movie you watched?",
"Do you lick your plate?",
"What is something that no one else knows about you?",
"Do you write in a diary?",
"Who would you hate to see naked?",
"How long have you gone without a shower?",
"If you could only text one person for the rest of your life, but you could never talk to that person face to face, who would that be?",
"How long have you gone without brushing your teeth?",
"What's one thing you would never eat on a first date?",
"What have you seen that you wish you could unsee?",
"If you could be reincarnated into anyone's body, who would you want to become?",
"If you switched genders for the day, what would you do?",
"What's one food that you will never order at a restaurant?",
"What's the worst weather to be stuck outside in if all you could wear was a bathing suit?",
"If your car broke down in the middle of the road, who in this room would be the last person you would call? Why?",
"What's the most useless piece of knowledge you know?",
"What did you learn in school that you wish you could forget?",
"Is it better to use shampoo as soap or soap as shampoo?",
"If you ran out of toilet paper, would you consider wiping with the empty roll?",
"What would be the worst part about getting pantsed in front of your crush?",
"If you could only use one swear word for the rest of your life, which one would you choose?",
"What's the best thing to say to your friend that would be the worst thing to say to your crush?",
"Who do you think is the Beyonce of the group?",
"Would you rather eat dog food or cat food?",
"If you had nine lives, what would you do that you wouldn't do now?",
"If you could play a prank on anyone without getting caught, who would you | |
1,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{n}', True, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'tas.b'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.TextToken, '@'),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0x2008, 0xf00f),
'm': (0xf0, 0x4),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'tst',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{m}', False, False, 0, 0),
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'tst'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{m}'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0xc800, 0xff00),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0xff, 0x0),
'disp': 0x0,
'cmd': 'tst',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.IMM, '0x{imm:x}', False, False, 0, 1),
Oper(OpType.REG, 'R0', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'tst'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.IntegerToken, '0x{imm:x}'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.RegisterToken, 'R0')
],
},
{
'opmask': (0xcc00, 0xff00),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0xff, 0x0),
'disp': 0x0,
'cmd': 'tst.b',
'width': 1,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.IMM, '0x{imm:x}', False, False, 0, 1),
Oper(OpType.REG, 'R0', True, True, 0, 0),
Oper(OpType.REG, 'GBR', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'tst.b'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.IntegerToken, '0x{imm:x}'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, '@('),
(InstructionTextTokenType.RegisterToken, 'R0'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.RegisterToken, 'GBR'),
(InstructionTextTokenType.TextToken, ')')
],
},
{
'opmask': (0x200a, 0xf00f),
'm': (0xf0, 0x4),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'xor',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{m}', False, False, 0, 0),
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'xor'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{m}'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0xca00, 0xff00),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0xff, 0x0),
'disp': 0x0,
'cmd': 'xor',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.IMM, '0x{imm:x}', False, False, 0, 1),
Oper(OpType.REG, 'R0', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'xor'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.IntegerToken, '0x{imm:x}'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.RegisterToken, 'R0')
],
},
{
'opmask': (0xce00, 0xff00),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0xff, 0x0),
'disp': 0x0,
'cmd': 'xor.b',
'width': 1,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.IMM, '0x{imm:x}', False, False, 0, 1),
Oper(OpType.REG, 'R0', True, True, 0, 0),
Oper(OpType.REG, 'GBR', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'xor.b'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.IntegerToken, '0x{imm:x}'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.TextToken, '@('),
(InstructionTextTokenType.RegisterToken, 'R0'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.RegisterToken, 'GBR'),
(InstructionTextTokenType.TextToken, ')')
],
},
{
'opmask': (0x4024, 0xf0ff),
'm': (0x0, 0x0),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'rotcl',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'rotcl'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0x4025, 0xf0ff),
'm': (0x0, 0x0),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'rotcr',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'rotcr'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0x4004, 0xf0ff),
'm': (0x0, 0x0),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'rotl',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'rotl'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0x4005, 0xf0ff),
'm': (0x0, 0x0),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'rotr',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'rotr'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0x400c, 0xf00f),
'm': (0xf0, 0x4),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'shad',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{m}', False, False, 0, 0),
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'shad'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{m}'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0x4020, 0xf0ff),
'm': (0x0, 0x0),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'shal',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'shal'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0x4021, 0xf0ff),
'm': (0x0, 0x0),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'shar',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'shar'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0x400d, 0xf00f),
'm': (0xf0, 0x4),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'shld',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{m}', False, False, 0, 0),
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'shld'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{m}'),
(InstructionTextTokenType.OperandSeparatorToken, ', '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0x4000, 0xf0ff),
'm': (0x0, 0x0),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'shll',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'shll'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0x4008, 0xf0ff),
'm': (0x0, 0x0),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'shll2',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'shll2'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0x4018, 0xf0ff),
'm': (0x0, 0x0),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'shll8',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'shll8'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0x4028, 0xf0ff),
'm': (0x0, 0x0),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'shll16',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'shll16'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0x4001, 0xf0ff),
'm': (0x0, 0x0),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'shlr',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'shlr'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0x4009, 0xf0ff),
'm': (0x0, 0x0),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'shlr2',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'shlr2'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0x4019, 0xf0ff),
'm': (0x0, 0x0),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'shlr8',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'shlr8'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0x4029, 0xf0ff),
'm': (0x0, 0x0),
'n': (0xf00, 0x8),
'imm': (0x0, 0x0),
'disp': 0x0,
'cmd': 'shlr16',
'width': 0,
'size': 2,
'is_label': False,
'is_delay': False,
'args': [
Oper(OpType.REG, 'R{n}', False, False, 0, 0)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'shlr16'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.RegisterToken, 'R{n}')
],
},
{
'opmask': (0x8b00, 0xff00),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0xff,
'cmd': 'bf',
'width': 0,
'size': 2,
'is_label': True,
'is_delay': False,
'args': [
Oper(OpType.DISP, '0x{disp:x}', False, False, 0, 2)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'bf'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.PossibleAddressToken, '0x{disp:x}')
],
},
{
'opmask': (0x8f00, 0xff00),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0xff,
'cmd': 'bf/s',
'width': 0,
'size': 2,
'is_label': True,
'is_delay': True,
'args': [
Oper(OpType.DISP, '0x{disp:x}', False, False, 0, 2)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'bf/s'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.PossibleAddressToken, '0x{disp:x}')
],
},
{
'opmask': (0x8900, 0xff00),
'm': (0x0, 0x0),
'n': (0x0, 0x0),
'imm': (0x0, 0x0),
'disp': 0xff,
'cmd': 'bt',
'width': 0,
'size': 2,
'is_label': True,
'is_delay': False,
'args': [
Oper(OpType.DISP, '0x{disp:x}', False, False, 0, 2)
],
'tokens': [
(InstructionTextTokenType.InstructionToken, 'bt'),
(InstructionTextTokenType.TextToken, ' '),
(InstructionTextTokenType.PossibleAddressToken, '0x{disp:x}')
],
},
{
'opmask': (0x8d00, 0xff00),
'm': (0x0, 0x0),
'n': | |
if key not in keys}
obj.update(kwargs)
if kwextra:
obj.set_text('')
else:
return obj
# Get properties from old object
for key in ('ha', 'va', 'color', 'transform', 'fontproperties'):
kwextra[key] = getattr(obj, 'get_' + key)() # copy over attrs
text = kwargs.pop('text', obj.get_text())
x, y = kwargs.pop('position', (None, None))
pos = obj.get_position()
x = _notNone(kwargs.pop('x', x), pos[0])
y = _notNone(kwargs.pop('y', y), pos[1])
return self.text(x, y, text, **kwextra)
def context(self, *, mode=2, rc_kw=None, **kwargs):
    """
    For internal use. Sets up temporary `~proplot.rctools.rc` settings by
    returning the result of `~proplot.rctools.rc_configurator.context`.

    Parameters
    ----------
    rc_kw : dict, optional
        A dictionary containing "rc" configuration settings that will
        be applied to this axes. Temporarily updates the
        `~proplot.rctools.rc` object. The caller's dictionary is never
        modified.
    **kwargs
        Any of three options:

        * A keyword arg for `Axes.format`, `XYAxes.format`,
          or `ProjAxes.format`.
        * A global "rc" keyword arg, like ``linewidth`` or ``color``.
        * A standard "rc" keyword arg **with the dots omitted**,
          like ``landcolor`` instead of ``land.color``.

        The latter two options update the `~proplot.rctools.rc`
        object, just like `rc_kw`.

    Other parameters
    ----------------
    mode : int, optional
        The "getitem mode". This is used under-the-hood -- you shouldn't
        have to use it directly. Determines whether queries to the
        `~proplot.rctools.rc` object will ignore
        `rcParams <https://matplotlib.org/users/customizing.html>`__.
        This can help prevent a massive number of unnecessary lookups
        when the settings haven't been changed by the user.
        See `~proplot.rctools.rc_configurator` for details.

    Returns
    -------
    `~proplot.rctools.rc_configurator`
        The `proplot.rctools.rc` object primed for use in a "with"
        statement.
    dict
        Dictionary of keyword arguments that are not `~proplot.rctools.rc`
        properties, to be passed to the ``format`` methods.
    """
    # Figure out which kwargs are valid rc settings
    # TODO: Support for 'small', 'large', etc. font
    kw = {}  # leftover keyword args destined for format()
    # NOTE: Copy the input dict so the caller's rc_kw is not mutated by
    # the dotless-name merging below.
    rc_kw = dict(rc_kw or {})
    for key, value in kwargs.items():
        key_fixed = RC_NODOTSNAMES.get(key)  # map dotless alias to real name
        if key_fixed is None:
            kw[key] = value
        else:
            rc_kw[key_fixed] = value
    rc._getitem_mode = 0  # might still be non-zero if a previous call errored
    # Return "context object", which is just the configurator itself
    # primed for use in a "with" statement
    return rc.context(rc_kw, mode=mode), kw
def format(
self, *, title=None, top=None,
figtitle=None, suptitle=None, rowlabels=None, collabels=None,
leftlabels=None, rightlabels=None,
toplabels=None, bottomlabels=None,
llabels=None, rlabels=None, tlabels=None, blabels=None,
**kwargs):
"""
Called by `XYAxes.format`, `ProjAxes.format`, and
`PolarAxes.format`. Formats the axes title(s), the a-b-c label, row
and column labels, and the figure title.
Parameters
----------
title : str, optional
The axes title.
abc : bool, optional
Whether to apply "a-b-c" subplot labelling based on the
``number`` attribute. If ``number`` is >26, the labels will loop
around to a, ..., z, aa, ..., zz, aaa, ..., zzz, ... Default is
:rc:`abc`.
abcstyle : str, optional
String denoting the format of a-b-c labels containing the character
``a`` or ``A``. ``'a'`` is the default, but e.g. ``'a.'``,
``'a)'``, or ``'A'`` might also be desirable. Default is
:rc:`abc.style`.
abcloc, titleloc : str, optional
Strings indicating the location for the a-b-c label and
main title. The following locations keys are valid. Defaults are
:rc:`abc.loc` and :rc:`title.loc`.
======================== ============================
Location Valid keys
======================== ============================
center above axes ``'center'``, ``'c'``
left above axes ``'left'``, ``'l'``
right above axes ``'right'``, ``'r'``
lower center inside axes ``'lower center``', ``'lc'``
upper center inside axes ``'upper center'``, ``'uc'``
upper right inside axes ``'upper right'``, ``'ur'``
upper left inside axes ``'upper left'``, ``'ul'``
lower left inside axes ``'lower left'``, ``'ll'``
lower right inside axes ``'lower right'``, ``'lr'``
======================== ============================
abcborder, titleborder : bool, optional
Whether to draw a white border around titles and a-b-c labels
positioned inside the axes. This can help them stand out on top
of artists plotted inside the axes. Defaults are
:rc:`abc.border` and :rc:`title.border`
ltitle, rtitle, ultitle, uctitle, urtitle, lltitle, lctitle, lrtitle \
: str, optional
Axes titles in particular positions. This lets you specify multiple
"titles" for each subplots. See the `abcloc` keyword.
top : bool, optional
Whether to try to put title and a-b-c label above the top subplot
panel (if it exists), or to always put them on the main subplot.
Default is ``True``.
rowlabels, colllabels : list of str, optional
Aliases for `leftlabels`, `toplabels`.
llabels, tlabels, rlabels, blabels : list of str, optional
Aliases for `leftlabels`, `toplabels`, `rightlabels`,
`bottomlabels`.
leftlabels, toplabels, rightlabels, bottomlabels : list of str, \
optional
The subplot row and column labels. If list, length must match
the number of subplots on the left, top, right, or bottom edges
of the figure.
figtitle, suptitle : str, optional
The figure "super" title, centered between the left edge of
the lefmost column of subplots and the right edge of the rightmost
column of subplots, and automatically offset above figure titles.
This is an improvement on matplotlib's "super" title, which just
centers the text between figure edges.
Note
----
The `abc`, `abcstyle`, `abcloc`, and `titleloc` keyword arguments
are actually rc configuration settings that are temporarily
changed by the call to `~Axes.context`. They are documented here
because it is extremely common to change them with `~Axes.format`.
They also appear in the tables in the `~proplot.rctools` documention.
See also
--------
:py:obj:`Axes.context`,
:py:obj:`XYAxes.format`,
:py:obj:`ProjAxes.format`,
:py:obj:`PolarAxes.format`,
"""
# Figure patch (for some reason needs to be re-asserted even if
# declared before figure is drawn)
kw = rc.fill({'facecolor': 'figure.facecolor'})
self.figure.patch.update(kw)
if top is not None:
self._title_above_panel = top
pad = rc['axes.titlepad']
if pad is not None:
self._set_title_offset_trans(pad)
self._title_pad = pad
# Super title
# NOTE: These are actually *figure-wide* settings, but that line
# gets blurred where we have shared axes, spanning labels, and
# whatnot. May result in redundant assignments if formatting more than
# one axes, but operations are fast so some redundancy is nbd.
# NOTE: Below workaround prevents changed *figure-wide* settings
# from getting overwritten when user makes a new axes.
fig = self.figure
suptitle = _notNone(figtitle, suptitle, None,
names=('figtitle', 'suptitle'))
if len(fig._axes_main) > 1 and rc._getitem_mode == 1:
kw = {}
else:
kw = rc.fill({
'fontsize': 'suptitle.size',
'weight': 'suptitle.weight',
'color': 'suptitle.color',
'fontfamily': 'font.family'
})
if suptitle or kw:
fig._update_figtitle(suptitle, **kw)
# Labels
llabels = _notNone(rowlabels, leftlabels, llabels,
None, names=('rowlabels', 'leftlabels', 'llabels'))
tlabels = _notNone(collabels, toplabels, tlabels,
None, names=('collabels', 'toplabels', 'tlabels'))
rlabels = _notNone(rightlabels, rlabels, None,
names=('rightlabels', 'rlabels'))
blabels = _notNone(bottomlabels, blabels, None,
names=('bottomlabels', 'blabels'))
for side, labels in zip(
('left', 'right', 'top', 'bottom'),
(llabels, rlabels, tlabels, blabels)):
kw = rc.fill({
'fontsize': side + 'label.size',
'weight': side + 'label.weight',
'color': side + 'label.color',
'fontfamily': 'font.family'
})
if labels or kw:
fig._update_labels(self, side, labels, **kw)
# A-b-c labels
titles_dict = self._titles_dict
if not self._panel_side:
# Location and text
abcstyle = rc['abc.style'] # changed or running format first time?
if 'abcformat' in kwargs: # super sophisticated deprecation system
abcstyle = kwargs.pop('abcformat')
_warn_proplot(
f'rc setting "abcformat" is deprecated. '
f'Please use "abcstyle".')
if abcstyle and self.number is not None:
if not isinstance(abcstyle, str) or (
abcstyle.count('a') != 1 and abcstyle.count('A') != 1):
raise ValueError(
f'Invalid abcstyle {abcstyle!r}. '
'Must include letter "a" or "A".')
abcedges = abcstyle.split('a' if 'a' in abcstyle else 'A')
text = abcedges[0] + _abc(self.number - 1) + abcedges[-1]
if 'A' in abcstyle:
text = text.upper()
self._abc_text = text
# Apply new settings
# Also if a-b-c label was moved, remove previous one and update
# text on new one, in case self._abc_text has not changed.
loc, obj, kw = self._get_title_props(abc=True)
iloc = self._abc_loc
obj = self._update_title(obj, **kw)
titles_dict[loc] = obj
if iloc is not None and loc != iloc:
self.abc.set_text('')
obj.set_text(self._abc_text)
self.abc = obj
self._abc_loc = loc
# Toggle visibility
# NOTE: If abc is a matplotlib 'title' attribute, making it
# invisible messes stuff up. Just set text to empty.
abc = rc['abc']
if abc is not None:
obj.set_text(self._abc_text if bool(abc) else '')
# Titles
# Tricky because we have to reconcile two workflows:
# 1. title='name' and titleloc='position'
# 2. ltitle='name', rtitle='name', etc., arbitrarily many titles
# First update existing titles
# NOTE: _update_title should never return new objects unless called
# with *inner* titles... *outer* titles will | |
0]
# return list(set(list1) | set(list2))
def attainAllButSpecifiedIndices(P, indices):
    """
    Build a boolean mask selecting every point whose stored index is NOT in
    ``indices``.

    :param P: A weighted point set as a numpy array; each row's index lives in
        its last column.
    :param indices: A numpy array of indices with respect to P.
    :return: A boolean array with True at entry i when i is NOT in indices and
        False elsewhere.
    """
    keep = np.full(P.shape[0], True)  # start by keeping every row
    # rows whose last-column index intersects `indices` must be dropped
    _, hit_rows, _ = np.intersect1d(P[:, -1], indices, return_indices=True)
    keep[hit_rows] = False
    return keep
def checkIfFileExists(file_path):
    """
    Report whether anything exists at the given filesystem path.

    :param file_path: A string containing a path of a file.
    :return: True when a file exists at ``file_path``, False otherwise.
    """
    return pathlib.Path(file_path).exists()
def createRandomInitialVector(d):
    """
    Create a random d x d orthogonal matrix whose columns can be used as
    initial vectors for regression problems.

    The matrix is stored in the module-level global ``x0`` (preserving the
    original side-effect contract) and is now also returned so callers no
    longer have to reach for the global.

    :param d: A scalar denoting the desired dimension.
    :return: The random orthogonal d x d matrix (also assigned to global x0).
    """
    global x0
    # QR of a Gaussian random matrix yields an orthogonal Q factor.
    x0, _ = np.linalg.qr(np.random.randn(d, d))
    return x0
############################################## Computational Geometry tools ############################################
def computeAffineSpan(vectors):
    """
    Compute the affine span of a set of vectors. One point v of the set is
    subtracted from the rest so the origin lies in "vectors - v"; the affine
    span then equals span(vectors - v) translated back by v.

    :param vectors: A list of vectors.
    :return: A tuple (basis, v) -- a basis for "vectors - v" and the
        translation vector v (the first input vector).
    """
    global svd
    j = len(vectors)
    v = vectors[0]
    vectors = vectors[1:]
    if len(vectors) > 0:
        A = np.vstack(vectors)  # vstack copies, so callers' arrays are not mutated
        A -= v  # make sure that the origin will be in A - A[0, :]
        _, _, V = np.linalg.svd(A)  # numpy returns vh: its *rows* are the right singular vectors
        # NOTE(review): V[:, :j] slices *columns* of vh; a basis of the row
        # space would normally be V[:j, :].T (cf. the commented-out sklearn
        # variant's components_.T). Confirm the intended orientation.
        return V[:, :j], v  # return the basis, and a translation vector
    else:
        # A single input vector: zero "basis" plus the translation point.
        return np.zeros((v.shape[0],v.shape[0])), v
def gram_schmidt(vectors):
    """
    Compute an orthonormal basis for a given set of vectors using the
    classical Gram-Schmidt procedure.

    :param vectors: An iterable / numpy array of vectors (one per row).
    :return: A numpy array whose rows form an orthonormal basis spanning the
        input vectors.
    """
    basis = []
    for v in vectors:
        # Residual of v after removing its projection onto the basis so far.
        # BUGFIX: the old code summed a generator with np.sum (deprecated and
        # unreliable); accumulate explicitly instead.
        w = np.array(v, dtype=float)
        for b in basis:
            w -= np.dot(v, b) * b
        # BUGFIX: the old test `(w > 1e-10).any()` ignored negative components,
        # silently discarding vectors whose residual points in a negative
        # direction; compare magnitudes instead.
        if np.any(np.abs(w) > 1e-10):
            basis.append(w / np.linalg.norm(w))
    return np.array(basis)
def computeDistanceToSubspace(point, X, v=None):
    """
    Distance between a point (or a 2D array of points, one per row) and a
    J-dimensional affine subspace, passed through the global OBJECTIVE_LOSS.

    :param point: A numpy array -- a single point or a 2D batch of points.
    :param X: A numpy matrix representing a basis for a J dimensional subspace.
    :param v: A numpy array representing the translation of the subspace from
        the origin.
    :return: OBJECTIVE_LOSS of the distance(s) to the subspace spanned by X
        and translated from the origin by v.
    """
    global OBJECTIVE_LOSS
    # The distance is the norm of the projection onto the orthogonal
    # complement (null space) of the subspace basis.
    if point.ndim > 1:
        # NOTE(review): this batch branch dereferences v unconditionally --
        # callers passing a 2D `point` must supply v; only the 1D branch
        # below tolerates v is None. Confirm whether that asymmetry is intended.
        return OBJECTIVE_LOSS(np.linalg.norm(np.dot(point - v[np.newaxis, :], null_space(X)), ord=2, axis=1))
    return OBJECTIVE_LOSS(np.linalg.norm(np.dot(point-v if v is not None else point, null_space(X))))
def computeRectangle(V, side_length=1):
    """
    Build the side vectors of a rectangle from an "orthogonal matrix" by
    appending the negated set of vectors, then scaling every side.

    :param V: A list of vectors which are columns of an orthogonal matrix.
    :param side_length: The desired length of each rectangle edge.
    :return: A numpy array whose rows are the rectangle's side vectors.
    """
    original_sides = copy.deepcopy(V)  # never mutate the caller's vectors
    mirrored_sides = [-side for side in original_sides]  # the negated set
    # vstack flattens the nested mirrored list into extra rows; scale at the end.
    return side_length * np.vstack(original_sides + [mirrored_sides])
def computeRectangles(rectangle, desired_eps, t, v):
    """
    Dissect a rectangle into sub-rectangles whose union is the input
    rectangle, on the order of 1/desired_eps^|rectangle| cells.

    :param rectangle: A numpy array of vectors which are the edges of a rectangle.
    :param desired_eps: A scalar; the fractional side length of each sub-rectangle.
    :param t: A scalar counting the number of sides of the rectangle
        (currently unused in this body).
    :param v: A translation vector added to every sub-rectangle.
    :return: A list of sub-rectangles (each a 2-row numpy array of endpoints).
    """
    # 1/desired_eps coefficients dissecting each positive half-side into equal parts.
    coefs = np.arange(desired_eps, 1+desired_eps, desired_eps)
    # Mirror the coefficients to cover the negative half of each side as well.
    all_coefs = np.hstack((-np.flip(coefs), coefs))
    # One coefficient axis per positive side of the rectangle.
    end_points = [all_coefs for i in range(rectangle.shape[0] // 2)]
    points = np.meshgrid(*end_points)  # grid of all coefficient combinations
    points = np.array([p.flatten() for p in points])  # one column per combination
    Rs = []  # collected sub-rectangles
    # NOTE(review): the guard `j + 1 < points.shape[1] - 1` skips the last two
    # columns (rather than just the last), and `points[i, j]` pairs the row
    # index i with side index i -- this looks like a possible off-by-one /
    # index mix-up. Confirm against the intended construction before relying
    # on full coverage of the rectangle.
    for j in range(points.shape[1]):
        if j + 1 < points.shape[1] - 1:
            for i in range(points[:, j].shape[0]):
                Rs.append(np.vstack((np.array([points[i, j] * rectangle[i, :]]),
                                     np.array([points[i, j+1] * rectangle[i, :]]))) + v[np.newaxis, :])
    return Rs
def computePointsInRectangle(P, rectangle, v):
    """
    Return the subset of P lying inside a given rectangle.

    :param P: A PointSet object (weighted point set); P.P holds the raw points
        with each point's index stored in the last column.
    :param rectangle: A numpy array containing the side edges of a rectangle.
    :param v: A translation vector with respect to the rectangle.
    :return: A tuple (subset, idxs): the weighted subset of points inside the
        rectangle and their row indices in P, or (None, None) when no point
        falls inside (1-D case only).
    """
    if rectangle.shape[0] >= 2 * rectangle.shape[1]:  # requirement for ConvexHull-like methods
        polyhedra = Delaunay(rectangle)  # triangulate the rectangle's vertices
        # A point is inside iff the simplex lookup succeeds (>= 0).
        idxs = np.where(np.apply_along_axis(lambda p: polyhedra.find_simplex(p) >= 0, 1, P.P[:, :-1]))[0]
        return P.attainSubset(idxs), idxs
    else:  # 1-dimensional rectangle, i.e. a bounded line segment
        # Orthogonal complement of the line's direction vector.
        orth_line = null_space((rectangle[0, :] - v)[:, np.newaxis].T)
        # Distance of each point from the (infinite) line through the segment.
        Q = np.linalg.norm((P.P[:, :-1]-v).dot(orth_line), axis=1)
        idxs = np.where(Q == 0)[0]  # points exactly on the line's span
        if idxs.size == 0:  # no points lie on the line at all
            return None, None
        else:
            # Of the on-line points, keep those within the bounded range
            # [rectangle[0, :], rectangle[1, :]].
            Q = np.where(np.apply_along_axis(lambda p: np.all(np.logical_and(np.greater_equal(p, rectangle[0, :]),
                                                                             np.less_equal(p, rectangle[1, :]))),
                                             arr=P.P[idxs, :-1], axis=1))[0]
            if Q.size == 0:  # no point inside the bounded segment
                return None, None
            else:
                return P.attainSubset(idxs[Q]), idxs[Q]
#################################################### Embeddings ########################################################
def stabledevd(p):
    """
    Draw one sample from a symmetric alpha-stable distribution with stability
    parameter ``p``, via the Chambers-Mallows-Stuck construction. beta is set
    to 0 by analogy with the normal and Cauchy cases so the law is symmetric.

    :param p: stability parameter (0 < p <= 2).
    :return: a single random deviate.
    """
    # NOTE: reseeding from OS entropy on every call was the original behaviour
    # (presumably to decorrelate forked workers -- confirm); it makes the draw
    # irreproducible.
    np.random.seed()
    theta = np.pi * (np.random.uniform(0, 1) - 0.5)
    W = -np.log(np.random.uniform(0, 1))  # standard exponential via inverse CDF
    # BUGFIX: np.pow does not exist in NumPy < 2.0 (AttributeError at runtime);
    # use np.power, which is available everywhere.
    left = np.sin(p * theta) / np.power(np.cos(theta), 1.0 / p)
    right = np.power(np.cos(theta * (1.0 - p)) / W, (1.0 - p) / p)
    return left * right
def computeSparseEmbeddingMatrix(d):
    """
    Build a sparse embedding matrix S of shape (d, m): each column holds ``s``
    nonzero entries at uniformly random rows, each set to +/- 1/sqrt(s).

    Relies on the module-level globals ``Z`` and ``J``.

    :param d: input dimension (number of rows); must satisfy s <= d since the
        nonzero positions are sampled without replacement.
    :return: the (d, m) sparse embedding matrix.
    """
    global Z, J
    # BUGFIX: true division made `s` a float, and numpy rejects float `size`
    # arguments (np.random.choice raises TypeError); cast to int as the
    # m computation below already did.
    s = int(Z ** 3 / 0.5)
    m = int(J ** 2 / 0.5 ** Z)
    S = np.zeros((d, m))
    for col in range(m):
        rows = np.random.choice(np.arange(d), size=s, replace=False)
        S[rows, col] = np.random.choice([-1, 1], size=s) / np.sqrt(s)
    return S
def computeDistortionEmbeddingLp(n,d):
global Z
omega = 10
s = int(omega * d ** 5 * np.log(d) ** 5)
e = np.eye(N=s, M=1, dtype=np.float)
S = np.zeros((s, n))
np.random.seed()
idxs = np.random.choice(a=np.arange(s), size=(n, ))
D = np.zeros((1,n))
for i in range(n):
S[:, | |
score#cl_weight*(1/npmean(distances_sel))
else: return None, None
if enter == 1:
if npmean(distances) <= 0.05: return 1.0
if npmean(distances) == 0.0: return 1.0
return score
else: return None, None
    def envelope_score(self,map_target, primary_boundary, structure_instance,norm=True):
        """
        Calculate the envelope score between a target Map and a Structure
        instance.

        Arguments:
            *map_target*
                Target Map instance.
            *primary_boundary*
                Threshold used to binarise the target map via
                map_target.make_bin_map(primary_boundary).
            *structure_instance*
                Structure instance to compare.
            *norm*
                If True (default), rescale the raw score into [0, 1] using the
                best/worst achievable scores for this map.
        Return:
            Envelope score (normalised when norm=True).
        """
        binMap = map_target.make_bin_map(primary_boundary)
        # Best and worst achievable scores, used for normalisation below.
        max_score = float(-2*numsum(binMap.fullMap))
        min_score = float(numsum(binMap.fullMap)-2*numsum(binMap.fullMap+1))
        blurrer = StructureBlurrer()
        # Binary overlay of the structure's atoms on the target map's grid.
        struct_binMap = blurrer.make_atom_overlay_map1(map_target, structure_instance)
        grid = struct_binMap.get_pos(0.9,1.1)
        # Re-label voxels covered by the structure: reward overlap with the
        # map envelope (-1 -> 2), penalise falling outside it (0 -> -2).
        for x,y,z in grid:
            g = binMap[z][y][x]
            if g == -1:
                binMap[z][y][x] = 2
            elif g == 0:
                binMap[z][y][x] = -2
        score = float(numsum(binMap.fullMap))
        if norm:
            norm_score = float((score-min_score)/(max_score-min_score))
            return norm_score
        else:
            return score
    def envelope_score_map(self,map_target, map_probe,map_target_threshold=0,map_probe_threshold=0,norm=True):
        """
        Calculate the envelope score between two Map instances using numpy
        arrays.

        Arguments:
            *map_target, map_probe*
                EMMap instances to compare.
            *map_target_threshold, map_probe_threshold*
                EMMap thresholds; when left at 0 (default) they are derived
                via calculate_map_threshold.
            *norm*
                If True (default), rescale the raw score into [0, 1].
        Return:
            Envelope score. NOTE: returns None implicitly when the two maps
            do not match (mapComparison fails).
        """
        if self.mapComparison(map_target, map_probe):
            if map_target_threshold==0:
                map_target_threshold=self.calculate_map_threshold(map_target)
            if map_probe_threshold==0:
                map_probe_threshold=self.calculate_map_threshold(map_probe)
            binMap = map_target.make_bin_map(map_target_threshold)
            # Best and worst achievable scores, used for normalisation below.
            max_score = float(-2*numsum(binMap.fullMap))
            min_score = float(numsum(binMap.fullMap)-2*numsum(binMap.fullMap+1))
            struct_binMap = map_probe.make_bin_map(map_probe_threshold)
            # Combine the two binary maps so a 4-bin histogram separates the
            # voxel categories, then score agreement against disagreement.
            newMap=binMap.fullMap+2*struct_binMap.fullMap
            hist_array=histogram(newMap,4)
            score=2*hist_array[0][0]-(2*(hist_array[0][1]))-(hist_array[0][2])
            if norm:
                norm_score = float((score-min_score))/(max_score-min_score)
                return norm_score
            else:
                return score
#calculate percent of overlap for two contoured maps
def _percent_overlap(self,map_target,map_probe,map_target_threshold,map_probe_threshold,flagsize=0):
"""
Calculate the fraction of overlap between two map grids.
Arguments:
*map_target, map_probe*
EMMap instances to compare.
*map_target_threshold,map_probe_threshold*
map contour thresholds for map_target and map_probe.
Return:
Percent overlap with respect to smaller grid
"""
if self.mapComparison(map_target,map_probe):
# contour the first map
binmap1 = map_target.fullMap > float(map_target_threshold)
binmap2 = map_probe.fullMap > float(map_probe_threshold)
# percent calculated on the smaller contoured volume (can be changed)
minim = len(map_target.fullMap[binmap1])
if len(map_probe.fullMap[binmap2]) < minim: minim = len(map_probe.fullMap[binmap2])
maskmap = (binmap1*binmap2) > 0
if flagsize == 1: return numsum(maskmap), numsum(binmap1), numsum(binmap2)
#print numsum(binmap1),numsum(binmap2),numsum(maskmap),minim
if not minim == 0.0: return float(len(map_target.fullMap[maskmap]))/minim
else:
print("Check map contour!!")
return 0.0
else:
print("@@@ Maps could not be matched")
return -1.0
    def SCCC(self,map_target,resolution_densMap,sigma_map,structure_instance,rigid_body_structure,write=False,c_mode=True):
        """
        Segment-based cross-correlation (SCCC) from Pandurangan et al.,
        J Struct Biol, 2013: a local CCC computed around a selection of atoms.

        Arguments:
            *map_target*
                Target Map instance.
            *resolution_densMap*
                Resolution of the target map (needed by the Structure Blurrer).
            *sigma_map*
                Sigma coefficient (multiplied by the resolution) controlling
                the width of the blurring Gaussian. Default 0.356. Common
                alternatives: 0.187R (Situs), 0.356R (Chimera default),
                0.425R (FlexEM), 0.5R (Chimera option), 1R (NMFF).
            *structure_instance*
                Full-model Structure instance to compare.
            *rigid_body_structure*
                Rigid-body Structure instance defining the local segment.
            *write*
                If True, return a formatted report line instead of the score.
            *c_mode*
                Passed through to CCC_map as ``cmode``.
        Return:
            SCCC score (or a report string when write=True).
        """
        blurrer = StructureBlurrer()
        scorer = ScoringFunctions()
        outline = ""
        resolution_densMap=float(resolution_densMap)
        # Blur the whole model and the rigid body into simulated maps on the
        # target map's grid.
        whole_fit_map = blurrer.gaussian_blur(structure_instance, resolution_densMap, densMap=map_target, sigma_coeff=sigma_map, normalise=True)
        sim_map = blurrer.gaussian_blur(rigid_body_structure, resolution_densMap, densMap=map_target, sigma_coeff=sigma_map, normalise=True)
        minDens = sim_map.std()
        sim_mask_array = sim_map._get_maskArray(minDens)
        # Apply the rigid body's mask to the experimental and simulated maps.
        mask_emMap=map_target._get_maskMap(sim_mask_array)
        mask_simMap = whole_fit_map._get_maskMap(sim_mask_array)
        # Local CCC restricted to the masked region.
        sse_lccf,ov=scorer.CCC_map(mask_emMap,mask_simMap,cmode=c_mode)
        if write==True:
            outline+='SCCC for segment %f\n'%(sse_lccf)
            return outline
        return sse_lccf
    def SCCC_LAP(self,map_target,resolution_densMap,sigma_map,structure_instance,rigid_body_structure,write=False):
        """
        Laplacian-filtered variant of the segment-based cross-correlation
        (Pandurangan et al., J Struct Biol, 2013): a local CCC around a
        selection of atoms, scored with laplace_CCC.

        Arguments:
            *map_target*
                Target Map instance.
            *resolution_densMap*
                Resolution of the target map (needed by the Structure Blurrer).
            *sigma_map*
                Sigma coefficient (multiplied by the resolution) controlling
                the width of the blurring Gaussian. Default 0.356. Common
                alternatives: 0.187R (Situs), 0.356R (Chimera default),
                0.425R (FlexEM), 0.5R (Chimera option), 1R (NMFF).
            *structure_instance*
                Full-model Structure instance to compare.
            *rigid_body_structure*
                Rigid-body Structure instance defining the local segment.
            *write*
                If True, return a formatted report line instead of the score.
        Return:
            SCCC score (or a report string when write=True).
        """
        blurrer = StructureBlurrer()
        scorer = ScoringFunctions()
        outline = ""
        resolution_densMap=float(resolution_densMap)
        # Blur the whole model and the rigid body into simulated maps on the
        # target map's grid.
        whole_fit_map = blurrer.gaussian_blur(structure_instance, resolution_densMap, densMap=map_target, sigma_coeff=sigma_map, normalise=True)
        sim_map = blurrer.gaussian_blur(rigid_body_structure, resolution_densMap, densMap=map_target, sigma_coeff=sigma_map, normalise=True)
        minDens = sim_map.std()
        sim_mask_array = sim_map._get_maskArray(minDens)
        # Apply the rigid body's mask to the experimental and simulated maps.
        mask_emMap=map_target._get_maskMap(sim_mask_array)
        mask_simMap = whole_fit_map._get_maskMap(sim_mask_array)
        # Laplacian-filtered CCC restricted to the masked region.
        sse_lccf=scorer.laplace_CCC(mask_emMap,mask_simMap)
        if write==True:
            outline+='SCCC for segment %f\n'%(sse_lccf)
            return outline
        return sse_lccf
    def SCCC_MI(self,map_target,resolution_densMap,sigma_map,structure_instance,rigid_body_structure,write=False):
        """
        Mutual-information variant of the segment-based cross-correlation
        (Pandurangan et al., J Struct Biol, 2013): a local score around a
        selection of atoms, computed with MI instead of CCC.

        Arguments:
            *map_target*
                Target Map instance.
            *resolution_densMap*
                Resolution of the target map (needed by the Structure Blurrer).
            *sigma_map*
                Sigma coefficient (multiplied by the resolution) controlling
                the width of the blurring Gaussian. Default 0.356. Common
                alternatives: 0.187R (Situs), 0.356R (Chimera default),
                0.425R (FlexEM), 0.5R (Chimera option), 1R (NMFF).
            *structure_instance*
                Full-model Structure instance to compare.
            *rigid_body_structure*
                Rigid-body Structure instance defining the local segment.
            *write*
                If True, return a formatted report line instead of the score.
        Return:
            SCCC score (or a report string when write=True).
        """
        blurrer = StructureBlurrer()
        scorer = ScoringFunctions()
        outline = ""
        resolution_densMap=float(resolution_densMap)
        # Blur the whole model and the rigid body into simulated maps on the
        # target map's grid.
        whole_fit_map = blurrer.gaussian_blur(structure_instance, resolution_densMap, densMap=map_target, sigma_coeff=sigma_map, normalise=True)
        sim_map = blurrer.gaussian_blur(rigid_body_structure, resolution_densMap, densMap=map_target, sigma_coeff=sigma_map, normalise=True)
        minDens = sim_map.std()
        sim_mask_array = sim_map._get_maskArray(minDens)
        # Apply the rigid body's mask to the experimental and simulated maps.
        mask_emMap=map_target._get_maskMap(sim_mask_array)
        mask_simMap = whole_fit_map._get_maskMap(sim_mask_array)
        # Mutual information restricted to the masked region.
        sse_lccf=scorer.MI(mask_emMap,mask_simMap)
        if write==True:
            outline+='SCCC for segment %f\n'%(sse_lccf)
            return outline
        return sse_lccf
def calc_moc(self,indices,map_probe,map_target):
map_target_mask = map_target.fullMap[indices]
##map_target_mask = map_target_mask - float(map_target_mask.sum()/len(map_target_mask))
map_probe_mask = map_probe.fullMap[indices]
##map_probe_mask = map_probe_mask - float(map_probe_mask.sum()/len(map_probe_mask))
num = numsum(map_target_mask * map_probe_mask)
den = sqrt(numsum(square(map_target_mask))*numsum(square(map_probe_mask)))
if den == 0.0: return -1.0
return num/den
def SMOC(self,map_target,resolution_densMap,structure_instance,win=11,rigid_body_file=None,sigma_map=0.225,write=False,c_mode=True):
"""
Calculate Local cross correlation (Mander's Overlap)
It is a local Overlap Coefficient calculated on atoms in sliding residue windows along the chain.
Arguments:
*map_target*
Target Map Instance.
*resolution_densMap*
Parameter need for Structure Blurrer.
Resolution of the target map.
*structure_instance*
Model structure instance.
*win*
Overlapping Window length to calculate the score
*rigid_body_file*
Rigid-body file.
*sigma_map*
Parameter need for Structure Blurrer.
The sigma value (multiplied by the resolution) that controls the width of the Gaussian.
Default values is 0.356.
Other values used :
0.187R corresponding with the Gaussian width of the Fourier transform falling to half the maximum at 1/resolution, as used in Situs (Wriggers et al, 1999);
0.356R corresponding to the Gaussian width at 1/e maximum height equaling the resolution, | |
<gh_stars>100-1000
# -*- coding: utf-8 -*-
"""
chemdataextractor.text
~~~~~~~~~~~~~~~~~~~~~~
Tools for processing text.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import unicodedata
from bs4 import UnicodeDammit
#: Control characters.
CONTROLS = {
'\u0001', '\u0002', '\u0003', '\u0004', '\u0005', '\u0006', '\u0007', '\u0008', '\u000e', '\u000f', '\u0011',
'\u0012', '\u0013', '\u0014', '\u0015', '\u0016', '\u0017', '\u0018', '\u0019', '\u001a', '\u001b',
}
# There are further control characters, but they are instead replaced with a space by unicode normalization
# '\u0009', '\u000a', '\u000b', '\u000c', '\u000d', '\u001c', '\u001d', '\u001e', '\u001f'
#: Hyphen and dash characters.
HYPHENS = {
'-', # \u002d Hyphen-minus
'‐', # \u2010 Hyphen
'‑', # \u2011 Non-breaking hyphen
'⁃', # \u2043 Hyphen bullet
'‒', # \u2012 figure dash
'–', # \u2013 en dash
'—', # \u2014 em dash
'―', # \u2015 horizontal bar
}
#: Minus characters.
MINUSES = {
'-', # \u002d Hyphen-minus
'−', # \u2212 Minus
'-', # \uff0d Full-width Hyphen-minus
'⁻', # \u207b Superscript minus
}
#: Plus characters.
PLUSES = {
'+', # \u002b Plus
'+', # \uff0b Full-width Plus
'⁺', # \u207a Superscript plus
}
#: Slash characters.
SLASHES = {
'/', # \u002f Solidus
'⁄', # \u2044 Fraction slash
'∕', # \u2215 Division slash
}
#: Tilde characters.
TILDES = {
'~', # \u007e Tilde
'˜', # \u02dc Small tilde
'⁓', # \u2053 Swung dash
'∼', # \u223c Tilde operator
'∽', # \u223d Reversed tilde
'∿', # \u223f Sine wave
'〜', # \u301c Wave dash
'~', # \uff5e Full-width tilde
}
#: Apostrophe characters.
APOSTROPHES = {
"'", # \u0027
'’', # \u2019
'՚', # \u055a
'Ꞌ', # \ua78b
'ꞌ', # \ua78c
''', # \uff07
}
#: Single quote characters.
SINGLE_QUOTES = {
"'", # \u0027
'‘', # \u2018
'’', # \u2019
'‚', # \u201a
'‛', # \u201b
}
#: Double quote characters.
DOUBLE_QUOTES = {
'"', # \u0022
'“', # \u201c
'”', # \u201d
'„', # \u201e
'‟', # \u201f
}
#: Accent characters.
ACCENTS = {
'`', # \u0060
'´', # \u00b4
}
#: Prime characters.
PRIMES = {
'′', # \u2032
'″', # \u2033
'‴', # \u2034
'‵', # \u2035
'‶', # \u2036
'‷', # \u2037
'⁗', # \u2057
}
#: Quote characters, including apostrophes, single quotes, double quotes, accents and primes.
QUOTES = APOSTROPHES | SINGLE_QUOTES | DOUBLE_QUOTES | ACCENTS | PRIMES
#: Uppercase and lowercase greek letters.
GREEK = {
'Α', # \u0391
'Β', # \u0392
'Γ', # \u0393
'Δ', # \u0394
'Ε', # \u0395
'Ζ', # \u0396
'Η', # \u0397
'Θ', # \u0398
'Ι', # \u0399
'Κ', # \u039a
'Λ', # \u039b
'Μ', # \u039c
'Ν', # \u039d
'Ξ', # \u039e
'Ο', # \u039f
'Π', # \u03a0
'Ρ', # \u03a1
'Σ', # \u03a3
'Τ', # \u03a4
'Υ', # \u03a5
'Φ', # \u03a6
'Χ', # \u03a7
'Ψ', # \u03a8
'Ω', # \u03a9
'α', # \u03b1
'β', # \u03b2
'γ', # \u03b3
'δ', # \u03b4
'ε', # \u03b5
'ζ', # \u03b6
'η', # \u03b7
'θ', # \u03b8
'ι', # \u03b9
'κ', # \u03ba
'λ', # \u03bb
'μ', # \u03bc
'ν', # \u03bd
'ξ', # \u03be
'ο', # \u03bf
'π', # \u03c0
'ρ', # \u03c1
'σ', # \u03c3
'τ', # \u03c4
'υ', # \u03c5
'φ', # \u03c6
'χ', # \u03c7
'ψ', # \u03c8
'ω', # \u03c9
}
#: Names of greek letters spelled out as words.
GREEK_WORDS = {
'Alpha', 'Beta', 'Gamma', 'Delta', 'Epsilon', 'Zeta', 'Eta', 'Theta', 'Iota', 'Kappa', 'Lambda', 'Mu', 'Nu', 'Xi',
'Omicron', 'Pi', 'Rho', 'Sigma', 'Tau', 'Upsilon', 'Phi', 'Chi', 'Psi', 'Omega', 'alpha', 'beta', 'gamma', 'delta',
'epsilon', 'zeta', 'eta', 'theta', 'iota', 'kappa', 'lamda', 'mu', 'nu', 'xi', 'omicron', 'pi', 'rho', 'sigma',
'tau', 'upsilon', 'phi', 'chi', 'psi', 'omega'
}
#: Words that should not be capitalized in titles.
SMALL = {
'a', 'an', 'and', 'as', 'at', 'but', 'by', 'en', 'for', 'if', 'in', 'of', 'on', 'or', 'the', 'to', 'v', 'v', 'via',
'vs', 'vs'
}
#: Words that should not be capitalized in names.
NAME_SMALL = {
'abu', 'bon', 'bin', 'da', 'dal', 'de', 'del', 'der', 'de', 'di', u'dí', 'ibn', 'la', 'le', 'san', 'st', 'ste',
'van', 'vel', 'von', 'y'
}
# This isn't every possible TLD, just the most common, to avoid false positives.
TLDS = {
'aero', 'asia', 'biz', 'cat', 'com', 'coop', 'edu', 'eu', 'gov', 'info', 'int', 'jobs', 'mil', 'mobi', 'museum',
'name', 'net', 'org', 'pro', 'tel', 'travel', 'xxx', 'ad', 'as', 'ar', 'au', 'br', 'bz', 'ca', 'cc', 'cd', 'co',
'ch', 'cn', 'de', 'dj', 'es', 'fr', 'fm', 'it', 'io', 'jp', 'la', 'ly', 'me', 'ms', 'nl', 'no', 'nu', 'ru', 'sc',
'se', 'sr', 'su', 'tk', 'tv', 'uk', 'us', 'ws'
}
#: A variety of numbers, spelled out as words.
NUMBERS = {
'zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten', 'eleven', 'twelve',
'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen', 'twenty', 'thirty', 'forty',
'fifty', 'sixty', 'seventy', 'eighty', 'ninety', 'hundred', 'thousand', 'million', 'billion', 'trillion'
}
#: Regular expression that matches email addresses.
EMAIL_RE = re.compile(r'([\w\-\.\+%]+@(\w[\w\-]+\.)+[\w\-]+)', re.I | re.U)
#: Regular expression that matches DOIs.
DOI_RE = re.compile(r'^10\.\d{4,9}/[-\._;()/:A-Z0-9]+$', re.U)
#: Regular expression that matches ISSNs.
ISSN_RE = re.compile(r'^\d{4}-\d{3}[\dX]$', re.U)
#: Regular expression that matches control characters not allowed in XML.
CONTROL_RE = re.compile('[^\u0020-\uD7FF\u0009\u000A\u000D\uE000-\uFFFD\u10000-\u10FFFF]+')
def get_encoding(input_string, guesses=None, is_html=False):
    """Return the encoding of a byte string. Uses bs4 UnicodeDammit.

    :param string input_string: Encoded byte string.
    :param guesses: (Optional) A single encoding name or a list of encoding
        guesses to prioritize.
    :param bool is_html: Whether the input is HTML.
    :return: Name of the detected original encoding.
    """
    # BUGFIX: the docstring promised a list of guesses, but the old code
    # always wrapped the argument in another list, so list input became a
    # useless nested list. Accept a single name or a list of names.
    if guesses is None:
        override = []
    elif isinstance(guesses, str):
        override = [guesses]
    else:
        override = list(guesses)
    converted = UnicodeDammit(input_string, override_encodings=override, is_html=is_html)
    return converted.original_encoding
def levenshtein(s1, s2, allow_substring=False):
    """Return the Levenshtein distance between two strings.

    The Levenshtein distance (a.k.a. "edit distance") is the number of
    character substitutions, insertions or deletions needed to turn s1 into
    s2. With ``allow_substring=True``, s1 may match as a substring of s2, so
    e.g. "hello" vs "hello there" scores zero.

    :param string s1: The first string
    :param string s2: The second string
    :param bool allow_substring: Whether to allow s1 to be a substring of s2
    :returns: Levenshtein distance.
    :rtype int
    """
    len1, len2 = len(s1), len(s2)
    # Rolling two-row dynamic programme; in substring mode the first row is
    # all zeros so a match may start anywhere in s2.
    previous_row = [0] * (len2 + 1) if allow_substring else list(range(len2 + 1))
    for i, c1 in enumerate(s1):
        current_row = [i + 1] + [0] * len2
        for j, c2 in enumerate(s2):
            current_row[j + 1] = min(previous_row[j + 1] + 1,   # deletion
                                     current_row[j] + 1,        # insertion
                                     previous_row[j] + (c1 != c2))  # substitution
        previous_row = current_row
    # In substring mode the match may end anywhere in s2: take the best cell.
    return min(previous_row) if allow_substring else previous_row[len2]
def bracket_level(text, open={'(', '[', '{'}, close={')', ']', '}'}):
    """Return the net bracket nesting depth of ``text``.

    0 means the string's brackets are balanced (or it has none); positive
    means unclosed openers remain, negative means extra closers.
    """
    depth = 0
    for ch in text:
        # bools are ints: +1 for an opener, -1 for a closer, 0 otherwise
        depth += (ch in open) - (ch in close)
    return depth
def is_punct(text):
    """Return True when every character in ``text`` is Unicode punctuation.

    An empty string counts as punctuation (vacuous truth), matching the
    original loop-with-else behaviour.
    """
    return all(unicodedata.category(ch).startswith('P') for ch in text)
def is_ascii(text):
    """Return True when every character in ``text`` is 7-bit ASCII.

    An empty string counts as ASCII (vacuous truth), matching the original
    loop-with-else behaviour.
    """
    return all(ord(ch) < 128 for ch in text)
def like_url(text):
    """Return True when ``text`` looks like a URL.

    Accepts an explicit http/https scheme, a ``www.`` prefix, or a trailing
    known top-level domain (optionally with a port or path).
    """
    if len(text) < 1:
        return False
    # An explicit scheme is enough on its own.
    # FIX: also recognise https:// (the original only accepted http://).
    if text.startswith(('http://', 'https://')):
        return True
    elif text.startswith('www.') and len(text) >= 5:
        return True
    if len(text) < 2 or text[0] == '.' or text[-1] == '.' or '.' not in text:
        return False
    # Candidate top-level domain: text after the last dot, any port stripped.
    tld = text.rsplit('.', 1)[1].split(':', 1)[0]
    if tld.endswith('/'):
        return True
    if tld.isalpha() and tld in TLDS:
        return True
    return False
def like_number(text):
    """Return True when ``text`` looks like a number.

    Handles plain digit strings (with commas and decimal points stripped),
    simple fractions like "1/2", and spelled-out number words from NUMBERS.
    """
    # Commas and decimal points are cosmetic for this check.
    text = text.replace(',', '').replace('.', '')
    if text.isdigit():
        return True
    if text.count('/') == 1:
        numerator, denominator = text.split('/')
        if like_number(numerator) and like_number(denominator):
            return True
    return text in NUMBERS
def word_shape(text):
prev_m = ''
seq = 0
shape = []
for c in text:
if c.isdigit():
m = 'd' # Digits
elif c in GREEK:
m = 'g' # Greek letters
elif c.isalpha():
m = 'X' if c.isupper() else 'x' # Uppercase or lowercase alphabetical
elif c in QUOTES:
m = "'" # Quotes and apostrophes
elif c in {':', ';'}:
m = ':' # Colons and semicolons
elif c in {'!', '?', '.'}:
m = '.' # Sentence ends
elif c in {'(', '[', '{', ')', ']', '}'}:
m = 'b' # Brackets
elif c in {'°', '%'}:
m = 'u' # units
elif c in {'■', '◼', '●', '▲', '○', '◆', '▼', '⧫', '△', '◇', '▽', '⬚', '□'}:
m = 'l' # list markers
elif c in {',', '$', '&', '-'}:
m = c # Stay the same
else:
| |
a blueprint/deployment resource to target_path.
This mirrors ctx.download_resource, but for workflow contexts.
See CloudifyContext.download_resource.
"""
return self._internal.handler.download_deployment_resource(
resource_path=resource_path,
target_path=target_path)
def send_event(self, event, event_type='workflow_stage',
args=None, additional_context=None):
"""Sends a workflow event
:param event: The event
:param event_type: The event type
:param args: additional arguments that may be added to the message
:param additional_context: additional context to be added to the
context
"""
return self._process_task(SendWorkflowEventTask(
event=event,
event_type=event_type,
event_args=args,
additional_context=additional_context,
workflow_context=self,
))
    def _execute_operation(self,
                           operation,
                           node_instance,
                           operations,
                           related_node_instance=None,
                           kwargs=None,
                           allow_kwargs_override=False,
                           send_task_events=DEFAULT_SEND_TASK_EVENTS):
        """Build and dispatch the task that runs a node operation.

        :param operation: operation name to look up in ``operations``
        :param node_instance: the node instance the operation runs on
        :param operations: mapping of operation name -> operation struct
        :param related_node_instance: the other side of a relationship
            operation, if any
        :param kwargs: extra operation inputs, merged over the declared ones
        :param allow_kwargs_override: allow ``kwargs`` to redefine declared
            operation inputs instead of raising
        :param send_task_events: whether task events should be emitted
        :return: the task to execute; a NOP task when the operation is not
            mapped to an implementation
        """
        kwargs = kwargs or {}
        op_struct = operations.get(operation, {})
        # Unmapped operation: nothing to run for this node.
        if not op_struct.get('operation'):
            return NOPLocalWorkflowTask(self)
        plugin_name = op_struct['plugin']
        # could match two plugins with different executors, one is enough
        # for our purposes (extract package details)
        try:
            plugin = [p for p in node_instance.node.plugins
                      if p['name'] == plugin_name][0]
        except IndexError:
            raise RuntimeError('Plugin not found: {0}'.format(plugin_name))
        operation_mapping = op_struct['operation']
        has_intrinsic_functions = op_struct['has_intrinsic_functions']
        operation_properties = op_struct.get('inputs', {})
        operation_executor = op_struct['executor']
        operation_total_retries = op_struct['max_retries']
        operation_retry_interval = op_struct['retry_interval']
        operation_timeout = op_struct.get('timeout', None)
        operation_timeout_recoverable = op_struct.get('timeout_recoverable',
                                                      None)
        task_name = operation_mapping
        # Fall back to the workflow-level retry configuration when the
        # operation does not declare its own.
        if operation_total_retries is None:
            total_retries = self.internal.get_task_configuration()[
                'total_retries']
        else:
            total_retries = operation_total_retries
        if plugin and plugin['package_name']:
            plugin = self.internal.handler.get_plugin(plugin)
        # Operation context forwarded to the executing worker.
        node_context = {
            'node_id': node_instance.id,
            'node_name': node_instance.node_id,
            'plugin': {
                'name': plugin_name,
                'package_name': plugin.get('package_name'),
                'package_version': plugin.get('package_version'),
                'visibility': plugin.get('visibility'),
                'tenant_name': plugin.get('tenant_name'),
                'source': plugin.get('source')
            },
            'operation': {
                'name': operation,
                'retry_number': 0,
                'max_retries': total_retries
            },
            'has_intrinsic_functions': has_intrinsic_functions,
            'host_id': node_instance._node_instance.host_id,
            'executor': operation_executor
        }
        # central deployment agents run on the management worker
        # so we pass the env to the dispatcher so it will be on a per
        # operation basis
        if operation_executor == 'central_deployment_agent':
            agent_context = self.bootstrap_context.get('cloudify_agent', {})
            node_context['execution_env'] = agent_context.get('env', {})
        # Relationship operations also describe the related node and which
        # side of the relationship this instance is on.
        if related_node_instance is not None:
            relationships = [rel.target_id
                             for rel in node_instance.relationships]
            node_context['related'] = {
                'node_id': related_node_instance.id,
                'node_name': related_node_instance.node_id,
                'is_target': related_node_instance.id in relationships
            }
        final_kwargs = self._merge_dicts(merged_from=kwargs,
                                         merged_into=operation_properties,
                                         allow_override=allow_kwargs_override)
        return self.execute_task(
            task_name,
            local=self.local,
            kwargs=final_kwargs,
            node_context=node_context,
            send_task_events=send_task_events,
            total_retries=total_retries,
            retry_interval=operation_retry_interval,
            timeout=operation_timeout,
            timeout_recoverable=operation_timeout_recoverable)
@staticmethod
def _merge_dicts(merged_from, merged_into, allow_override=False):
result = copy.copy(merged_into)
for key, value in merged_from.items():
if not allow_override and key in merged_into:
raise RuntimeError('Duplicate definition of {0} in operation'
' properties and in kwargs. To allow '
'redefinition, pass '
'"allow_kwargs_override" to '
'"execute_operation"'.format(key))
result[key] = value
return result
    def update_execution_status(self, new_status):
        """Updates the execution status to new_status.
        Note that the workflow status gets automatically updated before and
        after its run (whether the run succeeded or failed)

        :param new_status: the status string to set on the execution
        :return: whatever ``_process_task`` yields for the status-update task
        """
        # The update is modeled as a workflow task so that it participates in
        # the same task-processing pipeline (graph mode vs. immediate apply).
        return self._process_task(UpdateExecutionStatusTask(
            status=new_status,
            workflow_context=self,
        ))
def _build_cloudify_context(self,
task_id,
task_name,
node_context,
timeout,
timeout_recoverable):
node_context = node_context or {}
context = {
'__cloudify_context': '0.3',
'type': 'operation',
'task_id': task_id,
'task_name': task_name,
'execution_id': self.execution_id,
'workflow_id': self.workflow_id,
'tenant': self.tenant,
'timeout': timeout,
'timeout_recoverable': timeout_recoverable
}
context.update(node_context)
context.update(self.internal.handler.operation_cloudify_context)
return context
    def execute_task(self,
                     task_name,
                     local=True,
                     task_queue=None,
                     task_target=None,
                     kwargs=None,
                     node_context=None,
                     send_task_events=DEFAULT_SEND_TASK_EVENTS,
                     total_retries=None,
                     retry_interval=None,
                     timeout=None,
                     timeout_recoverable=None):
        """
        Execute a task

        :param task_name: the task name
        :param local: if True, dispatch in-process via ``local_task``;
            otherwise create a ``remote_task``
        :param task_queue: queue to send the task to (remote dispatch only)
        :param task_target: worker to send the task to (remote dispatch only)
        :param kwargs: optional kwargs to be passed to the task
        :param node_context: Used internally by node.execute_operation
        :param send_task_events: whether to emit task lifecycle events
        :param total_retries: optional override of the configured retry count
        :param retry_interval: optional override of the retry interval
        :param timeout: operation timeout placed into the cloudify context
        :param timeout_recoverable: whether a timeout should be recoverable
        """
        # Should deepcopy cause problems here, remove it, but please make
        # sure that WORKFLOWS_WORKER_PAYLOAD is not global in manager repo
        kwargs = copy.deepcopy(kwargs) or {}
        task_id = str(uuid.uuid4())
        cloudify_context = self._build_cloudify_context(
            task_id,
            task_name,
            node_context,
            timeout,
            timeout_recoverable)
        # The context travels with the task's kwargs under a reserved key.
        kwargs['__cloudify_context'] = cloudify_context
        if self.dry_run:
            # Dry runs substitute a no-op task so the graph still advances.
            return DryRunLocalWorkflowTask(
                local_task=lambda: None,
                workflow_context=self,
                name=task_name,
                kwargs=kwargs
            )
        if local:
            # oh sweet circular dependency
            from cloudify import dispatch
            return self.local_task(local_task=dispatch.dispatch,
                                   info=task_name,
                                   name=task_name,
                                   kwargs=kwargs,
                                   task_id=task_id,
                                   send_task_events=send_task_events,
                                   total_retries=total_retries,
                                   retry_interval=retry_interval)
        else:
            return self.remote_task(task_queue=task_queue,
                                    task_target=task_target,
                                    kwargs=kwargs,
                                    cloudify_context=cloudify_context,
                                    task_id=task_id,
                                    send_task_events=send_task_events,
                                    total_retries=total_retries,
                                    retry_interval=retry_interval)
def local_task(self,
local_task,
node=None,
info=None,
kwargs=None,
task_id=None,
name=None,
send_task_events=DEFAULT_SEND_TASK_EVENTS,
override_task_config=False,
total_retries=None,
retry_interval=None):
"""
Create a local workflow task
:param local_task: A callable implementation for the task
:param node: A node if this task is called in a node context
:param info: Additional info that will be accessed and included
in log messages
:param kwargs: kwargs to pass to the local_task when invoked
:param task_id: The task id
"""
global_task_config = self.internal.get_task_configuration()
if hasattr(local_task, 'workflow_task_config'):
decorator_task_config = local_task.workflow_task_config
else:
decorator_task_config = {}
invocation_task_config = dict(
local_task=local_task,
node=node,
info=info,
kwargs=kwargs,
send_task_events=send_task_events,
task_id=task_id,
name=name)
if total_retries is not None:
invocation_task_config['total_retries'] = total_retries
if retry_interval is not None:
invocation_task_config['retry_interval'] = retry_interval
final_task_config = {}
final_task_config.update(global_task_config)
if override_task_config:
final_task_config.update(decorator_task_config)
final_task_config.update(invocation_task_config)
else:
final_task_config.update(invocation_task_config)
final_task_config.update(decorator_task_config)
return self._process_task(LocalWorkflowTask(
workflow_context=self,
**final_task_config))
def remote_task(self,
kwargs,
cloudify_context,
task_id,
task_queue=None,
task_target=None,
send_task_events=DEFAULT_SEND_TASK_EVENTS,
total_retries=None,
retry_interval=None):
"""
Create a remote workflow task
:param cloudify_context: A dict for creating the CloudifyContext
used by the called task
:param task_id: The task id
"""
task_configuration = self.internal.get_task_configuration()
if total_retries is not None:
task_configuration['total_retries'] = total_retries
if retry_interval is not None:
task_configuration['retry_interval'] = retry_interval
return self._process_task(
RemoteWorkflowTask(kwargs=kwargs,
cloudify_context=cloudify_context,
task_target=task_target,
task_queue=task_queue,
workflow_context=self,
task_id=task_id,
send_task_events=send_task_events,
**task_configuration))
def _process_task(self, task):
if self.internal.graph_mode:
return task
else:
self.internal.task_graph.add_task(task)
return task.apply_async()
    def get_operations(self, graph_id):
        """Return the stored operations that belong to tasks graph ``graph_id``."""
        return self.internal.handler.get_operations(graph_id)
    def update_operation(self, operation_id, state,
                         result=None, exception=None):
        """Update a stored operation's state, optionally recording its
        result and/or the exception it raised."""
        return self.internal.handler.update_operation(
            operation_id, state, result, exception)
    def get_tasks_graph(self, name):
        """Fetch the stored tasks graph called ``name`` for this execution."""
        return self.internal.handler.get_tasks_graph(self.execution_id, name)
    def store_tasks_graph(self, name, operations=None):
        """Persist a tasks graph (optionally with its operations) under
        ``name`` for this execution."""
        return self.internal.handler.store_tasks_graph(
            self.execution_id, name, operations=operations)
    def store_operation(self, task, dependencies, graph_id):
        """Persist a task (serialized via ``task.dump()``) and its
        dependencies under tasks graph ``graph_id``."""
        return self.internal.handler.store_operation(
            graph_id=graph_id, dependencies=dependencies, **task.dump())
    def remove_operation(self, operation_id):
        """Delete the stored operation identified by ``operation_id``."""
        return self.internal.handler.remove_operation(operation_id)
    def get_execution(self, execution_id=None):
        """
        Get the execution object for the current execution

        :param execution_id: The Id of the execution object; defaults to the
            id of the execution this context belongs to
        :return: Instance of `Execution` object which holds all the needed info
        """
        if not execution_id:
            execution_id = self.execution_id
        return self.internal.handler.get_execution(execution_id)
class WorkflowNodesAndInstancesContainer(object):
    """Holds the workflow's node and node-instance wrappers, indexed by id.

    Also wires up containment: every instance is registered with the target
    of each of its ``contained_in``-derived relationships.
    """
    def __init__(self, workflow_context, raw_nodes, raw_node_instances):
        self.workflow_context = workflow_context
        self._nodes = {
            node.id: CloudifyWorkflowNode(workflow_context, node, self)
            for node in raw_nodes
        }
        self._node_instances = {
            instance.id: CloudifyWorkflowNodeInstance(
                workflow_context, self._nodes[instance.node_id], instance,
                self)
            for instance in raw_node_instances
        }
        for inst in self._node_instances.values():
            for rel in inst.relationships:
                if rel.relationship.is_derived_from(
                        "cloudify.relationships.contained_in"):
                    rel.target_node_instance._add_contained_node_instance(inst)

    @property
    def nodes(self):
        """Iterator over all wrapped nodes."""
        return iter(self._nodes.values())

    @property
    def node_instances(self):
        """Iterator over all wrapped node instances."""
        return iter(self._node_instances.values())

    def get_node(self, node_id):
        """
        Get a node by its id

        :param node_id: The node id
        :return: a CloudifyWorkflowNode instance for the node or None if
                 not found
        """
        return self._nodes.get(node_id)

    def get_node_instance(self, node_instance_id):
        """
        Get a node instance by its id

        :param node_instance_id: The node instance id
        :return: a CloudifyWorkflowNode instance for the node or None if
                 not found
        """
        return self._node_instances.get(node_instance_id)

    def refresh_node_instances(self):
        """Re-fetch nodes and node instances from the handler and rebuild
        the lookup tables.

        NOTE(review): this reads ``self.internal`` which is not defined on
        this class — it exists only when mixed into a workflow context class
        (e.g. CloudifyWorkflowContext); confirm before calling standalone.
        """
        raw_nodes = self.internal.handler.get_nodes()
        self._nodes = {
            node.id: CloudifyWorkflowNode(self.workflow_context, node, self)
            for node in raw_nodes
        }
        raw_node_instances = self.internal.handler.get_node_instances()
        self._node_instances = {
            instance.id: CloudifyWorkflowNodeInstance(
                self.workflow_context, self._nodes[instance.node_id], instance,
                self)
            for instance in raw_node_instances
        }
class CloudifyWorkflowContext(
    _WorkflowContextBase,
    WorkflowNodesAndInstancesContainer
):
    """
    A context used in workflow operations

    :param ctx: a cloudify_context workflow dict
    """
    def __init__(self, ctx):
        # NOTE(review): blueprint/deployment contexts are created before the
        # base __init__ runs — presumably because handler initialization
        # reads them via current_workflow_ctx; confirm before reordering.
        self.blueprint = context.BlueprintContext(ctx)
        self.deployment = WorkflowDeploymentContext(ctx, self)
        with current_workflow_ctx.push(self):
            # Not using super() here, because
            # WorkflowNodesAndInstancesContainer's __init__() needs some data
            # to be prepared before calling it. It would be possible to
            # overcome this by using kwargs + super(...).__init__() in
            # _WorkflowContextBase, but the way it is now is self-explanatory.
            _WorkflowContextBase.__init__(self, ctx,
                                          RemoteCloudifyWorkflowContextHandler)
            raw_nodes = self.internal.handler.get_nodes()
            raw_node_instances = self.internal.handler.get_node_instances()
            WorkflowNodesAndInstancesContainer.__init__(self, self, raw_nodes,
                                                        raw_node_instances)

    def _build_cloudify_context(self, *args):
        # Extend the base operation context with the blueprint/deployment ids
        # this workflow runs against.
        context = super(
            CloudifyWorkflowContext,
            self
        )._build_cloudify_context(*args)
        context.update({
            'blueprint_id': self.blueprint.id,
            'deployment_id': self.deployment.id
        })
        return context
class CloudifySystemWideWorkflowContext(_WorkflowContextBase):
    """Workflow context for system-wide (non-deployment-scoped) workflows.

    Exposes per-deployment contexts lazily via ``deployments_contexts``.
    """
    def __init__(self, ctx):
        with current_workflow_ctx.push(self):
            super(CloudifySystemWideWorkflowContext, self).__init__(
                ctx,
                SystemWideWfRemoteContextHandler
            )
        # Lazy cache for deployments_contexts.
        self._dep_contexts = None

    class _ManagedCloudifyWorkflowContext(CloudifyWorkflowContext):
        """A deployment context that manages local-task processing as a
        context manager."""
        def __enter__(self):
            self.internal.start_local_tasks_processing()
            # Fix: __enter__ previously returned None, so
            # ``with ctx as c:`` bound ``c`` to None instead of the context.
            return self

        def __exit__(self, *args, **kwargs):
            self.internal.stop_local_tasks_processing()

    @property
    def deployments_contexts(self):
        """Mapping of deployment id -> lazily constructed managed context.

        The list of deployments is fetched once and cached; each context is
        only instantiated on first attribute access (via the proxy).
        """
        if self.local:
            raise RuntimeError(
                'deployment_contexts do not exist in local workflows')
        if self._dep_contexts is None:
            self._dep_contexts = {}
            deployments_list = \
                self.internal.handler.rest_client.deployments.list(
                    _include=['id', 'blueprint_id'],
                    _get_all_results=True
                )
            for dep in deployments_list:
                # Failure to deepcopy will cause snapshot restore context hack
                # to be reset just before it's needed.
                dep_ctx = copy.deepcopy(self._context)
                dep_ctx['tenant']['name'] = self.tenant_name
                dep_ctx['deployment_id'] = dep.id
                dep_ctx['blueprint_id'] = dep.blueprint_id

                def lazily_loaded_ctx(dep_ctx):
                    # Bind dep_ctx per-iteration (avoids the late-binding
                    # closure pitfall) and memoize the constructed context.
                    def lazy_ctx():
                        if not hasattr(lazy_ctx, '_cached_ctx'):
                            lazy_ctx._cached_ctx = \
                                self._ManagedCloudifyWorkflowContext(dep_ctx)
                        return lazy_ctx._cached_ctx
                    return proxy(lazy_ctx)
                self._dep_contexts[dep.id] = lazily_loaded_ctx(dep_ctx)
        return self._dep_contexts
class CloudifyWorkflowContextInternal(object):
    def __init__(self, workflow_context, handler):
        """Internal state for a workflow context: task graph, handler and
        local-task processing machinery.

        :param workflow_context: the owning workflow context
        :param handler: the storage/dispatch handler for this workflow
        """
        self.workflow_context = workflow_context
        self.handler = handler
        # NOTE(review): presumably a lazy cache read by
        # _get_bootstrap_context(); confirm in the rest of the class.
        self._bootstrap_context = None
        self._graph_mode = False
        # the graph is always created internally for events to work properly
        # when graph mode is turned on this instance is returned to the user.
        subgraph_task_config = self.get_subgraph_task_configuration()
        self._task_graph = TaskDependencyGraph(
            workflow_context=workflow_context,
            default_subgraph_task_config=subgraph_task_config)
        # local task processing
        thread_pool_size = self.workflow_context._local_task_thread_pool_size
        self.local_tasks_processor = LocalTasksProcessing(
            self.workflow_context,
            thread_pool_size=thread_pool_size)
def get_task_configuration(self):
bootstrap_context = self._get_bootstrap_context()
workflows = bootstrap_context.get('workflows', {})
total_retries = workflows.get(
'task_retries',
self.workflow_context._task_retries)
retry_interval = workflows.get(
'task_retry_interval',
self.workflow_context._task_retry_interval)
return dict(total_retries=total_retries,
retry_interval=retry_interval)
def get_subgraph_task_configuration(self):
bootstrap_context = self._get_bootstrap_context()
workflows = bootstrap_context.get('workflows', {})
subgraph_retries = workflows.get(
'subgraph_retries',
self.workflow_context._subgraph_retries
)
return | |
self.pad_amount:self.pad_amount + length]
else:
real = real[:, :length]
return real
def extra_repr(self) -> str:
return 'n_fft={}, Fourier Kernel size={}, iSTFT={}, trainable={}'.format(
self.n_fft, (*self.wsin.shape,), self.iSTFT, self.trainable
)
class MelSpectrogram(torch.nn.Module):
    """This function is to calculate the Melspectrogram of the input signal.
    Input signal should be in either of the following shapes.\n
    1. ``(len_audio)``\n
    2. ``(num_audio, len_audio)``\n
    3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
    Most of the arguments follow the convention from librosa.
    This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.

    Parameters
    ----------
    sr : int
        The sampling rate for the input audio.
        It is used to calculate the correct ``fmin`` and ``fmax``.
        Setting the correct sampling rate is very important for calculating the correct frequency.
    n_fft : int
        The window size for the STFT. Default value is 2048
    n_mels : int
        The number of Mel filter banks. The filter banks maps the n_fft to mel bins.
        Default value is 128.
    hop_length : int
        The hop (or stride) size. Default value is 512.
    window : str
        The windowing function for STFT. It uses ``scipy.signal.get_window``, please refer to
        scipy documentation for possible windowing functions. The default value is 'hann'.
    center : bool
        Putting the STFT kernel at the center of the time-step or not. If ``False``,
        the time index is the beginning of the STFT kernel, if ``True``, the time index is the
        center of the STFT kernel. Default value is ``True``.
    pad_mode : str
        The padding method. Default value is 'reflect'.
    htk : bool
        When ``False`` is used, the Mel scale is quasi-logarithmic. When ``True`` is used, the
        Mel scale is logarithmic. The default value is ``False``.
    fmin : int
        The starting frequency for the lowest Mel filter bank.
    fmax : int
        The ending frequency for the highest Mel filter bank.
    trainable_mel : bool
        Determine if the Mel filter banks are trainable or not. If ``True``, the gradients for Mel
        filter banks will also be calculated and the Mel filter banks will be updated during model
        training. Default value is ``False``.
    trainable_STFT : bool
        Determine if the STFT kernels are trainable or not. If ``True``, the gradients for STFT
        kernels will also be calculated and the STFT kernels will be updated during model training.
        Default value is ``False``.
    verbose : bool
        If ``True``, it shows layer information. If ``False``, it suppresses all prints.
    **kwargs
        Forwarded to the underlying ``STFT`` layer (e.g. device selection).

    Returns
    -------
    spectrogram : torch.tensor
        It returns a tensor of Mel spectrograms. shape = ``(num_samples, n_mels, time_steps)``.

    Examples
    --------
    >>> spec_layer = Spectrogram.MelSpectrogram()
    >>> specs = spec_layer(x)
    """

    def __init__(self, sr=22050, n_fft=2048, n_mels=128, hop_length=512,
                 window='hann', center=True, pad_mode='reflect', power=2.0, htk=False,
                 fmin=0.0, fmax=None, norm=1, trainable_mel=False, trainable_STFT=False,
                 verbose=True, **kwargs):
        super().__init__()
        self.stride = hop_length
        self.center = center
        self.pad_mode = pad_mode
        self.n_fft = n_fft
        self.power = power
        self.trainable_mel = trainable_mel
        self.trainable_STFT = trainable_STFT
        self.verbose = verbose

        # Preparing for the stft layer. No need for center.
        # Time the STFT-kernel creation separately; previously both prints
        # below reused one timer started *after* the STFT was built, so the
        # reported STFT time was actually the Mel-filter time.
        start = time()
        self.stft = STFT(n_fft=n_fft, freq_bins=None, hop_length=hop_length, window=window,
                         freq_scale='no', center=center, pad_mode=pad_mode, sr=sr, trainable=trainable_STFT,
                         output_format="Magnitude", verbose=verbose, **kwargs)
        if verbose == True:
            print("STFT filter created, time used = {:.4f} seconds".format(time() - start))

        # Creating kernel for mel spectrogram
        start = time()
        mel_basis = mel(sr, n_fft, n_mels, fmin, fmax, htk=htk, norm=norm)
        mel_basis = torch.tensor(mel_basis)
        if verbose == True:
            print("Mel filter created, time used = {:.4f} seconds".format(time() - start))

        if trainable_mel:
            # Making everything nn.Parameter, so that this model can support nn.DataParallel
            mel_basis = torch.nn.Parameter(mel_basis, requires_grad=trainable_mel)
            self.register_parameter('mel_basis', mel_basis)
        else:
            self.register_buffer('mel_basis', mel_basis)

    def forward(self, x):
        """
        Convert a batch of waveforms to Mel spectrograms.

        Parameters
        ----------
        x : torch tensor
            Input signal should be in either of the following shapes.\n
            1. ``(len_audio)``\n
            2. ``(num_audio, len_audio)``\n
            3. ``(num_audio, 1, len_audio)``
            It will be automatically broadcast to the right shape
        """
        x = broadcast_dim(x)
        # Magnitude STFT raised to `power` (2.0 => power spectrogram),
        # then projected onto the Mel filter banks.
        spec = self.stft(x, output_format='Magnitude') ** self.power
        melspec = torch.matmul(self.mel_basis, spec)
        return melspec

    def extra_repr(self) -> str:
        # Fix: trainable_STFT was passed to format() without a placeholder
        # and silently dropped from the repr.
        return 'Mel filter banks size = {}, trainable_mel={}, trainable_STFT={}'.format(
            (*self.mel_basis.shape,), self.trainable_mel, self.trainable_STFT
        )

    def to_stft(self, melspec, max_steps=1000, loss_threshold=1e-8, grad_threshold=1e-7, random_start=False, sgd_kwargs=None, eps=1e-12, return_extras=False, verbose=None):
        """
        Best-attempt spectrogram inversion: recover a magnitude STFT whose
        Mel projection matches ``melspec``, via SGD on the squared error.

        Stops early when the loss drops below ``loss_threshold`` or the max
        gradient drops below ``grad_threshold``. Raises OverflowError if the
        loss becomes non-finite.
        """
        def loss_fn(pred, target):
            pred = pred.unsqueeze(1) if pred.ndim == 3 else pred
            target = target.unsqueeze(1) if target.ndim == 3 else target
            loss = (pred - target).pow(2).sum(-2).mean()
            return loss

        # Fix: `verbose or self.verbose` overrode an explicit verbose=False;
        # only fall back to the instance setting when verbose is None.
        verbose = self.verbose if verbose is None else verbose

        # SGD arguments
        default_sgd_kwargs = dict(lr=1e3, momentum=0.9)
        if sgd_kwargs:
            default_sgd_kwargs.update(sgd_kwargs)
        sgd_kwargs = default_sgd_kwargs

        mel_basis = self.mel_basis.detach()
        shape = melspec.shape
        batch_size, n_mels, time = shape[0], shape[-2], shape[-1]
        _, n_freq = mel_basis.shape
        melspec = melspec.detach().view(-1, n_mels, time)
        if random_start:
            pred_stft_shape = (batch_size, n_freq, time)
            pred_stft = torch.zeros(*pred_stft_shape, dtype=torch.float32, device=mel_basis.device).normal_().clamp_(eps)
        else:
            # Pseudo-inverse projection as a warm start.
            pred_stft = (torch.pinverse(mel_basis) @ melspec).clamp(eps)
        pred_stft = nn.Parameter(pred_stft, requires_grad=True)

        sgd_kwargs["lr"] = sgd_kwargs["lr"] * batch_size
        optimizer = torch.optim.SGD([pred_stft], **sgd_kwargs)

        losses = []
        for i in range(max_steps):
            optimizer.zero_grad()
            pred_mel = mel_basis @ pred_stft
            loss = loss_fn(pred_mel, melspec)
            losses.append(loss.item())
            loss.backward()
            optimizer.step()

            # Check conditions
            if not loss.isfinite():
                raise OverflowError("Overflow encountered in Mel -> STFT optimization")
            if loss_threshold and loss < loss_threshold:
                if verbose:
                    print(f"Target error of {loss_threshold} reached. Stopping optimization.")
                break
            if grad_threshold and pred_stft.grad.max() < grad_threshold:
                if verbose:
                    print(f"Target max gradient of {grad_threshold} reached. Stopping optimization.")
                break

        # The optimization ran on the power spectrogram; take the square
        # root to return magnitudes, and restore the input's leading shape.
        pred_stft = pred_stft.detach().clamp(eps) ** 0.5
        pred_stft = pred_stft.view((*shape[:-2], n_freq, time))

        if return_extras:
            return pred_stft, pred_mel.detach(), losses
        return pred_stft

    def inverse(self, melspec, mel_inversion_params=None, stft_inversion_params=None):
        """Reconstruct audio from a Mel spectrogram: Mel -> STFT (via
        ``to_stft``) then STFT -> waveform (via the STFT layer's inverse)."""
        default_mel_inversion_params = {}
        default_stft_inversion_params = {}

        mel_inversion_params = mel_inversion_params or {}
        stft_inversion_params = stft_inversion_params or {}
        if mel_inversion_params:
            mel_inversion_params = {**default_mel_inversion_params, **mel_inversion_params}
        if stft_inversion_params:
            stft_inversion_params = {**default_stft_inversion_params, **stft_inversion_params}

        recon_stft = self.to_stft(melspec, **mel_inversion_params)
        recon_audio = self.stft.inverse(recon_stft, **stft_inversion_params)
        return recon_audio
class MFCC(torch.nn.Module):
"""This function is to calculate the Mel-frequency cepstral coefficients (MFCCs) of the input signal.
This algorithm first extracts Mel spectrograms from the audio clips,
    then the discrete cosine transform is calculated to obtain the final MFCCs.
Therefore, the Mel spectrogram part can be made trainable using
``trainable_mel`` and ``trainable_STFT``.
It only support type-II DCT at the moment. Input signal should be in either of the following shapes.\n
1. ``(len_audio)``\n
2. ``(num_audio, len_audio)``\n
3. ``(num_audio, 1, len_audio)``
    The correct shape will be inferred automatically if the input follows these 3 shapes.
Most of the arguments follow the convention from librosa.
This class inherits from ``torch.nn.Module``, therefore, the usage is same as ``torch.nn.Module``.
Parameters
----------
sr : int
The sampling rate for the input audio. It is used to calculate the correct ``fmin`` and ``fmax``.
Setting the correct sampling rate is very important for calculating the correct frequency.
n_mfcc : int
The number of Mel-frequency cepstral coefficients
norm : string
The default value is 'ortho'. Normalization for DCT basis
**kwargs
Other arguments for Melspectrogram such as n_fft, n_mels, hop_length, and window
Returns
-------
MFCCs : torch.tensor
It returns a tensor of MFCCs. shape = ``(num_samples, n_mfcc, time_steps)``.
Examples
--------
>>> spec_layer = Spectrogram.MFCC()
>>> mfcc = spec_layer(x)
"""
    def __init__(self, sr=22050, n_mfcc=20, norm='ortho', verbose=True, ref=1.0, amin=1e-10, top_db=80.0, **kwargs):
        """Build the Mel front-end and register the dB-conversion constants.

        :param n_mfcc: number of cepstral coefficients to keep
        :param ref/amin/top_db: parameters of the power-to-dB conversion
        :param kwargs: forwarded to ``MelSpectrogram``
        """
        super().__init__()
        self.melspec_layer = MelSpectrogram(sr=sr, verbose=verbose, **kwargs)
        # NOTE(review): `m_mfcc` looks like a typo'd duplicate of `n_mfcc`
        # (also stored below); kept as-is for backward compatibility.
        self.m_mfcc = n_mfcc
        # attributes that will be used for _power_to_db
        if amin <= 0:
            raise ParameterError('amin must be strictly positive')
        # Registered as buffers so they move with the module across devices.
        amin = torch.tensor([amin])
        ref = torch.abs(torch.tensor([ref]))
        self.register_buffer('amin', amin)
        self.register_buffer('ref', ref)
        self.top_db = top_db
        self.n_mfcc = n_mfcc
def _power_to_db(self, S):
'''
Refer to https://librosa.github.io/librosa/_modules/librosa/core/spectrum.html#power_to_db
for the original implmentation.
'''
log_spec = 10.0 * torch.log10(torch.max(S, self.amin))
log_spec -= 10.0 * torch.log10(torch.max(self.amin, self.ref))
if self.top_db is not None:
if self.top_db < 0:
raise ParameterError('top_db must be non-negative')
# make the dim same as log_spec so that it can be broadcasted
batch_wise_max = log_spec.flatten(1).max(1)[0].unsqueeze(1).unsqueeze(1)
log_spec = torch.max(log_spec, batch_wise_max - self.top_db)
return log_spec
def _dct(self, x, norm=None):
'''
Refer to https://github.com/zh217/torch-dct for the original implmentation.
'''
x = x.permute(0,2,1) # make freq the last axis, since dct applies to the frequency axis
x_shape = x.shape
N = x_shape[-1]
v = torch.cat([x[:, :, ::2], x[:, :, 1::2].flip([2])], dim=2)
Vc = | |
from collections import namedtuple, defaultdict
from torch.utils.data import Dataset
from torch import Generator
import torch
import numpy as np
from typing import Sequence, Optional, Dict, Union
# UGH: Pytorch dataloaders don't support dataclasses, only namedtuples
# See https://github.com/pytorch/pytorch/blob/9baf75c86edc7f6cd1c04bf9f42d18bc0d05f504/torch/utils/data/_utils/collate.py#L43
# (x, y, w) = (features, label, importance weight); the weight defaults to 1.
LabeledDatapoint = namedtuple(
    "LabeledDatapoint", ("x", "y", "w"), defaults=(None, None, 1)
)
# Adds g: a group identifier for group-aware training/evaluation.
GroupedLabeledDatapoint = namedtuple(
    "GroupedLabeledDatapoint", ("x", "y", "w", "g"), defaults=(None, None, 1, None)
)
# Adds extra: True for points NOT selected by undersampling
# (see UndersampledWithExtrasDataset below).
ExtraLabeledDatapoint = namedtuple(
    "ExtraLabeledDatapoint",
    ("x", "y", "w", "g", "extra"),
    defaults=(None, None, 1, None, False),
)
class UndersampledByGroupDataset(Dataset):
    """ Wraps a map Dataset into an dataset undersampled by group.

    Args:
        dataset (Dataset): The whole Dataset
        group_ids (Sequence[int]): Each entry i is the group id of dataset[i]
        new_group_sizes (dict or list): new_group_sizes[g] returns the desired group
            size to undersample to for group g.
        new_group_fracs (dict or list): new_group_fracs[g] returns the desired
            fraction of group g to keep (mutually exclusive with
            new_group_sizes).
        generator (Optional[Generator]): torch.Generator

    Note that we can undersample by labels if `group_ids[i]` is the label of dataset[i]
    """
    def __init__(
        self,
        dataset: Dataset,
        group_ids: Sequence[int],
        new_group_sizes: Optional[Union[Sequence[int], Dict[int, int]]] = None,
        new_group_fracs: Optional[Union[Sequence[float], Dict[int, float]]] = None,
        generator=None,
    ):
        if (new_group_sizes is not None) and (new_group_fracs is not None):
            raise ValueError(
                "new_group_sizes and new_group_fracs cannot both be specified"
            )
        # Fail fast instead of crashing later with an opaque TypeError.
        if new_group_sizes is None and new_group_fracs is None:
            raise ValueError(
                "one of new_group_sizes or new_group_fracs must be specified"
            )
        if isinstance(new_group_sizes, list):
            new_group_sizes = dict(enumerate(new_group_sizes))
        elif isinstance(new_group_fracs, list):
            new_group_fracs = dict(enumerate(new_group_fracs))

        # Bucket dataset indices by group id.
        group_idxs = defaultdict(list)
        for i, g in enumerate(group_ids):
            group_idxs[int(g)].append(i)

        # Translate fractions into absolute per-group sizes (previously
        # raised NotImplementedError even though fracs were documented).
        if new_group_fracs is not None:
            new_group_sizes = {
                g: int(len(group_idxs[g]) * new_group_fracs[g])
                for g in group_idxs
            }

        for g in group_idxs.keys():
            assert new_group_sizes[g] <= len(
                group_idxs[g]
            ), f"Group {g} has only {len(group_idxs[g])} samples, which is less than {new_group_sizes[g]} "

        indices = []
        if generator is None:
            generator = torch.Generator()
            generator.manual_seed(
                0
            )  # ensure that we always choose the same examples with a group
        for g, idxs in group_idxs.items():
            idxs = torch.tensor(idxs)
            new_size = new_group_sizes[g]
            # equivalent of np.random.choice without replacement
            choice = torch.randperm(len(idxs), generator=generator)[:new_size]
            chosen_idxs = idxs[choice]
            indices.append(chosen_idxs)
        self.indices = torch.cat(indices)
        self.dataset = dataset

    def __getitem__(self, idx):
        return self.dataset[self.indices[idx]]

    def __len__(self):
        return len(self.indices)
class UndersampledDataset(Dataset):
    """ Wraps a map Dataset into an undersampled dataset.

    Implements undersampling via rejection sampling: each point i is kept
    with probability weights[i] / weights_upper_bound, so some points are
    dropped. tldr: This will exclude points from the given Dataset!

    Args:
        dataset (Dataset): The whole Dataset
        weights (sequence): The importance weights of each element of `dataset`.
            weights[i] should be equal to the likelihood ratio of dataset[i]
            between the target distribution and the source distribution
        weights_upper_bound (Optional[float]): an optional upper bound on
            `weights` for rejection sampling (defaults to max(weights))
        generator (Optional[Generator]): torch.Generator
    """
    def __init__(
        self,
        dataset: Dataset,
        weights: Sequence[float],
        weights_upper_bound: Optional[float] = None,
        generator: Optional[Generator] = None,
    ):
        self.dataset = dataset
        self.weights = weights
        self.generator = generator
        assert len(weights) == len(dataset)
        weight_tensor = torch.tensor(weights)
        assert weight_tensor.ndim == 1
        bound = (
            weight_tensor.max().item()
            if weights_upper_bound is None
            else weights_upper_bound
        )
        # Rejection sampling: keep i iff U_i <= w_i / bound.
        unif_rv = torch.rand(weight_tensor.size(), generator=generator)
        self.indices = torch.nonzero(
            unif_rv <= weight_tensor / bound, as_tuple=True
        )[0]

    def __getitem__(self, idx):
        return self.dataset[self.indices[idx]]

    def __len__(self):
        return len(self.indices)
class OversampledDataset(Dataset):
    """Wraps a map Dataset into an oversampled dataset that oversamples by duplicating
    points in the original Dataset. All points in the original Dataset are kept

    tldr: This will include all points from the given Dataset and some duplicates!

    Args:
        dataset (Dataset): The whole Dataset
        weights (sequence): The importance weights of each element of `dataset`.
            weights[i] should be equal to the likelihood ratio of dataset[i]
            between the target distribution and the source distribution
        new_size (int): The desired size of the new dataset (>= len(dataset))
        generator (Optional[Generator]): torch.Generator
    """
    def __init__(
        self,
        dataset: Dataset,
        weights: Sequence[float],
        new_size: int,
        generator: Optional[Generator] = None,
    ):
        self.dataset = dataset
        self.weights = weights
        self.generator = generator
        assert len(weights) == len(dataset)
        num_needed = new_size - len(dataset)
        assert num_needed >= 0
        # Fix: convert BEFORE the ndim check — plain Python sequences (as
        # the type hint allows) have no `.ndim` attribute.
        weights = torch.as_tensor(weights)
        assert weights.ndim == 1
        normalized_weights = weights / weights.sum(0)
        # Draw the duplicates proportionally to the normalized weights.
        additional_indices = torch.multinomial(
            normalized_weights, num_needed, replacement=True, generator=generator
        )
        # append on additional samples
        self.indices = torch.cat([torch.arange(len(dataset)), additional_indices])

    def __getitem__(self, idx):
        return self.dataset[self.indices[idx]]

    def __len__(self):
        return len(self.indices)
# class OversampledUndersampledDataset(Dataset):
# """
#
# """
#
# def __init__(
# self,
# dataset: Dataset,
# weights: Sequence[int],
# exponent: float,
# new_size: Optional[int] = None,
# weights_upper_bound: Optional[float] = None,
# generator: Optional[Generator] = None,
# ):
# # when exponent == 0, we only undersample
# # when exponent == 1, we only oversample
# assert 0 <= exponent <= 1
#
# undersampling_weights = torch.tensor(weights) ** (1 - exponent)
# undersampled_dataset = UndersampledDataset(
# dataset,
# weights=undersampling_weights,
# weights_upper_bound=weights_upper_bound,
# generator=generator,
# )
#
# if exponent == 0:
# print(
# "OversampledUndersampledDataset exponent is 0. Cannot oversample to requested new_size of {new_size}"
# )
# new_size = len(undersampled_dataset)
# elif new_size is None:
# raise ValueError("new_size cannot be None when exponent is nonzero!")
#
# oversampling_weights = (
# torch.tensor([weights[i] for i in undersampled_dataset.indices]) ** exponent
# )
# oversampled_dataset = OversampledDataset(
# undersampled_dataset,
# weights=oversampling_weights,
# new_size=new_size,
# generator=generator,
# )
#
# self.dataset = oversampled_dataset
#
# def __getitem__(self, idx):
# return self.dataset[idx]
#
# def __len__(self):
# return len(self.dataset)
class ReweightedDataset(Dataset):
    """Wraps a map Dataset into a reweighted dataset.

    Each time we __getitem__ we'll also get a weight `w` as part of our returned
    tuple. Weights are raised to ``weight_exponent`` and normalized to mean 1.

    Args:
        dataset (Dataset): The whole Dataset
        weights (sequence): The importance weights of each element of `dataset`.
            weights[i] should be equal to the likelihood ratio of dataset[i]
            between the target distribution and the source distribution
        generator (Optional[Generator]): torch.Generator
        weight_exponent (float): exponent applied to the weights before
            normalization (1.0 leaves them unchanged)
    """
    def __init__(
        self,
        dataset: Dataset,
        weights: Sequence[float],
        generator: Optional[Generator] = None,
        weight_exponent: float = 1.0,
    ):
        # Fix: `weights ** exponent` and `weights / normalization` fail for
        # plain Python sequences (as the type hint allows); as_tensor is a
        # no-op for tensors, so tensor callers are unaffected.
        weights = torch.as_tensor(weights) ** weight_exponent
        self.weight_exponent = weight_exponent
        self.dataset = dataset
        # Normalize to mean 1 so the expected weight over the dataset is 1.
        self.normalization = weights.sum() / len(weights)
        self.weights = weights / self.normalization
        self.generator = generator

    def __getitem__(self, idx):
        datapoint = self.dataset[idx]
        w = self.weights[idx]
        if isinstance(datapoint, tuple) and hasattr(
            datapoint, "_fields"
        ):  # check if namedtuple
            datapoint = datapoint._replace(w=w)
        else:
            # Plain tuples get the weight appended as the last element.
            datapoint = datapoint + (w,)
        return datapoint

    def __len__(self):
        return len(self.dataset)
class ResampledDataset(Dataset):
    """Wraps a map Dataset into a resampled dataset with replacement.

    tldr: This may drop some points and create some duplicated points from the given Dataset!

    Args:
        dataset (Dataset): The whole Dataset
        weights (sequence): The importance weights of each element of `dataset`.
            weights[i] should be equal to the likelihood ratio of dataset[i]
            between the target distribution and the source distribution
        new_size (int): number of points to draw (with replacement)
        generator (Optional[Generator]): torch.Generator
    """
    def __init__(
        self,
        dataset: Dataset,
        weights: Sequence[float],
        new_size: int,
        generator: Optional[Generator] = None,
    ):
        self.dataset = dataset
        self.weights = weights
        self.generator = generator
        # Fix: convert BEFORE the ndim check — plain Python sequences (as
        # the type hint allows) have no `.ndim` attribute.
        weights = torch.as_tensor(weights)
        assert weights.ndim == 1
        assert len(weights) == len(dataset)
        normalized_weights = weights / weights.sum(0)
        # Sample new_size indices with replacement, proportional to weight.
        indices = torch.multinomial(
            normalized_weights, new_size, replacement=True, generator=generator
        )
        self.indices = indices

    def __getitem__(self, idx):
        return self.dataset[self.indices[idx]]

    def __len__(self):
        return len(self.indices)
def undersampling_schedule(
    weights: torch.Tensor, T: int, annealing_fn: str, annealing_params: tuple,
):
    """Yield ``(kept_indices, adjusted_weights)`` for rounds t = 0..T of an
    annealed rejection-sampling undersampling schedule.

    ``annealing_fn`` selects how the keep-probability exponent evolves:
    "linear" (w ** (1 - t/T)), "sigmoid" (w ** sigmoid(-c*(t/T - 0.5)) with
    c = annealing_params[0]) or "step" (w until t > T//2, then keep all).
    The same uniform draw is reused across rounds.
    """
    if annealing_fn == "linear":
        def anneal(w, t, T):
            return w ** (1 - t / T)
    elif annealing_fn == "sigmoid":
        def sigmoid(x):
            # Numerically-stable scalar sigmoid.
            return np.where(
                x >= 0, 1 / (1 + np.exp(-x)), np.exp(x) / (1 + np.exp(x))
            ).item()

        steepness = annealing_params[0]

        def anneal(w, t, T):
            return w ** sigmoid(-steepness * (t / T - 0.5))
    elif annealing_fn == "step":
        def anneal(w, t, T):
            return torch.ones_like(w) if t > T // 2 else w
    else:
        raise ValueError(f"Annealing function {annealing_fn} not supported")

    weights = weights / torch.max(weights)
    rv = torch.rand(*weights.shape)
    for t in range(T + 1):
        keep_mask = rv <= anneal(weights, t, T)
        idx = torch.nonzero(keep_mask, as_tuple=True)[0]
        # Reweight survivors by the inverse of their keep probability.
        yield idx, weights[idx] / anneal(weights[keep_mask], t, T)
class UndersampledWithExtrasDataset(UndersampledDataset):
    """Runs the parent's rejection-sampling undersampling, but keeps EVERY
    point: items NOT selected by undersampling are returned with
    ``extra=True`` (wrapped as ``ExtraLabeledDatapoint``) instead of being
    dropped.
    """
    def __init__(
        self,
        dataset: Dataset,
        weights: Sequence[float],
        weights_upper_bound: Optional[float] = None,
        generator: Optional[Generator] = None,
    ):
        super().__init__(
            dataset=dataset,
            weights=weights,
            weights_upper_bound=weights_upper_bound,
            generator=generator,
        )
        # Keep the parent's selection under a new name and remove `indices`
        # so __getitem__/__len__ below operate on the FULL dataset rather
        # than the undersampled view.
        self.undersampled_indices = self.indices
        del self.indices
        all_indices = torch.arange(len(self.dataset))
        # extra[i] is True iff point i was NOT chosen by undersampling.
        extra = torch.ones(len(all_indices), dtype=torch.bool)
        extra[self.undersampled_indices] = False
        self.extra = extra

    def __getitem__(self, idx):
        item = self.dataset[idx]
        extra = self.extra[idx]
        # NOTE(review): assumes the wrapped dataset yields tuples with the
        # (x, y, w, g) prefix so the extra flag lands in the right slot.
        wrapped_item = item + (extra,)
        wrapped_item = ExtraLabeledDatapoint(*wrapped_item)
        return wrapped_item

    def __len__(self):
        return len(self.dataset)
class SubsetOfGroupedDataset(Dataset):
def __init__(
self,
dataset: Dataset,
indices: Sequence[int],
old_to_new_class: Optional[Dict[int, int]] = None,
old_to_new_group: Optional[Dict[int, int]] = None,
):
self.dataset = dataset
self.indices = indices
if old_to_new_class is not None:
old_to_new_class_arr = 999 * np.ones(
max(old_to_new_class.keys()) + 1, dtype=np.int
)
old = np.array(list(old_to_new_class.keys()))
new = np.array(list(old_to_new_class.values()))
old_to_new_class_arr[old] = new
old_to_new_class = old_to_new_class_arr
self.old_to_new_class = old_to_new_class
if old_to_new_group is not None:
old_to_new_group_arr = 999 * np.ones(
max(old_to_new_group.keys()) + 1, dtype=np.int
)
old = np.array(list(old_to_new_group.keys()))
new = np.array(list(old_to_new_group.values()))
old_to_new_group_arr[old] = new
old_to_new_group = old_to_new_group_arr
self.old_to_new_group = old_to_new_group
def __getitem__(self, idx):
datapoint = | |
an admin.")
mvec_qs = ManualVariantEntryCollection.objects.order_by("-id")
context = {"form": form,
"mvec_qs": mvec_qs}
return render(request, 'snpdb/data/manual_variant_entry.html', context=context)
@require_POST
def set_user_row_config(request):
    """Persist the user's preferred row count for a grid.

    Posted from jqgrid.html setRowChangeCallbacks when grid rows change.
    """
    defaults = {"rows": int(request.POST["grid_rows"])}
    UserGridConfig.objects.update_or_create(
        user=request.user,
        grid_name=request.POST["grid_name"],
        defaults=defaults,
    )
    return HttpResponse()
@require_POST
def set_user_data_grid_config(request):
    """Update a user's data-grid filtering preferences.

    Posted from user_data_grid_filter.html; the request carries either
    filter_level+checked (a boolean toggle) or filter_name.
    """
    # Which UserGridConfig flag each filter_level toggles.
    FILTER_LEVEL_ATTRS = {
        'groups': 'show_group_data',
        'incomplete': 'show_incomplete_data',
        'hidden': 'show_hidden_data',
    }
    user_grid_config = UserGridConfig.get(request.user, request.POST["grid_name"])
    filter_level = request.POST.get("filter_level")
    if filter_level:
        checked = json.loads(request.POST["checked"])
        attr = FILTER_LEVEL_ATTRS.get(filter_level)
        if attr is None:
            msg = f"Unknown value for filter_level: '{filter_level}'"
            raise ValueError(msg)
        setattr(user_grid_config, attr, checked)
    else:
        user_grid_config.filter_name = request.POST["filter_name"]
    user_grid_config.save()
    return HttpResponse()
def view_user_settings(request):
    """View/edit the current user's settings: profile, contact details,
    settings overrides and (optionally) per-group initial permissions.

    Also handles the 'password-reset' action, which triggers a Keycloak
    password-reset email instead of saving any form.
    """
    user = request.user
    user_contact = UserContact.get_for_user(user)
    # For the 'password-reset' action we must NOT bind POST data to the
    # forms (nothing is being saved), hence `post` becomes None.
    action = request.POST.get('action') if request.POST else None
    post = request.POST or None if not action else None
    user_form = UserForm(post, instance=user)
    user_contact_form = UserContactForm(post, instance=user_contact)
    user_settings = UserSettings.get_for_user(user)
    # Values inherited from org/lab level, shown so the user can see what
    # they would get without a personal override.
    override_source, override_values = user_settings.get_override_source_and_values_before_user()
    user_settings_override = UserSettingsOverride.objects.get(user=user)
    user_settings_override_form = UserSettingsOverrideForm(post, instance=user_settings_override)
    labs_by_group_name = {l.group_name: l for l in Lab.valid_labs_qs(user)}
    group_initial_perm_forms = {}
    if settings.USER_SETTINGS_SHOW_GROUPS:
        # One read/write permission form per group the user belongs to.
        read_groups, write_groups = user_settings.initial_perm_read_and_write_groups
        for group in user.groups.all().order_by('name'):
            initial = {"read": group in read_groups, "write": group in write_groups}
            group_initial_perm_forms[group] = SettingsInitialGroupPermissionForm(request.POST or None, initial=initial,
                                                                                settings_override=user_settings_override,
                                                                                group=group)
    if request.method == "POST":
        all_valid = True
        action = request.POST.get('action')
        if action == 'password-reset':
            keycloak = Keycloak()
            keycloak.change_password(user)
            messages.add_message(request, level=messages.INFO, message='Password reset email sent',
                                 extra_tags='save-message')
        else:
            # Under OIDC the identity provider owns the core user record,
            # so the user form is only saved when OIDC is off.
            if not settings.USE_OIDC:
                if user_form.is_valid():
                    user = user_form.save()
                else:
                    all_valid = False
            for form in itertools.chain([user_contact_form, user_settings_override_form],
                                        group_initial_perm_forms.values()):
                if form.is_valid():
                    form.save()
                else:
                    all_valid = False
            add_save_message(request, all_valid, "User Settings")
    context = {
        'user': user,
        'user_form': user_form,
        'user_contact_form': user_contact_form,
        'user_settings_form': user_settings_override_form,
        'group_initial_perm_forms': group_initial_perm_forms,
        'accounts_email': settings.ACCOUNTS_EMAIL,
        'account_manage_url': settings.OIDC_USER_SERVICES,
        'override_source': override_source,
        'override_values': override_values,
        'labs_by_group_name': labs_by_group_name,
        'avatar_details': AvatarDetails.avatar_for(user)
    }
    return render(request, 'snpdb/settings/view_user_settings.html', context)
def user_settings_node_counts_tab(request):
    """Node-counts settings tab backed by the current user's own override."""
    override, _ = UserSettingsOverride.objects.get_or_create(user=request.user)
    return _settings_override_node_counts_tab(request, override)
def lab_settings_node_counts_tab(request, pk):
    """Node-counts settings tab backed by a lab's settings override."""
    lab = get_object_or_404(Lab, pk=pk)
    has_write_permission = lab.can_write(request.user)
    if has_write_permission is False:
        _add_read_only_settings_message(request, [lab])
    override, _ = LabUserSettingsOverride.objects.get_or_create(lab=lab)
    return _settings_override_node_counts_tab(request, override,
                                              has_write_permission=has_write_permission)
def organization_settings_node_counts_tab(request, pk):
    """Node-counts settings tab backed by an organization's settings override."""
    organization = get_object_or_404(Organization, pk=pk)
    has_write_permission = organization.can_write(request.user)
    if has_write_permission is False:
        _add_read_only_settings_message(request, organization.lab_set.all())
    override, _ = OrganizationUserSettingsOverride.objects.get_or_create(organization=organization)
    return _settings_override_node_counts_tab(request, override,
                                              has_write_permission=has_write_permission)
def _settings_override_node_counts_tab(request, settings_override, has_write_permission=True):
    """Render the analysis node-counts tab against a *settings* object.

    Wraps `settings_override` in a FakeAnalysis shim so the analysis
    node-counts tab view can load/save global defaults against User/Lab/Org
    settings objects instead of a real analysis.
    """
    class FakeAnalysis:
        def set_node_count_types(self, node_counts_array):
            # Persist the chosen node-count config against the settings object.
            collection, _ = NodeCountSettingsCollection.objects.get_or_create(settings=settings_override)
            AbstractNodeCountSettings.save_count_configs_from_array(collection.nodecountsettings_set, node_counts_array)

        def get_node_count_types(self):
            try:
                node_count_config = settings_override.nodecountsettingscollection
                node_count_filters = node_count_config.get_node_count_filters()
            except Exception:
                # No collection configured yet (reverse one-to-one accessor
                # raises) - fall back to defaults. Fix: was a bare `except:`,
                # which also swallowed SystemExit/KeyboardInterrupt.
                node_count_filters = BuiltInFilters.DEFAULT_NODE_COUNT_FILTERS
            return AbstractNodeCountSettings.get_types_from_labels(node_count_filters)

    fake_analysis = FakeAnalysis()
    from analysis.views.views import _analysis_settings_node_counts_tab  # Circular import
    return _analysis_settings_node_counts_tab(request, fake_analysis,
                                              pass_analysis_settings=False, has_write_permission=has_write_permission)
def view_user(request, pk):
    """Read-only profile page for a user."""
    user = get_object_or_404(User, pk=pk)
    context = {
        "user": user,
        'user_contact': UserContact.get_for_user(user),
    }
    return render(request, 'snpdb/settings/view_user.html', context)
def _add_read_only_settings_message(request, lab_list: Iterable[Lab]):
    """Show an INFO message naming who *can* modify these settings.

    lab_list: labs whose lab heads can modify settings.
    """
    lab_heads_qs = LabHead.objects.filter(lab__in=lab_list).distinct()
    lab_head_names = ", ".join(str(lab_head.user) for lab_head in lab_heads_qs)
    lab_head_msg = f" or lab heads: {lab_head_names}" if lab_head_names else ""
    read_only_message = f"Only administrators{lab_head_msg} can modify these settings"
    messages.add_message(request, messages.INFO, read_only_message)
def view_lab(request, pk):
    """View/edit a Lab: details form, settings override and initial group
    permissions; also handles the "Test Slack" debug action.
    """
    lab = get_object_or_404(Lab, pk=pk)
    lab_form = LabForm(request.POST or None, instance=lab)
    lab_settings_override = LabUserSettingsOverride.objects.get_or_create(lab=lab)[0]
    # Fields overridable at lab level (structural fields stripped).
    override_fields = set(get_model_fields(LabUserSettingsOverride)) - {"id", "settingsoverride_ptr", "lab"}
    parent_overrides = UserSettings.get_settings_overrides(organization=lab.organization)
    override_source, override_values = UserSettings.get_override_source_and_values(override_fields, parent_overrides)
    settings_overrides = parent_overrides + [lab_settings_override]
    read_groups, write_groups = UserSettings.get_initial_perm_read_and_write_groups([lab.group], settings_overrides)
    initial = {"read": lab.group in read_groups, "write": lab.group in write_groups}
    group_initial_perm_form = None
    if settings.USER_SETTINGS_SHOW_GROUPS:
        group_initial_perm_form = SettingsInitialGroupPermissionForm(request.POST or None, initial=initial,
                                                                     settings_override=lab_settings_override,
                                                                     group=lab.group)
    lab_settings_override_form = LabUserSettingsOverrideForm(request.POST or None, instance=lab_settings_override)
    has_write_permission = lab.can_write(request.user)
    # group_initial_perm_form may be None - keep only real forms.
    all_forms = [form for form in [lab_form, group_initial_perm_form, lab_settings_override_form] if form]
    if request.method == "POST":
        lab.check_can_write(request.user)
        if debug_method := request.POST.get("debug_method"):
            if "Test Slack" == debug_method:
                if not lab.slack_webhook:
                    messages.add_message(request, messages.ERROR, "Slack URL not configured correctly")
                else:
                    #try:
                    notification_builder = LabNotificationBuilder(lab=lab, message="Testing Slack Integration", notification_type=LabNotificationBuilder.NotificationType.SLACK_ONLY)
                    notification_builder.add_header(f"{settings.SITE_NAME} -> Slack Integration Test")
                    notification_builder.add_markdown("If you can see this, then integration has worked! :smile:")
                    notification_builder.send()
                    messages.add_message(request, messages.SUCCESS, "Message sent, check your Slack to confirm")
                    #except:
                    #    report_exc_info()
                    #    messages.add_message(request, messages.ERROR, "Unable to send test notification")
                return redirect(reverse('view_lab', kwargs={"pk": pk}))
            else:
                raise ValueError(f"Un-supported debug method {debug_method}")
        else:
            all_valid = True
            for form in all_forms:
                if form.is_valid():
                    form.save()
                else:
                    all_valid = False
            add_save_message(request, all_valid, "Lab Settings")
    if has_write_permission is False:
        for form in all_forms:
            set_form_read_only(form)
        # we just hide the form now
        # _add_read_only_settings_message(request, [lab])
    if settings.VARIANT_CLASSIFICATION_STATS_USE_SHARED:
        visibility = "Shared"
    else:
        visibility = "Created"  # fix: was f"Created" - f-string with no placeholders
    context = {
        "lab": lab,
        "visibility": visibility,
        "is_member": lab.is_member(request.user) or request.user.is_superuser,
        "lab_form": lab_form,
        'settings_override_form': lab_settings_override_form,
        'group_initial_perm_form': group_initial_perm_form,
        'override_source': override_source,
        'override_values': override_values,
        'has_write_permission': has_write_permission,
        'clinvar_export_enabled': clinvar_export_sync.is_enabled
    }
    return render(request, 'snpdb/settings/view_lab.html', context)
def view_clinvar_key(request, pk: str):
    """Show a ClinVarKey and the labs attached to it (access-checked)."""
    clinvar_key = get_object_or_404(ClinVarKey, pk=pk)
    clinvar_key.check_user_can_access(request.user)
    labs = Lab.objects.filter(clinvar_key=clinvar_key).order_by('name')
    context = {
        'clinvar_key': clinvar_key,
        'labs': labs,
    }
    return render(request, 'snpdb/settings/clinvar_key.html', context)
def view_organization(request, pk):
    """View/edit an organization and its settings override."""
    organization = get_object_or_404(Organization, pk=pk)
    organization_form = OrganizationForm(request.POST or None, instance=organization)
    org_settings_override, _ = OrganizationUserSettingsOverride.objects.get_or_create(organization=organization)
    # Fields overridable at org level (structural fields stripped).
    override_fields = set(get_model_fields(OrganizationUserSettingsOverride)) - {"id", "settingsoverride_ptr", "organization"}
    parent_overrides = UserSettings.get_settings_overrides()
    override_source, override_values = UserSettings.get_override_source_and_values(override_fields, parent_overrides)
    org_settings_override_form = OrganizationUserSettingsOverrideForm(request.POST or None, instance=org_settings_override)
    all_forms = [organization_form, org_settings_override_form]
    if request.method == "POST":
        organization.check_can_write(request.user)
        all_valid = True
        for form in all_forms:
            if form.is_valid():
                form.save()
            else:
                all_valid = False
        add_save_message(request, all_valid, "Organization Settings")
    has_write_permission = organization.can_write(request.user)
    if has_write_permission is False:
        for form in all_forms:
            set_form_read_only(form)
        # put on individual tabs now
        # _add_read_only_settings_message(request, organization.lab_set.all())
    context = {
        "organization": organization,
        "is_member": organization.is_member(request.user) or request.user.is_superuser,
        "organization_form": organization_form,
        'settings_override_form': org_settings_override_form,
        'override_source': override_source,
        'override_values': override_values,
        'has_write_permission': has_write_permission,
    }
    return render(request, 'snpdb/settings/view_organization.html', context)
def custom_columns(request):
    """Create a new CustomColumnsCollection for the current user."""
    form = forms.CustomColumnsCollectionForm(request.POST or None, user=request.user)
    if request.method == "POST":
        if form.is_valid():
            ccc = form.save()
            url = reverse("view_custom_columns", kwargs={"custom_columns_collection_id": ccc.pk})
            return HttpResponseRedirect(url)
        add_save_message(request, False, "Columns", created=True)
    return render(request, 'snpdb/settings/custom_columns.html', {"form": form})
# Based on code from http://j-syk.com/weblog/2012/10/18/jquery-sortables-ajax-django/
def view_custom_columns(request, custom_columns_collection_id):
    """View/edit a CustomColumnsCollection: rename it or reorder/replace
    its columns via a jQuery-sortable POST (comma-separated column ids).

    Based on code from http://j-syk.com/weblog/2012/10/18/jquery-sortables-ajax-django/
    """
    ccc = CustomColumnsCollection.get_for_user(request.user, custom_columns_collection_id)
    custom_columns_qs = VariantGridColumn.objects.filter(customcolumn__custom_columns_collection=ccc)
    my_columns = list(custom_columns_qs.order_by("customcolumn__sort_order"))
    # NOTE(review): `grid_column_name__in=my_columns` filters a name field
    # against model instances - presumably relies on their string/pk coercion;
    # verify this excludes the intended rows.
    available_columns = list(VariantGridColumn.objects.exclude(grid_column_name__in=my_columns))
    variant_grid_columns = {}
    for vgc in VariantGridColumn.objects.all():
        variant_grid_columns[vgc.pk] = vgc
    has_write_permission = ccc.can_write(request.user)
    if not has_write_permission:
        msg = "You do not have permission to edit these columns. " \
              "If you wish to customise them, click 'clone' and modify the copy"
        messages.add_message(request, messages.WARNING, msg)
    if request.method == "POST":
        ccc.check_can_write(request.user)
        # Two POST shapes: "name" renames the collection; "columns" replaces
        # the ordered column list.
        if name := request.POST.get("name"):
            ccc.name = name
            ccc.save()
        elif my_columns_str := request.POST.get("columns"):
            def update_user_columns(id_list, active):
                # NOTE(review): `active` is accepted but never used here -
                # looks like leftover from an older API; confirm before removing.
                for i, col in enumerate(id_list):
                    column = variant_grid_columns[col]
                    CustomColumn.objects.update_or_create(custom_columns_collection=ccc, column=column,
                                                          defaults={"sort_order": i})
                # Delete any not in id_list
                CustomColumn.objects.filter(custom_columns_collection=ccc).exclude(column__in=id_list).delete()
            my_columns_list = my_columns_str.split(',') if my_columns_str else []
            active = 'my_columns' in request.POST
            update_user_columns(my_columns_list, active)
            return HttpResponse()  # Nobody ever looks at this
    context_dict = {
        'available_columns_list': available_columns,
        'my_columns_list': my_columns,
        'custom_columns': ccc,
        'has_write_permission': has_write_permission,
    }
    return render(request, 'snpdb/settings/view_custom_columns.html', context_dict)
def tag_settings(request):
    """List the user's tag styles/colors and allow creating a new Tag."""
    form = forms.CreateTagForm(request.POST or None)
    if request.method == "POST":
        valid = form.is_valid()
        if valid:
            tag_name = form.cleaned_data['tag']
            name = f"Tag {tag_name}"
            try:
                Tag.objects.create(pk=tag_name)
            except Exception:
                # Most likely the tag already exists (IntegrityError).
                # Fix: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt.
                valid = False
        else:
            name = "Tag"
        add_save_message(request, valid, name, created=True)
    user_tag_styles, user_tag_colors = UserTagColors.get_tag_styles_and_colors(request.user)
    context_dict = {'form': form,
                    'user_tag_styles': user_tag_styles,
                    'user_tag_colors': user_tag_colors}
    return render(request, 'snpdb/settings/tag_settings.html', context_dict)
@require_POST
def set_user_tag_color(request):
    """Save a per-user RGB color for a tag."""
    tag = request.POST['tag']
    rgb = request.POST['rgb']
    user_tag_color, _ = UserTagColors.objects.get_or_create(user=request.user, tag_id=tag)
    user_tag_color.rgb = rgb
    user_tag_color.save()
    logging.info("saved %s", user_tag_color)
    return HttpResponse()
def igv_integration(request):
    """Edit the user's IGV path-mapping prefixes via an inline formset."""
    widgets = {
        "prefix": TextInput(attrs={'placeholder': 'from...'}),
        "replacement": TextInput(attrs={'placeholder': 'to...'}),
    }
    formset_cls = inlineformset_factory(User,
                                        UserDataPrefix,
                                        can_delete=True,
                                        fields=ALL_FIELDS,
                                        widgets=widgets,
                                        max_num=10,
                                        extra=3)
    formset = formset_cls(request.POST or None, instance=request.user)
    if request.method == "POST":
        valid = formset.is_valid()
        if valid:
            formset.save()
        add_save_message(request, valid, "IGV Integration")
    context_dict = {
        'user': request.user,
        'formset': formset,
        'example_replacements': get_example_replacements(request.user),
    }
    return render(request, 'snpdb/settings/igv_integration.html', context_dict)
def cohorts(request):
    """List/create cohorts; new cohorts default to the user's genome build."""
    user_settings = UserSettings.get_for_user(request.user)
    initial = {'genome_build': user_settings.default_genome_build}
    form = forms.CreateCohortForm(request.POST or None, initial=initial)
    if request.method == "POST":
        if form.is_valid():
            cohort = form.save()
            # Creator (and their groups) get access to the new cohort.
            assign_permission_to_user_and_groups(request.user, cohort)
            return HttpResponseRedirect(reverse('view_cohort', kwargs={'cohort_id': cohort.pk}))
        add_save_message(request, False, "Cohort", created=True)
    return render(request, 'snpdb/patients/cohorts.html', {"form": form})
def view_cohort_details_tab(request, cohort_id):
    """Read-only details tab for a cohort."""
    cohort = Cohort.get_for_user(request.user, cohort_id)
    context = {
        "cohort": cohort,
        "has_write_permission": cohort.can_write(request.user),
    }
    return render(request, 'snpdb/patients/view_cohort_details_tab.html', context)
def view_cohort(request, cohort_id):
    """View/edit a cohort; VCF-backed cohorts redirect to the VCF page."""
    cohort = Cohort.get_for_user(request.user, cohort_id)
    if cohort.vcf:
        return redirect('view_vcf', vcf_id=cohort.vcf.pk)
    try:
        cohort_genotype_collection = cohort.cohort_genotype_collection
    except CohortGenotypeCollection.DoesNotExist:
        # No genotypes processed for this cohort yet.
        cohort_genotype_collection = None
    form = forms.CohortForm(request.POST or None, instance=cohort)
    if request.method == "POST":
        valid = form.is_valid()
        if valid:
            cohort = form.save()
        add_save_message(request, valid, "Cohort")
    sample_form = SampleChoiceForm(genome_build=cohort.genome_build)
    sample_form.fields['sample'].required = False
    context = {
        "form": form,
        "sample_form": sample_form,
        "cohort": cohort,
        "cohort_genotype_collection": cohort_genotype_collection,
        "has_write_permission": cohort.can_write(request.user),
    }
    return render(request, 'snpdb/patients/view_cohort.html', context)
def cohort_sample_edit(request, cohort_id):
cohort = Cohort.get_for_user(request.user, cohort_id)
if request.method == "POST":
cohort_op = request.POST['cohort_op']
sample_ids_str = request.POST['sample_ids']
sample_ids = json.loads(sample_ids_str)
if cohort_op == 'add':
for sample_id in sample_ids:
cohort.add_sample(sample_id)
elif cohort_op == 'remove':
for sample_id | |
str,
Optional("output-bytes"): str,
Optional("ipv6-transit-statistics"): {
"input-bytes": str,
"input-packets": str,
"output-bytes": str,
"output-packets": str,
},
},
Optional("transit-traffic-statistics"): {
"input-bps": str,
"input-bytes": str,
"input-packets": str,
"input-pps": str,
Optional("ipv6-transit-statistics"): {
"input-bps": str,
"input-bytes": str,
"input-packets": str,
"input-pps": str,
"output-bps": str,
"output-bytes": str,
"output-packets": str,
"output-pps": str
},
"output-bps": str,
"output-bytes": str,
"output-packets": str,
"output-pps": str
}
})
# Validate each dictionary in list
for item in value:
l_i_schema.validate(item)
return value
def verify_queue_list(value):
# Pass address-family list of dict in value
if not isinstance(value, list):
raise SchemaError('queue is not a list')
queue_schema = Schema({
"queue-counters-queued-packets": str,
"queue-counters-total-drop-packets": str,
"queue-counters-trans-packets": str,
"queue-number": str
})
# Validate each dictionary in list
for item in value:
queue_schema.validate(item)
return value
# Create physical-interface Schema
physical_interface_schema = Schema({
Optional("active-alarms"): {
Optional("interface-alarms"): {
Optional("alarm-not-present"): bool,
Optional("ethernet-alarm-link-down"): bool,
}
},
Optional("active-defects"): {
Optional("interface-alarms"): {
Optional("alarm-not-present"): bool,
Optional("ethernet-alarm-link-down"): bool
}
},
Optional("admin-status"): {
Optional("#text"): str,
Optional("@junos:format"): str
},
Optional("bpdu-error"): str,
Optional("clocking"): str,
Optional("current-physical-address"): str,
Optional("description"): str,
Optional("eth-switch-error"): str,
Optional("ethernet-fec-mode"): {
Optional("@junos:style"): str,
"enabled_fec_mode": str
},
Optional("ethernet-fec-statistics"): {
Optional("@junos:style"): str,
"fec_ccw_count": str,
"fec_ccw_error_rate": str,
"fec_nccw_count": str,
"fec_nccw_error_rate": str
},
Optional("ethernet-pcs-statistics"): {
Optional("@junos:style"): str,
"bit-error-seconds": str,
"errored-blocks-seconds": str
},
Optional("hardware-physical-address"): str,
Optional("if-config-flags"): {
Optional("internal-flags"): str,
"iff-snmp-traps": bool,
Optional("iff-hardware-down"): bool,
},
Optional("if-auto-negotiation"): str,
"if-device-flags": {
"ifdf-present": bool,
"ifdf-running": bool,
Optional("ifdf-loopback"): bool,
Optional("ifdf-down"): bool,
},
Optional("if-flow-control"): str,
Optional("if-media-flags"): {
"ifmf-none": bool
},
Optional("if-remote-fault"): str,
Optional("if-type"): str,
Optional("ifd-specific-config-flags"): {
Optional("internal-flags"): str
},
Optional("interface-flapped"): {
"#text": str,
Optional("@junos:seconds"): str
},
Optional("interface-transmit-statistics"): str,
Optional("l2pt-error"): str,
Optional("ld-pdu-error"): str,
Optional("link-level-type"): str,
Optional("link-type"): str,
Optional("link-mode"): str,
Optional("local-index"): str,
Optional("logical-interface"): Use(verify_logical_interface_list),
Optional("loopback"): str,
Optional("lsi-traffic-statistics"): {
Optional("@junos:style"): str,
"input-bps": str,
"input-bytes": str,
"input-packets": str,
"input-pps": str
},
Optional("mru"): str,
Optional("mtu"): str,
"name": str,
Optional("oper-status"): str,
Optional("pad-to-minimum-frame-size"): str,
Optional("physical-interface-cos-information"): {
"physical-interface-cos-hw-max-queues": str,
"physical-interface-cos-use-max-queues": str
},
Optional("snmp-index"): str,
Optional("sonet-mode"): str,
Optional("source-filtering"): str,
Optional("speed"): str,
Optional("stp-traffic-statistics"): {
Optional("@junos:style"): str,
Optional("stp-input-bytes-dropped"): str,
Optional("stp-input-packets-dropped"): str,
Optional("stp-output-bytes-dropped"): str,
Optional("stp-output-packets-dropped"): str
},
Optional("traffic-statistics"): {
Optional("@junos:style"): str,
Optional("input-bps"): str,
Optional("output-bytes"): str,
Optional("input-bytes"): str,
Optional("input-packets"): str,
Optional("input-pps"): str,
Optional("output-bps"): str,
Optional("output-packets"): str,
Optional("output-pps"): str,
Optional("ipv6-transit-statistics"): {
Optional("input-bps"): str,
Optional("input-bytes"): str,
Optional("input-packets"): str,
Optional("input-pps"): str,
Optional("output-bps"): str,
Optional("output-bytes"): str,
Optional("output-packets"): str,
Optional("output-pps"): str
},
},
Optional("output-error-list"): {
Optional("aged-packets"): str,
Optional("carrier-transitions"): str,
Optional("hs-link-crc-errors"): str,
Optional("mtu-errors"): str,
Optional("output-collisions"): str,
Optional("output-drops"): str,
Optional("output-errors"): str,
Optional("output-fifo-errors"): str,
Optional("output-resource-errors"): str
},
Optional("ethernet-mac-statistics"): {
Optional("@junos:style"): str,
"input-broadcasts": str,
"input-bytes": str,
"input-code-violations": str,
"input-crc-errors": str,
"input-fifo-errors": str,
"input-fragment-frames": str,
"input-jabber-frames": str,
"input-mac-control-frames": str,
"input-mac-pause-frames": str,
"input-multicasts": str,
"input-oversized-frames": str,
"input-packets": str,
Optional("input-total-errors"): str,
"input-unicasts": str,
"input-vlan-tagged-frames": str,
"output-broadcasts": str,
"output-bytes": str,
"output-crc-errors": str,
"output-fifo-errors": str,
"output-mac-control-frames": str,
"output-mac-pause-frames": str,
"output-multicasts": str,
"output-packets": str,
Optional("output-total-errors"): str,
"output-unicasts": str
},
Optional("input-error-list"): {
Optional("framing-errors"): str,
Optional("input-discards"): str,
Optional("input-drops"): str,
Optional("input-errors"): str,
Optional("input-fifo-errors"): str,
Optional("input-giants"): str,
Optional("input-l2-channel-errors"): str,
Optional("input-l2-mismatch-timeouts"): str,
Optional("input-l3-incompletes"): str,
Optional("input-resource-errors"): str,
Optional("input-runts"): str
},
Optional("transit-traffic-statistics"): {
"input-bps": str,
"input-bytes": str,
"input-packets": str,
"input-pps": str,
Optional("ipv6-transit-statistics"): {
"input-bps": str,
"input-bytes": str,
"input-packets": str,
"input-pps": str,
"output-bps": str,
"output-bytes": str,
"output-packets": str,
"output-pps": str
},
"output-bps": str,
"output-bytes": str,
"output-packets": str,
"output-pps": str
},
Optional("queue-counters"): {
Optional("@junos:style"): str,
"interface-cos-short-summary": {
"intf-cos-num-queues-in-use": str,
"intf-cos-num-queues-supported": str,
},
"queue": Use(verify_queue_list)
},
})
# Validate each dictionary in list
for item in value:
physical_interface_schema.validate(item)
return value
schema = {
Optional("@xmlns:junos"): str,
"interface-information": {
Optional("@junos:style"): str,
Optional("@xmlns"): str,
"physical-interface": Use(verify_physical_interface_list)
}
}
class ShowInterfaces(ShowInterfacesSchema):
cli_command = ['show interfaces']
def cli(self, output=None):
if not output:
out = self.device.execute(self.cli_command[0])
else:
out = output
ret_dict = {}
statistics_type = None
# Physical interface: ge-0/0/0, Enabled, Physical link is Up
p1 = re.compile(r'^Physical +interface: +(?P<name>\S+), +'
r'(?P<admin_status>\S+), +Physical +link +is +(?P<oper_status>\S+)$')
# Interface index: 148, SNMP ifIndex: 526
p2 = re.compile(r'^Interface +index: +(?P<local_index>\d+), +'
r'SNMP +ifIndex: +(?P<snmp_index>\d+)'
r'(, +Generation: +\S+)$')
# Description: none/100G/in/hktGCS002_ge-0/0/0
p3 = re.compile(r'^Description: +(?P<description>\S+)$')
# Link-level type: Ethernet, MTU: 1514, MRU: 1522, LAN-PHY mode, Speed: 1000mbps, BPDU Error: None,
# Link-level type: Ethernet, MTU: 1514, Link-mode: Full-duplex, Speed: 1000mbps,
p4 = re.compile(r'^(Type: +\S+, )?Link-level +type: +'
r'(?P<link_level_type>\S+), +MTU: +(?P<mtu>\S+)'
r'(, +MRU: +(?P<mru>\d+))?(, +(?P<sonet_mode>\S+) +mode)?'
r'(, +Link-mode: +(?P<link_mode>\S+))?'
r'(, +Speed: +(?P<speed>\S+))?(, +BPDU +Error: +'
r'(?P<bpdu_error>\S+),)?$')
# Speed: 1000mbps, BPDU Error: None, Loop Detect PDU Error: None,
p4_1 = re.compile(r'^(Speed: +(?P<speed>[^\s,]+))(, +)?'
r'(BPDU +Error: +(?P<bpdu_error>[^\s,]+))?(, +)?'
r'(Loop +Detect +PDU +Error: +(?P<ld_pdu_error>[^\s,]+))?(, +)?')
# Link-level type: Ethernet, MTU: 1514, MRU: 1522, LAN-PHY mode, Speed: 1000mbps, BPDU Error: None, Loop Detect PDU Error: None, Ethernet-Switching Error: None, MAC-REWRITE Error: None,
p4_2 = re.compile(r'^Link-level +type: +(?P<link_level_type>\S+), +MTU: +(?P<mtu>\S+)'
r'(, +MRU: +(?P<mru>\d+))?(, +(?P<sonet_mode>\S+) +mode)?'
r'(, +Speed: +(?P<speed>\S+))?(, +BPDU +Error: +(?P<bpdu_error>\S+),)?'
r'( +Loop +Detect +PDU +Error: +(?P<ld_pdu_error>\S+),)?'
r'( +Ethernet-Switching +Error: +(?P<eth_switch_error>\S+),)?'
r'( +MAC-REWRITE +Error: +\S+)?$')
# Loop Detect PDU Error: None, Ethernet-Switching Error: None, MAC-REWRITE Error: None, Loopback: Disabled,
p5 = re.compile(r'^Loop +Detect +PDU +Error: +(?P<ld_pdu_error>\S+), +'
r'Ethernet-Switching +Error: +(?P<eth_switch_error>\S+), +MAC-REWRITE +'
r'Error: +\S+, +Loopback: +(?P<loopback>\S+),$')
# Ethernet-Switching Error: None, MAC-REWRITE Error: None, Loopback: Disabled,
p5_1 = re.compile(r'^(Ethernet-Switching +Error: +(?P<eth_switch_error>[^\s,]+))'
r'(, +)?(MAC-REWRITE +Error: +[^\s,]+)?(, +)?'
r'(Loopback: +(?P<loopback>[^\s,]+))(, +)?')
# Loopback: Disabled, Source filtering: Disabled, Flow control: Enabled, Auto-negotiation: Enabled, Remote fault: Online
p5_2 = re.compile(r'^(Loopback: +(?P<loopback>\S+),)?'
r'( +Source +filtering: +(?P<source_filtering>\S+),)?'
r'( +Flow +control: +(?P<if_flow_control>\S+),)?'
r'( +Auto-negotiation: +(?P<if_auto_negotiation>\S+),)?'
r'( +Remote +fault: +(?P<if_remote_fault>\S+))$')
# Source filtering: Disabled, Flow control: Enabled, Auto-negotiation: Enabled, Remote fault: Online
p6 = re.compile(r'^Source +filtering: +(?P<source_filtering>\S+), +'
r'Flow +control: +(?P<if_flow_control>\S+), +'
r'Auto-negotiation: +(?P<if_auto_negotiation>\S+), +'
r'Remote +fault: +(?P<if_remote_fault>\S+)$')
# Pad to minimum frame size: Disabled
p7 = re.compile(r'^Pad +to +minimum +frame +size: +'
r'(?P<pad_to_minimum_frame_size>\S+)$')
# Device flags : Present Running
p8 = re.compile(r'^Device +flags +: +(?P<if_device_flags>[\S\s]+)$')
# Interface flags: SNMP-Traps Internal: 0x4000
p9 = re.compile(r'^Interface +flags:( +(?P<hardware_down>Hardware-Down))? +'
r'(?P<iff_snmp_traps>\S+)( +Internal: +(?P<internal_flags>\S+))?$')
# Link flags : None
p10 = re.compile(r'^Link +flags +: +(?P<if_media_flags>\S+)$')
# Link type : Full-Duplex
p10_1 = re.compile(r'^Link +type +: +(?P<link_type>\S+)$')
# CoS queues : 8 supported, 8 maximum usable queues
p11 = re.compile(r'^CoS +queues +: +(?P<physical_interface_cos_hw_max_queues>\d+) +'
r'supported, +(?P<physical_interface_cos_use_max_queues>\d+) maximum +'
r'usable +queues$')
# Current address: 00:50:56:ff:56:b6, Hardware address: 00:50:56:ff:56:b6
p12 = re.compile(r'^Current +address: +(?P<current_physical_address>\S+), +'
r'Hardware +address: +(?P<hardware_physical_address>\S+)$')
# Last flapped : 2019-08-29 09:09:19 UTC (29w6d 18:56 ago)
p13 = re.compile(r'^Last +flapped +: +(?P<interface_flapped>[\S\s]+)$')
# Input rate : 2952 bps (5 pps)
p14 = re.compile(r'^Input +rate +: +(?P<input_bps>\d+) +'
r'bps +\((?P<input_pps>\d+) +pps\)$')
# Input bytes : 19732539397 3152 bps
p14_1 = re.compile(r'^Input +bytes *: +(?P<input_bytes>\S+)'
r'( +(?P<input_bps>\S+) +bps)?$')
# Output bytes : 16367814635 3160 bps
p14_2 = re.compile(r'^Output +bytes *: +(?P<output_bytes>\S+)'
r'( +(?P<output_bps>\S+) +bps)?$')
# Input packets: 133726363 5 pps
p14_3 = re.compile(r'^Input +packets *: +(?P<input_packets>\S+)'
r'( +(?P<input_pps>\S+) +pps)?$')
# Output packets: 129306863 4 pps
p14_4 = re.compile(r'^Output +packets *: +(?P<output_packets>\S+)'
r'( +(?P<output_pps>\S+) +pps)?$')
# Output rate : 3080 bps (3 pps)
p15 = re.compile(r'^Output +rate +: +(?P<output_bps>\d+) +'
r'bps +\((?P<output_pps>\d+) +pps\)$')
# Active alarms : None
p16 = re.compile(r'^Active +alarms *: +(?P<active_alarms>\S+)$')
# Active defects : None
p17 = re.compile(r'^Active +defects *: +(?P<active_defects>\S+)$')
# PCS statistics Seconds
p18 = re.compile(r'^PCS +statistics +Seconds$')
# Bit errors 0
p19 = re.compile(r'^Bit +errors +(?P<bit_error_seconds>\d+)$')
# Errored blocks 0
p20 = re.compile(r'^Errored +blocks +(?P<errored_blocks_seconds>\d+)$')
# Ethernet FEC statistics Errors
p21 = re.compile(r'^Ethernet +FEC +statistics +Errors$')
# FEC Corrected Errors 0
# FEC Uncorrected Errors 0
# FEC Corrected Errors Rate 0
# FEC Uncorrected Errors Rate 0
p22 = re.compile(r'^FEC +Corrected +Errors +(?P<fec_ccw_count>\d+)$')
p22_1 = re.compile(r'^FEC +Uncorrected +Errors +(?P<fec_nccw_count>\d+)$')
p22_2 = re.compile(r'^FEC +Corrected +Errors +Rate +(?P<fec_ccw_error_rate>\d+)$')
p22_3 = re.compile(r'^FEC +Uncorrected +Errors +Rate +(?P<fec_nccw_error_rate>\d+)$')
# Interface transmit statistics: Disabled
p23 = re.compile(r'^Interface +transmit +statistics: +'
r'(?P<interface_transmit_statistics>\S+)$')
# Logical interface ge-0/0/0.0 (Index 333) (SNMP ifIndex 606)
p24 = re.compile(r'^Logical +interface +(?P<name>\S+) +'
r'\(Index +(?P<local_index>\d+)\) +\(SNMP +ifIndex +'
r'(?P<snmp_index>\d+)\)( +\(Generation +\S+\))?$')
# Flags: Up SNMP-Traps 0x4004000 Encapsulation: ENET2
# Flags: Up SNMP-Traps 0x4000 VLAN-Tag [ 0x8100.1 ] Encapsulation: ENET2
p25 = re.compile(r'^Flags: +(?P<iff_up>\S+)( +SNMP-Traps)?'
r'( +(?P<internal_flags>\S+))?( +VLAN-Tag +\[[\S\s]+\])? +'
r'Encapsulation: +(?P<encapsulation>\S+)$')
# Input packets : 133657033
p26 = re.compile(r'^Input +packets *: +(?P<input_packets>\S+)$')
# Output packets: 129243982
p27 = re.compile(r'^Output +packets *: +(?P<output_packets>\S+)$')
# Protocol inet, MTU: 1500, Maximum labels: 2
# Protocol inet, MTU: 1500, Generation: 150, Route table: | |
# Repo: fief-dev/fief - file: backend/tests/test_apps_auth_auth.py
from typing import Dict, Optional
import httpx
import pytest
from fastapi import status
from fief.crypto.token import get_token_hash
from fief.db import AsyncSession
from fief.managers import GrantManager, LoginSessionManager, SessionTokenManager
from fief.services.response_type import DEFAULT_RESPONSE_MODE, HYBRID_RESPONSE_TYPES
from fief.settings import settings
from tests.data import TestData, session_token_tokens
from tests.helpers import authorization_code_assertions, get_params_by_response_mode
from tests.types import TenantParams
@pytest.mark.asyncio
@pytest.mark.workspace_host
class TestAuthAuthorize:
    """Tests for GET /authorize, the entry point of the OAuth2/OIDC flows."""
    @pytest.mark.parametrize(
        "params,error",
        [
            pytest.param(
                {
                    "response_type": "code",
                    "redirect_uri": "https://nantes.city/callback",
                    "scope": "openid",
                },
                "invalid_client",
                id="Missing client_id",
            ),
            pytest.param(
                {
                    "response_type": "code",
                    "client_id": "INVALID_CLIENT_ID",
                    "redirect_uri": "https://nantes.city/callback",
                    "scope": "openid",
                },
                "invalid_client",
                id="Invalid client",
            ),
            pytest.param(
                {
                    "response_type": "code",
                    "client_id": "DEFAULT_TENANT_CLIENT_ID",
                    "scope": "openid",
                },
                "invalid_redirect_uri",
                id="Missing redirect_uri",
            ),
            pytest.param(
                {
                    "response_type": "code",
                    "client_id": "DEFAULT_TENANT_CLIENT_ID",
                    "redirect_uri": "https://bordeaux.city/callback",
                    "scope": "openid",
                },
                "invalid_redirect_uri",
                id="Not authorized redirect_uri",
            ),
        ],
    )
    async def test_authorize_error(
        self,
        tenant_params: TenantParams,
        test_client_auth: httpx.AsyncClient,
        params: Dict[str, str],
        error: str,
    ):
        """Client or redirect URI errors are answered directly with HTTP 400:
        the redirect target cannot be trusted, so no redirect is performed."""
        response = await test_client_auth.get(
            f"{tenant_params.path_prefix}/authorize", params=params
        )
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        headers = response.headers
        # The server surfaces the error code in a custom header.
        assert headers["X-Fief-Error"] == error
    @pytest.mark.parametrize(
        "params,error",
        [
            pytest.param(
                {
                    "client_id": "DEFAULT_TENANT_CLIENT_ID",
                    "redirect_uri": "https://nantes.city/callback",
                    "scope": "openid",
                },
                "invalid_request",
                id="Missing response_type",
            ),
            pytest.param(
                {
                    "response_type": "magic_wand",
                    "client_id": "DEFAULT_TENANT_CLIENT_ID",
                    "redirect_uri": "https://nantes.city/callback",
                    "scope": "openid",
                },
                "invalid_request",
                id="Invalid response_type",
            ),
            pytest.param(
                {
                    "response_type": "code",
                    "client_id": "DEFAULT_TENANT_CLIENT_ID",
                    "redirect_uri": "https://nantes.city/callback",
                },
                "invalid_request",
                id="Missing scope",
            ),
            pytest.param(
                {
                    "response_type": "code",
                    "client_id": "DEFAULT_TENANT_CLIENT_ID",
                    "redirect_uri": "https://nantes.city/callback",
                    "scope": "user",
                },
                "invalid_scope",
                id="Missing openid scope",
            ),
            pytest.param(
                {
                    "response_type": "code",
                    "client_id": "DEFAULT_TENANT_CLIENT_ID",
                    "redirect_uri": "https://nantes.city/callback",
                    "scope": "openid",
                    "prompt": "INVALID_PROMPT",
                },
                "invalid_request",
                id="Invalid prompt",
            ),
            pytest.param(
                {
                    "response_type": "code",
                    "client_id": "DEFAULT_TENANT_CLIENT_ID",
                    "redirect_uri": "https://nantes.city/callback",
                    "scope": "openid",
                    "prompt": "none",
                },
                "login_required",
                id="None prompt without session",
            ),
            pytest.param(
                {
                    "response_type": "code",
                    "client_id": "DEFAULT_TENANT_CLIENT_ID",
                    "redirect_uri": "https://nantes.city/callback",
                    "scope": "openid",
                    "prompt": "consent",
                },
                "login_required",
                id="Consent prompt without session",
            ),
            pytest.param(
                {
                    "response_type": "code",
                    "client_id": "DEFAULT_TENANT_CLIENT_ID",
                    "redirect_uri": "https://nantes.city/callback",
                    "scope": "openid",
                    "screen": "INVALID_SCREEN",
                },
                "invalid_request",
                id="Invalid screen",
            ),
            pytest.param(
                {
                    "response_type": "code",
                    "client_id": "DEFAULT_TENANT_CLIENT_ID",
                    "redirect_uri": "https://nantes.city/callback",
                    "scope": "openid",
                    "request": "REQUEST_PARAMETER",
                },
                "request_not_supported",
                id="Use of unsupported request parameter",
            ),
            pytest.param(
                {
                    "response_type": "code id_token",
                    "client_id": "DEFAULT_TENANT_CLIENT_ID",
                    "redirect_uri": "https://nantes.city/callback",
                    "scope": "openid",
                    "request": "REQUEST_PARAMETER",
                },
                "request_not_supported",
                id="Use of unsupported request parameter with a Hybrid flow without nonce",
            ),
            pytest.param(
                {
                    "response_type": "code",
                    "client_id": "DEFAULT_TENANT_CLIENT_ID",
                    "redirect_uri": "https://nantes.city/callback",
                    "scope": "openid",
                    "code_challenge": "CODE_CHALLENGE",
                    "code_challenge_method": "UNSUPPORTED_METHOD",
                },
                "invalid_request",
                id="Invalid code_challenge_method",
            ),
            *[
                pytest.param(
                    {
                        "response_type": response_type,
                        "client_id": "DEFAULT_TENANT_CLIENT_ID",
                        "redirect_uri": "https://nantes.city/callback",
                        "scope": "openid",
                    },
                    "invalid_request",
                    id=f"Missing nonce in Hybrid flow with {response_type}",
                )
                for response_type in HYBRID_RESPONSE_TYPES
            ],
        ],
    )
    async def test_authorize_redirect_error(
        self,
        tenant_params: TenantParams,
        test_client_auth: httpx.AsyncClient,
        params: Dict[str, str],
        error: str,
    ):
        """Request-level errors are reported back to the client application via
        a redirect carrying the error code, per the OAuth2 specification."""
        response = await test_client_auth.get(
            f"{tenant_params.path_prefix}/authorize", params=params
        )
        assert response.status_code == status.HTTP_302_FOUND
        redirect_uri = response.headers["Location"]
        assert redirect_uri.startswith("https://nantes.city/callback")
        # The error travels in the query string or the fragment depending on
        # the response mode tied to the requested response_type.
        try:
            response_mode = DEFAULT_RESPONSE_MODE[params["response_type"]]
        except KeyError:
            response_mode = "query"
        redirect_params = get_params_by_response_mode(redirect_uri, response_mode)
        assert redirect_params["error"] == error
    @pytest.mark.parametrize(
        "params,session,redirection",
        [
            pytest.param(
                {"response_type": "code"}, False, "/login", id="Default login screen"
            ),
            pytest.param(
                {"response_type": "code", "screen": "login"},
                False,
                "/login",
                id="Login screen",
            ),
            pytest.param(
                {"response_type": "code", "screen": "register"},
                False,
                "/register",
                id="Register screen",
            ),
            pytest.param(
                {"response_type": "code"}, True, "/consent", id="No prompt with session"
            ),
            pytest.param(
                {"response_type": "code", "prompt": "none"},
                True,
                "/consent",
                id="None prompt with session",
            ),
            pytest.param(
                {"response_type": "code", "prompt": "consent"},
                True,
                "/consent",
                id="Consent prompt with session",
            ),
            pytest.param(
                {"response_type": "code", "prompt": "login"},
                True,
                "/login",
                id="Login prompt with session",
            ),
            pytest.param(
                {"response_type": "code", "nonce": "NONCE"},
                False,
                "/login",
                id="Provided nonce value",
            ),
            pytest.param(
                {"response_type": "code", "max_age": 3600},
                True,
                "/consent",
                id="max_age one hour ago",
            ),
            pytest.param(
                {"response_type": "code", "max_age": 0},
                True,
                "/login",
                id="max_age now",
            ),
            pytest.param(
                {"response_type": "code", "code_challenge": "CODE_CHALLENGE"},
                False,
                "/login",
                id="code_challenge without method",
            ),
            pytest.param(
                {
                    "response_type": "code",
                    "code_challenge": "CODE_CHALLENGE",
                    "code_challenge_method": "S256",
                },
                False,
                "/login",
                id="code_challenge with specified method",
            ),
            *[
                pytest.param(
                    {
                        "response_type": response_type,
                        "nonce": "NONCE",
                    },
                    False,
                    "/login",
                    id=f"Hybrid flow with {response_type}",
                )
                for response_type in HYBRID_RESPONSE_TYPES
            ],
        ],
    )
    async def test_valid(
        self,
        params: Dict[str, str],
        session: bool,
        redirection: str,
        tenant_params: TenantParams,
        test_client_auth: httpx.AsyncClient,
        workspace_session: AsyncSession,
    ):
        """A well-formed authorize request persists a login session and
        redirects to the login, register or consent screen depending on the
        prompt/screen parameters and on whether a session already exists."""
        params = {
            **params,
            "client_id": tenant_params.client.client_id,
            "redirect_uri": "https://nantes.city/callback",
            "scope": "openid",
        }
        cookies = {}
        if session:
            # Simulate an already authenticated user on this tenant.
            cookies[settings.session_cookie_name] = tenant_params.session_token_token[0]
        response = await test_client_auth.get(
            f"{tenant_params.path_prefix}/authorize", params=params, cookies=cookies
        )
        assert response.status_code == status.HTTP_302_FOUND
        location = response.headers["Location"]
        assert location.endswith(f"{tenant_params.path_prefix}{redirection}")
        # A login session must have been created and referenced by the cookie.
        login_session_cookie = response.cookies[settings.login_session_cookie_name]
        login_session_manager = LoginSessionManager(workspace_session)
        login_session = await login_session_manager.get_by_token(login_session_cookie)
        assert login_session is not None
        if "nonce" in params:
            assert login_session.nonce == params["nonce"]
        if "code_challenge" in params:
            assert login_session.code_challenge == params["code_challenge"]
            if "code_challenge_method" in params:
                assert (
                    login_session.code_challenge_method
                    == params["code_challenge_method"]
                )
            else:
                # Per RFC 7636, the method defaults to "plain" when omitted.
                assert login_session.code_challenge_method == "plain"
        if params["response_type"] in ["code"]:
            assert login_session.response_mode == "query"
        else:
            # Hybrid/implicit response types use the fragment response mode.
            assert login_session.response_mode == "fragment"
@pytest.mark.asyncio
@pytest.mark.workspace_host
class TestAuthGetLogin:
    """Tests for GET /login on a tenant."""
    @pytest.mark.parametrize("cookie", [None, "INVALID_LOGIN_SESSION"])
    async def test_invalid_login_session(
        self,
        cookie: Optional[str],
        tenant_params: TenantParams,
        test_client_auth: httpx.AsyncClient,
    ):
        """A missing or unknown login session cookie yields an HTTP 400."""
        request_cookies = (
            {} if cookie is None else {settings.login_session_cookie_name: cookie}
        )
        response = await test_client_auth.get(
            f"{tenant_params.path_prefix}/login", cookies=request_cookies
        )
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert response.headers["X-Fief-Error"] == "invalid_session"
    async def test_valid(
        self, test_client_auth: httpx.AsyncClient, test_data: TestData
    ):
        """A valid login session cookie renders the login page."""
        login_session = test_data["login_sessions"]["default"]
        tenant = login_session.client.tenant
        # The default tenant is mounted at the root; others under their slug.
        path_prefix = "" if tenant.default else tenant.slug
        request_cookies = {settings.login_session_cookie_name: login_session.token}
        response = await test_client_auth.get(
            f"{path_prefix}/login", cookies=request_cookies
        )
        assert response.status_code == status.HTTP_200_OK
@pytest.mark.asyncio
@pytest.mark.workspace_host
class TestAuthPostLogin:
    """Tests for POST /login: credential verification and session creation."""
    @pytest.mark.parametrize("cookie", [None, "INVALID_LOGIN_SESSION"])
    async def test_invalid_login_session(
        self,
        cookie: Optional[str],
        tenant_params: TenantParams,
        test_client_auth: httpx.AsyncClient,
    ):
        """Posting credentials without a valid login session is rejected."""
        cookies = {}
        if cookie is not None:
            cookies[settings.login_session_cookie_name] = cookie
        response = await test_client_auth.post(
            f"{tenant_params.path_prefix}/login", cookies=cookies
        )
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        headers = response.headers
        assert headers["X-Fief-Error"] == "invalid_session"
    async def test_bad_credentials(
        self, test_client_auth: httpx.AsyncClient, test_data: TestData
    ):
        """Wrong username/password pair yields a bad_credentials error."""
        login_session = test_data["login_sessions"]["default"]
        client = login_session.client
        tenant = client.tenant
        # The default tenant is mounted at the root; others under their slug.
        path_prefix = tenant.slug if not tenant.default else ""
        cookies = {}
        cookies[settings.login_session_cookie_name] = login_session.token
        # NOTE(review): the credential literals below look like anonymization
        # placeholders from the dataset, not real fixture values — confirm
        # against the upstream test data.
        response = await test_client_auth.post(
            f"{path_prefix}/login",
            data={
                "username": "<EMAIL>",
                "password": "<PASSWORD>",
            },
            cookies=cookies,
        )
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        headers = response.headers
        assert headers["X-Fief-Error"] == "bad_credentials"
    async def test_valid(
        self,
        test_client_auth: httpx.AsyncClient,
        test_data: TestData,
        workspace_session: AsyncSession,
    ):
        """Valid credentials redirect to /consent and persist a session token."""
        login_session = test_data["login_sessions"]["default"]
        client = login_session.client
        tenant = client.tenant
        path_prefix = tenant.slug if not tenant.default else ""
        cookies = {}
        cookies[settings.login_session_cookie_name] = login_session.token
        response = await test_client_auth.post(
            f"{path_prefix}/login",
            data={
                "username": "<EMAIL>chy",
                "password": "<PASSWORD>",
            },
            cookies=cookies,
        )
        assert response.status_code == status.HTTP_302_FOUND
        redirect_uri = response.headers["Location"]
        assert redirect_uri.endswith(f"{path_prefix}/consent")
        # Only the hash of the session token is stored server-side, so the
        # cookie value must be hashed before looking it up.
        session_cookie = response.cookies[settings.session_cookie_name]
        session_token_manager = SessionTokenManager(workspace_session)
        session_token = await session_token_manager.get_by_token(
            get_token_hash(session_cookie)
        )
        assert session_token is not None
    async def test_valid_with_session(
        self,
        test_client_auth: httpx.AsyncClient,
        test_data: TestData,
        workspace_session: AsyncSession,
    ):
        """Logging in while already holding a session rotates it: a new session
        token is issued and the previous one is deleted."""
        login_session = test_data["login_sessions"]["default"]
        client = login_session.client
        tenant = client.tenant
        path_prefix = tenant.slug if not tenant.default else ""
        session_token = test_data["session_tokens"]["regular"]
        cookies = {}
        cookies[settings.login_session_cookie_name] = login_session.token
        cookies[settings.session_cookie_name] = session_token_tokens["regular"][0]
        response = await test_client_auth.post(
            f"{path_prefix}/login",
            data={
                "username": "<EMAIL>",
                "password": "<PASSWORD>",
            },
            cookies=cookies,
        )
        assert response.status_code == status.HTTP_302_FOUND
        redirect_uri = response.headers["Location"]
        assert redirect_uri.endswith(f"{path_prefix}/consent")
        session_cookie = response.cookies[settings.session_cookie_name]
        session_token_manager = SessionTokenManager(workspace_session)
        new_session_token = await session_token_manager.get_by_token(
            get_token_hash(session_cookie)
        )
        assert new_session_token is not None
        # Session rotation: fresh token issued...
        assert new_session_token.id != session_token.id
        # ...and the old one revoked.
        old_session_token = await session_token_manager.get_by_id(session_token.id)
        assert old_session_token is None
@pytest.mark.asyncio
@pytest.mark.workspace_host
class TestAuthGetConsent:
@pytest.mark.parametrize("cookie", [None, "INVALID_LOGIN_SESSION"])
async def test_invalid_login_session(
self,
cookie: Optional[str],
tenant_params: TenantParams,
test_client_auth: httpx.AsyncClient,
):
cookies = {}
if cookie is not None:
cookies[settings.login_session_cookie_name] = cookie
response = await test_client_auth.get(
f"{tenant_params.path_prefix}/consent", cookies=cookies
)
assert response.status_code == status.HTTP_400_BAD_REQUEST
headers = response.headers
assert headers["X-Fief-Error"] == "invalid_session"
    @pytest.mark.parametrize("cookie", [None, "INVALID_SESSION_TOKEN"])
    async def test_invalid_session_token(
        self,
        cookie: Optional[str],
        test_client_auth: httpx.AsyncClient,
        test_data: TestData,
    ):
        """With a valid login session but a missing or invalid session token,
        the consent page redirects back to the login screen."""
        login_session = test_data["login_sessions"]["default"]
        client = login_session.client
        tenant = client.tenant
        # The default tenant is mounted at the root; others under their slug.
        path_prefix = tenant.slug if not tenant.default else ""
        cookies = {}
        cookies[settings.login_session_cookie_name] = login_session.token
        if cookie is not None:
            cookies[settings.session_cookie_name] = cookie
        response = await test_client_auth.get(f"{path_prefix}/consent", cookies=cookies)
        assert response.status_code == status.HTTP_302_FOUND
        redirect_uri = response.headers["Location"]
        assert redirect_uri.endswith(f"{path_prefix}/login")
async def test_valid(
self, test_client_auth: httpx.AsyncClient, test_data: TestData
):
login_session = test_data["login_sessions"]["default"]
client = login_session.client
tenant = client.tenant
path_prefix = tenant.slug if not tenant.default else ""
session_token_token = session_token_tokens["regular"][0]
cookies = {}
cookies[settings.login_session_cookie_name] = login_session.token
cookies[settings.session_cookie_name] = session_token_token
response = await test_client_auth.get(f"{path_prefix}/consent", cookies=cookies)
assert response.status_code == status.HTTP_200_OK
    async def test_none_prompt_without_grant(
        self,
        test_client_auth: httpx.AsyncClient,
        test_data: TestData,
        workspace_session: AsyncSession,
    ):
        """With prompt=none and no prior grant, the user must not be prompted:
        the request fails with consent_required and the login session is
        consumed."""
        login_session = test_data["login_sessions"]["default_none_prompt"]
        client = login_session.client
        tenant = client.tenant
        path_prefix = tenant.slug if not tenant.default else ""
        session_token_token = session_token_tokens["regular"][0]
        cookies = {}
        cookies[settings.login_session_cookie_name] = login_session.token
        cookies[settings.session_cookie_name] = session_token_token
        response = await test_client_auth.get(f"{path_prefix}/consent", cookies=cookies)
        assert response.status_code == status.HTTP_302_FOUND
        redirect_uri = response.headers["Location"]
        assert redirect_uri.startswith(login_session.redirect_uri)
        redirect_params = get_params_by_response_mode(
            redirect_uri, login_session.response_mode
        )
        assert redirect_params["error"] == "consent_required"
        assert redirect_params["state"] == login_session.state
        # The login session is single-use: it must be gone after the error.
        login_session_manager = LoginSessionManager(workspace_session)
        used_login_session = await login_session_manager.get_by_token(
            login_session.token
        )
        assert used_login_session is None
async def test_granted(
self,
test_client_auth: httpx.AsyncClient,
test_data: TestData,
workspace_session: AsyncSession,
):
login_session = test_data["login_sessions"]["granted_default"]
client = login_session.client
tenant = client.tenant
| |
# -*- coding: utf-8 -*-
# Copyright (c) 2009 Las Cumbres Observatory (www.lcogt.net)
# Copyright (c) 2010 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
'''
channel.py - Socket implementation of Google's Protocol Buffers RPC
service interface.
This package contains classes providing a socket implementation of the
RPCChannel abstract class.
Original Authors: <NAME> (<EMAIL>)
<NAME> (<EMAIL>)
<NAME> (<EMAIL>)
May 2009, Nov 2010
Modified for snakebite: W<NAME> (<EMAIL>)
May 2012
'''
# Standard library imports
import socket
import os
import math
# Third party imports
from google.protobuf.service import RpcChannel
# Protobuf imports
from snakebite.protobuf.RpcHeader_pb2 import RpcRequestHeaderProto, RpcResponseHeaderProto
from snakebite.protobuf.IpcConnectionContext_pb2 import IpcConnectionContextProto
from snakebite.protobuf.ProtobufRpcEngine_pb2 import RequestHeaderProto
from snakebite.protobuf.datatransfer_pb2 import OpReadBlockProto, BlockOpResponseProto, PacketHeaderProto, ClientReadStatusProto
from snakebite.platformutils import get_current_username
from snakebite.formatter import format_bytes
from snakebite.errors import RequestError, TransientException, FatalException
from snakebite.crc32c import crc
import google.protobuf.internal.encoder as encoder
import google.protobuf.internal.decoder as decoder
# Module imports
import logger
import logging
import struct
import uuid
# Kerberos/SASL support is optional: it requires the `snakebite[kerberos]`
# extra. Record availability up front so connection setup can fail with a
# clear message instead of an ImportError at call time.
_kerberos_available = False
try:
    from snakebite.rpc_sasl import SaslRpcClient
    from snakebite.kerberos import Kerberos
    _kerberos_available = True
except ImportError:
    pass
# Configure package logging
log = logger.getLogger(__name__)
def log_protobuf_message(header, message):
    '''Debug-log a protobuf *message* under *header*, colorized green (ANSI).'''
    text = "%s:\n\n\033[92m%s\033[0m" % (header, message)
    log.debug(text)
def get_delimited_message_bytes(byte_stream, nr=4):
    ''' Parse a delimited protobuf message.

    The stream first carries a protobuf varint holding the message length,
    then the message bytes themselves. We read ``nr`` bytes (the varint is
    represented by at most 4 bytes here), decode the length, rewind the bytes
    that were read past the varint (they belong to the message), and finally
    read the message itself.

    Returns a tuple ``(total_len, message_bytes)`` where ``total_len`` is the
    number of bytes consumed from the stream (varint + message).
    '''
    # pos is the offset just past the varint within the nr bytes we read.
    (length, pos) = decoder._DecodeVarint32(byte_stream.read(nr), 0)
    if log.getEffectiveLevel() == logging.DEBUG:
        log.debug("Delimited message length (pos %d): %d" % (pos, length))
    # Push back the surplus bytes that were read beyond the varint.
    delimiter_bytes = nr - pos
    byte_stream.rewind(delimiter_bytes)
    message_bytes = byte_stream.read(length)
    if log.getEffectiveLevel() == logging.DEBUG:
        log.debug("Delimited message bytes (%d): %s" % (len(message_bytes), format_bytes(message_bytes)))
    total_len = length + pos
    return (total_len, message_bytes)
class RpcBufferedReader(object):
    '''Class that wraps a socket and provides some utility methods for reading
    and rewinding of the buffer. This comes in handy when reading protobuf varints:
    a fixed number of bytes is read, the varint decoded, and the surplus bytes
    rewound so they can be re-read as part of the following message.
    '''
    # Upper bound on recv() calls for a single buffered read, so a stalled
    # peer cannot make us loop forever.
    MAX_READ_ATTEMPTS = 100
    def __init__(self, socket):
        self.socket = socket
        self.reset()
    def read(self, n):
        '''Return exactly n bytes, fetching more from the socket as needed.'''
        bytes_wanted = n - self.buffer_length + self.pos + 1
        if bytes_wanted > 0:
            self._buffer_bytes(bytes_wanted)
        end_pos = self.pos + n
        ret = self.buffer[self.pos + 1:end_pos + 1]
        self.pos = end_pos
        return ret
    def _buffer_bytes(self, n):
        '''Read exactly n bytes from the socket into the internal buffer.

        socket.recv() may legitimately return fewer bytes than requested, so
        keep reading (up to MAX_READ_ATTEMPTS) until all n bytes arrived.
        The previous implementation raised on ANY partial recv(), which made
        the retry loop unreachable, and silently returned None when the loop
        was exhausted; now only a closed connection (recv returning an empty
        string) or exhausting the attempts raises.
        '''
        to_read = n
        for _ in xrange(self.MAX_READ_ATTEMPTS):
            bytes_read = self.socket.recv(to_read)
            if not bytes_read:
                # Peer closed the connection before sending everything.
                raise TransientException("RpcBufferedReader only managed to read %s out of %s bytes" % (n - to_read, n))
            self.buffer += bytes_read
            to_read -= len(bytes_read)
            if to_read == 0:
                log.debug("Bytes read: %d, total: %d" % (len(bytes_read), self.buffer_length))
                return n
        # Retry budget exhausted without collecting all n bytes.
        raise TransientException("RpcBufferedReader only managed to read %s out of %s bytes" % (n - to_read, n))
    def rewind(self, places):
        '''Rewinds the current buffer to a position. Needed for reading varints,
        because we might read bytes that belong to the stream after the varint.
        '''
        log.debug("Rewinding pos %d with %d places" % (self.pos, places))
        self.pos -= places
        log.debug("Reset buffer to pos %d" % self.pos)
    def reset(self):
        # Empty the buffer; pos always points at the last byte handed out.
        self.buffer = ""
        self.pos = -1  # position of last byte read
    @property
    def buffer_length(self):
        '''Returns the length of the current buffer.'''
        return len(self.buffer)
class SocketRpcChannel(RpcChannel):
ERROR_BYTES = 18446744073709551615L
RPC_HEADER = "hrpc"
RPC_SERVICE_CLASS = 0x00
AUTH_PROTOCOL_NONE = 0x00
AUTH_PROTOCOL_SASL = 0xDF
RPC_PROTOCOL_BUFFFER = 0x02
'''Socket implementation of an RpcChannel.
'''
    def __init__(self, host, port, version, effective_user=None, use_sasl=False, hdfs_namenode_principal=None):
        '''SocketRpcChannel to connect to a socket server on a user defined port.

        It is possible to define the protocol version and the effective user
        for the communication. When ``use_sasl`` is True, Kerberos/SASL
        authentication is used; this requires the optional kerberos extras.
        '''
        self.host = host
        self.port = port
        # The socket is opened lazily by get_connection().
        self.sock = None
        self.call_id = -3 # First time (when the connection context is sent, the call_id should be -3, otherwise start with 0 and increment)
        self.version = version
        # Random client identifier; only its first 16 bytes are sent on the wire.
        self.client_id = str(uuid.uuid4())
        self.use_sasl = use_sasl
        self.hdfs_namenode_principal = hdfs_namenode_principal
        if self.use_sasl:
            if not _kerberos_available:
                raise FatalException("Kerberos libs not found. Please install snakebite using 'pip install snakebite[kerberos]'")
            kerberos = Kerberos()
            # Fall back to the Kerberos principal when no user was given.
            self.effective_user = effective_user or kerberos.user_principal().name
        else:
            self.effective_user = effective_user or get_current_username()
def validate_request(self, request):
'''Validate the client request against the protocol file.'''
# Check the request is correctly initialized
if not request.IsInitialized():
raise FatalException("Client request (%s) is missing mandatory fields" % type(request))
    def get_connection(self, host, port):
        '''Open a socket connection to a given host and port and writes the Hadoop header
        The Hadoop RPC protocol looks like this when creating a connection:
        +---------------------------------------------------------------------+
        |  Header, 4 bytes ("hrpc")                                           |
        +---------------------------------------------------------------------+
        |  Version, 1 byte (default version 9)                                |
        +---------------------------------------------------------------------+
        |  RPC service class, 1 byte (0x00)                                   |
        +---------------------------------------------------------------------+
        |  Auth protocol, 1 byte (Auth method None = 0)                       |
        +---------------------------------------------------------------------+
        |  Length of the RpcRequestHeaderProto  + length of the               |
        |  of the IpcConnectionContextProto (4 bytes/32 bit int)              |
        +---------------------------------------------------------------------+
        |  Serialized delimited RpcRequestHeaderProto                         |
        +---------------------------------------------------------------------+
        |  Serialized delimited IpcConnectionContextProto                     |
        +---------------------------------------------------------------------+
        '''
        log.debug("############## CONNECTING ##############")
        # Open socket
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Disable Nagle's algorithm: RPC messages are small and latency-sensitive.
        self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        self.sock.settimeout(10)
        # Connect socket to server - defined by host and port arguments
        self.sock.connect((host, port))
        # Send RPC headers
        self.write(self.RPC_HEADER)                             # header
        self.write(struct.pack('B', self.version))              # version
        self.write(struct.pack('B', self.RPC_SERVICE_CLASS))    # RPC service class
        if self.use_sasl:
            self.write(struct.pack('B', self.AUTH_PROTOCOL_SASL)) # serialization type (protobuf = 0xDF)
        else:
            self.write(struct.pack('B', self.AUTH_PROTOCOL_NONE)) # serialization type (protobuf = 0)
        if self.use_sasl:
            # SASL handshake must complete before the connection context is sent.
            sasl = SaslRpcClient(self, hdfs_namenode_principal=self.hdfs_namenode_principal)
            sasl_connected = sasl.connect()
            if not sasl_connected:
                raise TransientException("SASL is configured, but cannot get connected")
        rpc_header = self.create_rpc_request_header()
        context = self.create_connection_context()
        # Total length covers both delimited messages, varint prefixes included.
        header_length = len(rpc_header) + encoder._VarintSize(len(rpc_header)) +len(context) + encoder._VarintSize(len(context))
        if log.getEffectiveLevel() == logging.DEBUG:
            log.debug("Header length: %s (%s)" % (header_length, format_bytes(struct.pack('!I', header_length))))
        self.write(struct.pack('!I', header_length))
        self.write_delimited(rpc_header)
        self.write_delimited(context)
def write(self, data):
if log.getEffectiveLevel() == logging.DEBUG:
log.debug("Sending: %s", format_bytes(data))
self.sock.send(data)
def write_delimited(self, data):
self.write(encoder._VarintBytes(len(data)))
self.write(data)
    def create_rpc_request_header(self):
        '''Creates and serializes a delimited RpcRequestHeaderProto message.

        Also advances the channel's call_id: the sentinel -3 (used for the
        very first message, which carries the connection context) becomes 0,
        and every later call increments it.
        '''
        rpcheader = RpcRequestHeaderProto()
        rpcheader.rpcKind = 2 # rpcheaderproto.RpcKindProto.Value('RPC_PROTOCOL_BUFFER')
        rpcheader.rpcOp = 0 # rpcheaderproto.RpcPayloadOperationProto.Value('RPC_FINAL_PACKET')
        rpcheader.callId = self.call_id
        rpcheader.retryCount = -1
        # Only the first 16 characters of the uuid4 string are sent as client id.
        rpcheader.clientId = self.client_id[0:16]
        if self.call_id == -3:
            self.call_id = 0
        else:
            self.call_id += 1
        # Serialize delimited
        s_rpcHeader = rpcheader.SerializeToString()
        log_protobuf_message("RpcRequestHeaderProto (len: %d)" % (len(s_rpcHeader)), rpcheader)
        return s_rpcHeader
    def create_connection_context(self):
        '''Creates and serializes an IpcConnectionContextProto (not delimited).

        The context identifies the effective user and the ClientProtocol
        interface to the NameNode.
        '''
        context = IpcConnectionContextProto()
        context.userInfo.effectiveUser = self.effective_user
        context.protocol = "org.apache.hadoop.hdfs.protocol.ClientProtocol"
        s_context = context.SerializeToString()
        log_protobuf_message("RequestContext (len: %d)" % len(s_context), context)
        return s_context
def send_rpc_message(self, method, request):
'''Sends a Hadoop RPC request to the NameNode.
The IpcConnectionContextProto, RpcPayloadHeaderProto and HadoopRpcRequestProto
should already be serialized in the right way (delimited or not) before
they are passed in this method.
The Hadoop RPC protocol looks like this for sending requests:
When sending requests
+---------------------------------------------------------------------+
| Length of the next three parts (4 bytes/32 bit int) |
+---------------------------------------------------------------------+
| Delimited serialized RpcRequestHeaderProto (varint len + header) |
+---------------------------------------------------------------------+
| Delimited serialized RequestHeaderProto (varint len + header) |
+---------------------------------------------------------------------+
| Delimited serialized Request (varint len + request) |
+---------------------------------------------------------------------+
'''
log.debug("############## SENDING ##############")
#0. RpcRequestHeaderProto
rpc_request_header = self.create_rpc_request_header()
#1. RequestHeaderProto
request_header = self.create_request_header(method)
#2. Param
param = request.SerializeToString()
if log.getEffectiveLevel() == logging.DEBUG:
log_protobuf_message("Request", request)
rpc_message_length = len(rpc_request_header) + encoder._VarintSize(len(rpc_request_header)) + \
len(request_header) + encoder._VarintSize(len(request_header)) + \
len(param) + encoder._VarintSize(len(param))
if log.getEffectiveLevel() == logging.DEBUG:
log.debug("RPC message length: %s (%s)" % (rpc_message_length, format_bytes(struct.pack('!I', rpc_message_length))))
self.write(struct.pack('!I', | |
# Get Clone Sets Function
def volume_group_clone_get_sets(args):
    """Fetch the clone copy sets of a volume group and return them as JSON."""
    volume_group = VolumeGroup(args.ip, args.port)
    try:
        clone_sets = volume_group.volume_group_clone_get_sets(args.name)
        return common.format_json_object(clone_sets)
    except SOSError as e:
        if e.err_code != SOSError.SOS_FAILURE_ERR:
            common.format_err_msg_and_raise(
                "get", "clone copy sets", e.err_text, e.err_code)
        else:
            # Wrap the failure with a message identifying the volume group.
            message = (
                "Get clone copy sets for " + args.name + " failed\n" + e.err_text
            )
            raise SOSError(SOSError.SOS_FAILURE_ERR, message)
# clone_get_parser
def clone_get_parser(subcommand_parsers, common_parser):
    """Register the 'clone-get' sub-command and its arguments.

    :param subcommand_parsers: argparse sub-parsers action to register with
    :param common_parser: parent parser holding the common CLI options
    """
    clone_get_parser = subcommand_parsers.add_parser(
        'clone-get',
        parents=[common_parser],
        conflict_handler='resolve',
        help='Get clones of a volume group by set name',
        description='ViPR Get Clones of a VolumeGroup by Copy Set Name CLI usage.')
    mandatory_args = clone_get_parser.add_argument_group('mandatory arguments')
    mandatory_args.add_argument('-name', '-n',
                                metavar='<name>',
                                dest='name',
                                help='Name of volume group',
                                required=True)
    mandatory_args.add_argument('-setname', '-s',
                                metavar='<setname>',
                                dest='setname',
                                help='Copy set name',
                                required=True)
    # Dispatch to the handler that performs the actual request.
    clone_get_parser.set_defaults(func=volume_group_clone_get)
# Get Clones by Copy Set Name Function
def volume_group_clone_get(args):
    """Handler for 'clone-get': fetch clones of a volume group by copy set name.

    Returns the result as a formatted JSON string; raises SOSError on failure.
    """
    obj = VolumeGroup(args.ip, args.port)
    try:
        res= obj.volume_group_clone_get(args.name, args.setname)
        return common.format_json_object(res)
    except SOSError as e:
        if (e.err_code == SOSError.SOS_FAILURE_ERR):
            # Wrap the failure with a message naming the group and the set.
            raise SOSError(
                SOSError.SOS_FAILURE_ERR,
                "Get clones by copy set for " +
                args.name + " with set " + args.setname +
                " failed\n" +
                e.err_text)
        else:
            common.format_err_msg_and_raise(
                "get",
                "clones by set name",
                e.err_text,
                e.err_code)
# volume group snapshot routines
def snapshot_parser(subcommand_parsers, common_parser):
    """Register the 'snapshot' sub-command and its arguments.

    Several options come in deprecated/current pairs kept for backward
    compatibility (-snapshotsetname vs -copysetname, -createinactive vs
    -inactive, -volumes/-partial vs -subgroups); each pair is declared as a
    mutually exclusive group.
    """
    snapshot_parser = subcommand_parsers.add_parser(
        'snapshot',
        description='ViPR VolumeGroup Snapshot CLI usage.',
        parents=[common_parser],
        conflict_handler='resolve',
        help='Create volume group snapshot')
    mandatory_args = snapshot_parser.add_argument_group('mandatory arguments')
    mandatory_args.add_argument('-name', '-n',
                                metavar='<name>',
                                dest='name',
                                help='Name of volume group',
                                required=True)
    copy_set_name = snapshot_parser.add_mutually_exclusive_group(required=True)
    copy_set_name.add_argument('-copysetname',
                               metavar='<copysetname>',
                               dest='copySetName',
                               help='Name of snapshot to create')
    copy_set_name.add_argument('-snapshotsetname', '-s',
                               metavar='<snapshotsetname>',
                               dest='snapshotname',
                               help='Name of snapshot set (Deprecated)')
    sub_groups = snapshot_parser.add_mutually_exclusive_group(required=False)
    sub_groups.add_argument('-subgroups', '-sg',
                            metavar='<subgroups>',
                            dest='subGroups',
                            help='List of sub groups for partial request')
    sub_groups.add_argument('-volumes', '-v',
                            metavar='<tenant/project/volume_label,...>',
                            dest='volumes',
                            help='A list of volumes specifying their Array Replication Groups.' +
                            'This field is valid only when partial flag is provided (Deprecated)')
    inactive_flag = snapshot_parser.add_mutually_exclusive_group(required=False)
    inactive_flag.add_argument('-createinactive', '-ci',
                               dest='createinactive',
                               action='store_true',
                               help='Create snapshot with inactive state (Deprecated)')
    inactive_flag.add_argument('-inactive',
                               dest='inactive',
                               action='store_true',
                               help='Create snapshot with inactive state')
    snapshot_parser.add_argument('-readonly', '-ro',
                                 dest='readonly',
                                 action='store_true',
                                 help='Create read only snapshot')
    snapshot_parser.add_argument('-partial',
                                 dest='partial',
                                 action='store_true',
                                 help='To create snapshot for subset of volume group. ' +
                                 'Please specify one volume from each Array Replication Group (Deprecated)')
    snapshot_parser.set_defaults(func=volume_group_snapshot)
def volume_group_snapshot(args):
    """Create a snapshot for the named volume group.

    Falls back to the deprecated -snapshotsetname / -createinactive options
    when the preferred -copysetname / -inactive options were not supplied
    (the option pairs are mutually exclusive, so at most one is set).
    """
    obj = VolumeGroup(args.ip, args.port)
    try:
        # `or` picks the deprecated alias only when the new option is unset.
        copy_name = args.copySetName or args.snapshotname
        inactive_flag = args.inactive or args.createinactive
        volume_uris = query_volumes_for_partial_request(args)
        obj.snapshot(args.name, copy_name, args.subGroups, inactive_flag,
                     args.readonly, args.partial, ",".join(volume_uris))
    except SOSError as e:
        common.format_err_msg_and_raise(
            "create",
            "volume group snapshot",
            e.err_text,
            e.err_code)
# Common Parser for snapshot operations
def volume_group_snapshot_common_parser(cc_common_parser):
    """Attach the arguments shared by all snapshot operations to *cc_common_parser*."""
    required = cc_common_parser.add_argument_group('mandatory arguments')
    required.add_argument('-name', '-n',
                          metavar='<name>',
                          dest='name',
                          required=True,
                          help='Name of volume group')
    # Target selection: a copy-set name or (deprecated) an explicit snapshot list.
    selector = cc_common_parser.add_mutually_exclusive_group(required=True)
    selector.add_argument('-copysetname', '-cs',
                          metavar='<copysetname>',
                          dest='copySetName',
                          help='Name of a copy set')
    selector.add_argument('-snapshots', '-s',
                          metavar='<snapshotname,...>',
                          dest='snapshots',
                          help='A snapshot of a volume group specifying which snapshot set to act on. '
                               'For partial operation, specify one snapshot from each Array Replication Group (Deprecated)')
    cc_common_parser.add_argument('-subgroups', '-sg',
                                  metavar='<subgroups>',
                                  dest='subGroups',
                                  help='List of sub groups for partial request')
    cc_common_parser.add_argument('-partial',
                                  dest='partial',
                                  action='store_true',
                                  help='To operate on snapshots for subset of VolumeGroup. '
                                       'Please specify one snapshot from each Array Replication Group (Deprecated)')
def volume_group_snapshot_operation(args, operation, uri):
    """Run one snapshot *operation* (activate/deactivate/restore/...) via *uri*.

    :param args: parsed CLI namespace from volume_group_snapshot_common_parser.
    :param operation: human-readable verb used when building error messages.
    :param uri: REST URI constant selecting the operation on the server.
    :raises SOSError: with a contextual message when the request fails.
    """
    obj = VolumeGroup(args.ip, args.port)
    try:
        # Deprecated -snapshots option arrives as a comma-separated string;
        # an unset option degrades to "" exactly as the original CLI did.
        snapshots = set(args.snapshots.split(',')) if args.snapshots else ""
        obj.volume_group_snapshot_operation(
            args.name,
            args.copySetName,
            args.subGroups,
            snapshots,
            args.partial,
            uri)
    except SOSError as e:
        if (e.err_code == SOSError.SOS_FAILURE_ERR):
            raise SOSError(
                SOSError.SOS_FAILURE_ERR,
                operation + " snapshot for " + args.name + " failed\n" + e.err_text)
        common.format_err_msg_and_raise(
            operation,
            "snapshot",
            e.err_text,
            e.err_code)
# snapshot_activate_parser
def snapshot_activate_parser(subcommand_parsers, common_parser):
    """Register the 'snapshot-activate' sub-command."""
    parser = subcommand_parsers.add_parser(
        'snapshot-activate',
        parents=[common_parser],
        conflict_handler='resolve',
        description='ViPR Activate Snapshot of a VolumeGroup CLI usage.',
        help='Activate volume group snapshot')
    # Shared snapshot-operation arguments (-name, -copysetname/-snapshots, ...).
    volume_group_snapshot_common_parser(parser)
    parser.set_defaults(func=volume_group_snapshot_activate)
# Activate Snapshot Function
def volume_group_snapshot_activate(args):
    """Activate the selected volume group snapshot."""
    volume_group_snapshot_operation(
        args, "activate", VolumeGroup.URI_VOLUME_GROUP_SNAPSHOT_ACTIVATE)
# snapshot_deactivate_parser
def snapshot_deactivate_parser(subcommand_parsers, common_parser):
    """Register the 'snapshot-deactivate' sub-command."""
    parser = subcommand_parsers.add_parser(
        'snapshot-deactivate',
        parents=[common_parser],
        conflict_handler='resolve',
        description='ViPR Deactivate Snapshot of a VolumeGroup CLI usage.',
        help='Deactivate volume group snapshot')
    # Shared snapshot-operation arguments.
    volume_group_snapshot_common_parser(parser)
    parser.set_defaults(func=volume_group_snapshot_deactivate)
# Deactivate Snapshot Function
def volume_group_snapshot_deactivate(args):
    """Deactivate the selected volume group snapshot."""
    volume_group_snapshot_operation(
        args, "deactivate", VolumeGroup.URI_VOLUME_GROUP_SNAPSHOT_DEACTIVATE)
# snapshot_restore_parser
def snapshot_restore_parser(subcommand_parsers, common_parser):
    """Register the 'snapshot-restore' sub-command."""
    parser = subcommand_parsers.add_parser(
        'snapshot-restore',
        parents=[common_parser],
        conflict_handler='resolve',
        description='ViPR Restore Snapshot of a VolumeGroup CLI usage.',
        help='Restore volume group snapshot')
    # Shared snapshot-operation arguments.
    volume_group_snapshot_common_parser(parser)
    parser.set_defaults(func=volume_group_snapshot_restore)
# Restore Snapshot Function
def volume_group_snapshot_restore(args):
    """Restore the volume group from the selected snapshot."""
    volume_group_snapshot_operation(
        args, "restore", VolumeGroup.URI_VOLUME_GROUP_SNAPSHOT_RESTORE)
# snapshot_resync_parser
def snapshot_resync_parser(subcommand_parsers, common_parser):
    """Register the 'snapshot-resync' sub-command."""
    parser = subcommand_parsers.add_parser(
        'snapshot-resync',
        parents=[common_parser],
        conflict_handler='resolve',
        description='ViPR Resynchronize Snapshot of a VolumeGroup CLI usage.',
        help='Resynchronize volume group snapshot')
    # Shared snapshot-operation arguments.
    volume_group_snapshot_common_parser(parser)
    parser.set_defaults(func=volume_group_snapshot_resync)
# Resynchronize Snapshot Function
def volume_group_snapshot_resync(args):
    """Resynchronize the selected volume group snapshot."""
    volume_group_snapshot_operation(
        args, "resynchronize", VolumeGroup.URI_VOLUME_GROUP_SNAPSHOT_RESYNCHRONIZE)
# snapshot_expose_parser
def snapshot_expose_parser(subcommand_parsers, common_parser):
    """Register the 'snapshot-expose' sub-command."""
    parser = subcommand_parsers.add_parser(
        'snapshot-expose',
        parents=[common_parser],
        conflict_handler='resolve',
        description='ViPR Export Snapshot of a VolumeGroup to VPlex CLI usage.',
        help='Export volume group snapshot to VPlex')
    # Shared snapshot-operation arguments.
    volume_group_snapshot_common_parser(parser)
    parser.set_defaults(func=volume_group_snapshot_expose)
# Export Snapshot Function
def volume_group_snapshot_expose(args):
    """Expose (export to VPlex) the selected volume group snapshot."""
    volume_group_snapshot_operation(
        args, "expose", VolumeGroup.URI_VOLUME_GROUP_SNAPSHOT_EXPOSE)
# snapshot_list_parser
def snapshot_list_parser(subcommand_parsers, common_parser):
    """Register the 'snapshot-list' sub-command."""
    parser = subcommand_parsers.add_parser(
        'snapshot-list',
        parents=[common_parser],
        conflict_handler='resolve',
        description='ViPR List Snapshot of a VolumeGroup CLI usage.',
        help='Get all snapshots of a volume group')
    required = parser.add_argument_group('mandatory arguments')
    required.add_argument('-name', '-n',
                          metavar='<name>',
                          dest='name',
                          required=True,
                          help='Name of volume group')
    parser.set_defaults(func=volume_group_snapshot_list)
# List Snapshot Function
def volume_group_snapshot_list(args):
    """Return the JSON-formatted list of snapshots of a volume group."""
    obj = VolumeGroup(args.ip, args.port)
    try:
        return common.format_json_object(
            obj.volume_group_snapshot_list(args.name))
    except SOSError as e:
        if (e.err_code == SOSError.SOS_FAILURE_ERR):
            raise SOSError(
                SOSError.SOS_FAILURE_ERR,
                "List snapshot for " + args.name + " failed\n" + e.err_text)
        common.format_err_msg_and_raise(
            "list",
            "snapshot",
            e.err_text,
            e.err_code)
# snapshot_show_parser
def snapshot_show_parser(subcommand_parsers, common_parser):
    """Register the 'snapshot-show' sub-command."""
    parser = subcommand_parsers.add_parser(
        'snapshot-show',
        parents=[common_parser],
        conflict_handler='resolve',
        description='ViPR Show Snapshot of a VolumeGroup CLI usage.',
        help='Show details of a volume group snapshot')
    required = parser.add_argument_group('mandatory arguments')
    required.add_argument('-name', '-n',
                          metavar='<name>',
                          dest='name',
                          required=True,
                          help='Name of volume group')
    required.add_argument('-snapshotname', '-s',
                          metavar='<snapshot name>',
                          dest='snapshotname',
                          required=True,
                          help='Name of snapshot')
    parser.set_defaults(func=volume_group_snapshot_show)
# Get Snapshot Function
def volume_group_snapshot_show(args):
    """Return the JSON-formatted details of a single volume group snapshot."""
    obj = VolumeGroup(args.ip, args.port)
    try:
        details = obj.volume_group_snapshot_show(args.name, args.snapshotname)
        return common.format_json_object(details)
    except SOSError as e:
        if (e.err_code == SOSError.SOS_FAILURE_ERR):
            raise SOSError(
                SOSError.SOS_FAILURE_ERR,
                "Show snapshot for " + args.name + " failed\n" + e.err_text)
        common.format_err_msg_and_raise(
            "show",
            "snapshot",
            e.err_text,
            e.err_code)
# snapshot_get_sets_parser
def snapshot_get_sets_parser(subcommand_parsers, common_parser):
    """Register the 'snapshot-get-sets' sub-command."""
    parser = subcommand_parsers.add_parser(
        'snapshot-get-sets',
        parents=[common_parser],
        conflict_handler='resolve',
        description='ViPR Get Snapshot Copy Set Names of a VolumeGroup CLI usage.',
        help='Get snapshot copy set names of a volume group')
    required = parser.add_argument_group('mandatory arguments')
    required.add_argument('-name', '-n',
                          metavar='<name>',
                          dest='name',
                          required=True,
                          help='Name of volume group')
    parser.set_defaults(func=volume_group_snapshot_get_sets)
# Get Snapshot Sets Function
def volume_group_snapshot_get_sets(args):
    """Return the JSON-formatted snapshot copy-set names of a volume group."""
    obj = VolumeGroup(args.ip, args.port)
    try:
        return common.format_json_object(
            obj.volume_group_snapshot_get_sets(args.name))
    except SOSError as e:
        if (e.err_code == SOSError.SOS_FAILURE_ERR):
            raise SOSError(
                SOSError.SOS_FAILURE_ERR,
                "Get snapshot copy sets for " + args.name + " failed\n" + e.err_text)
        common.format_err_msg_and_raise(
            "get",
            "snapshot copy sets",
            e.err_text,
            e.err_code)
# snapshot_get_parser
def snapshot_get_parser(subcommand_parsers, common_parser):
    """Register the 'snapshot-get' sub-command."""
    parser = subcommand_parsers.add_parser(
        'snapshot-get',
        parents=[common_parser],
        conflict_handler='resolve',
        description='ViPR Get Snapshots of a VolumeGroup by Copy Set Name CLI usage.',
        help='Get snapshots of a volume group by set name')
    required = parser.add_argument_group('mandatory arguments')
    required.add_argument('-name', '-n',
                          metavar='<name>',
                          dest='name',
                          required=True,
                          help='Name of volume group')
    required.add_argument('-setname', '-s',
                          metavar='<snapshot set name>',
                          dest='setname',
                          required=True,
                          help='Snapshot set name')
    parser.set_defaults(func=volume_group_snapshot_get)
# Get Snapshots by Copy Set Name Function
def volume_group_snapshot_get(args):
    """Return the JSON-formatted snapshots of a volume group for one copy set."""
    obj = VolumeGroup(args.ip, args.port)
    try:
        return common.format_json_object(
            obj.volume_group_snapshot_get(args.name, args.setname))
    except SOSError as e:
        if (e.err_code == SOSError.SOS_FAILURE_ERR):
            raise SOSError(
                SOSError.SOS_FAILURE_ERR,
                "Get snapshot for " + args.name + " with set " + args.setname +
                " failed\n" + e.err_text)
        common.format_err_msg_and_raise(
            "get",
            "snapshot by set name",
            e.err_text,
            e.err_code)
# volume group snapshot session routines
def snapshotsession_parser(subcommand_parsers, common_parser):
    """Register the 'snapshotsession' sub-command that creates a volume group snapshot session."""
    parser = subcommand_parsers.add_parser(
        'snapshotsession',
        parents=[common_parser],
        conflict_handler='resolve',
        help='Create volume group snapshot session',
        description='ViPR VolumeGroup Snapshot Session CLI usage.')
    required = parser.add_argument_group('mandatory arguments')
    required.add_argument('-name', '-n',
                          metavar='<name>',
                          dest='name',
                          required=True,
                          help='Name of volume group')
    # Exactly one way of naming the session set must be supplied.
    name_choice = parser.add_mutually_exclusive_group(required=True)
    name_choice.add_argument('-copysetname',
                             metavar='<copysetname>',
                             dest='copySetName',
                             help='Name of snapshot session set')
    name_choice.add_argument('-snapshotsessionsetname', '-s',
                             metavar='<snapshot session set name>',
                             dest='snapshotsessionname',
                             help='Name of snapshot session set (Deprecated)')
    # Partial-request scope: sub groups (current) or volume list (deprecated).
    scope_choice = parser.add_mutually_exclusive_group(required=False)
    scope_choice.add_argument('-subgroups', '-sg',
                              metavar='<subgroups>',
                              dest='subGroups',
                              help='List of sub groups for partial request')
    scope_choice.add_argument('-volumes', '-v',
                              metavar='<tenant/project/volume_label,...>',
                              dest='volumes',
                              help='A list of volumes specifying their Array Replication Groups.'
                                   'This field is valid only when partial flag is provided (Deprecated)')
    parser.add_argument('-readonly', '-ro',
                        dest='readonly',
                        action='store_true',
                        help='Create read only snapshot session')
    parser.add_argument('-partial',
                        dest='partial',
                        action='store_true',
                        help='To create snapshot session for subset of volume group. '
                             'Please specify one volume from each Array Replication Group (Deprecated)')
    # Linked-target settings: count, targetname and copymode go together.
    parser.add_argument('-count', '-ct',
                        dest='count',
                        metavar='<count>',
                        required=False,
                        help='Number of target volumes. Optional, if provided, targetname and copymode need to be provided')
    parser.add_argument('-targetname', '-t',
                        dest='target_name',
                        metavar='<target name>',
                        required=False,
                        help='This option specifies the target name. Optional, if provided, count and copymode need to be provided')
    parser.add_argument('-copymode', '-cm',
                        dest='copymode',
                        choices=SnapshotSession.COPY_MODE,
                        required=False,
                        help='Whether to create in copy or nocopy mode. Optional, if provided, count and targetname need to be provided')
    parser.set_defaults(func=volume_group_snapshotsession)
def volume_group_snapshotsession(args):
obj = VolumeGroup(args.ip, args.port)
try:
if (args.copySetName):
copy_name = | |
<filename>ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/fipElpSwRjtFcf_template.py
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class FipElpSwRjtFcf(Base):
    """Auto-generated SDM wrapper for the 'fipElpSwRjtFcf' packet template.

    Each entry of ``_SDM_ATT_MAP`` maps a Python property name to the SDM
    attribute path of one header field; the numeric suffix is the field index
    within the template. NOTE(review): names suggest a FIP ELP SW_RJT frame
    from an FCF -- confirm against the IxNetwork traffic template docs.
    """
    __slots__ = ()
    # SDM name of the protocol template this class wraps.
    _SDM_NAME = 'fipElpSwRjtFcf'
    # Property name -> fully qualified SDM attribute path (generated; do not edit by hand).
    _SDM_ATT_MAP = {
        'HeaderFipVersion': 'fipElpSwRjtFcf.header.fipVersion-1',
        'HeaderFipReserved': 'fipElpSwRjtFcf.header.fipReserved-2',
        'FipOperationCodeFipVirtualLinkInstantiation': 'fipElpSwRjtFcf.header.fipOperation.fipOperationCode.fipVirtualLinkInstantiation-3',
        'FipOperationFipOperationReserved1': 'fipElpSwRjtFcf.header.fipOperation.fipOperationReserved1-4',
        'FipSubcodeFipSubcode02h': 'fipElpSwRjtFcf.header.fipOperation.fipSubcode.fipSubcode02h-5',
        'FipOperationFipDescriptorListLength': 'fipElpSwRjtFcf.header.fipOperation.fipDescriptorListLength-6',
        'FipOperationFipFp': 'fipElpSwRjtFcf.header.fipOperation.fipFp-7',
        'FipOperationFipSp': 'fipElpSwRjtFcf.header.fipOperation.fipSp-8',
        'FipOperationFipReserved2': 'fipElpSwRjtFcf.header.fipOperation.fipReserved2-9',
        'FipOperationFipABit': 'fipElpSwRjtFcf.header.fipOperation.fipABit-10',
        'FipOperationFipSBit': 'fipElpSwRjtFcf.header.fipOperation.fipSBit-11',
        'FipOperationFipFBit': 'fipElpSwRjtFcf.header.fipOperation.fipFBit-12',
        'FipElpDescriptorFipElpDescriptorType': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpDescriptorType-13',
        'FipElpDescriptorFipElpDescriptorLength': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpDescriptorLength-14',
        'FipElpDescriptorFipElpDescriptorReserved': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpDescriptorReserved-15',
        'FipElpDescriptorFibreChannelRCtlExchangeLinkParametersFipElpDescriptorFibreChannelRCtlExchangeLinkParametersRequestReply': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelRCtl.fipElpDescriptorFibreChannelRCtlExchangeLinkParameters.fipElpDescriptorFibreChannelRCtlExchangeLinkParametersRequestReply-16',
        'FipElpFibreChannelFipElpDescriptorFibreChannelDId': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelDId-17',
        'FipElpFibreChannelFipElpDescriptorFibreChannelCsCtlPriority': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelCsCtlPriority-18',
        'FipElpFibreChannelFipElpDescriptorFibreChannelSId': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelSId-19',
        'FipElpFibreChannelFipElpDescriptorFibreChannelType': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelType-20',
        'FCtlExchangeContext': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.exchangeContext-21',
        'FCtlSequenceContext': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.sequenceContext-22',
        'FCtlFirstSequence': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.firstSequence-23',
        'FCtlLastSequence': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.lastSequence-24',
        'FCtlEndSequence': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.endSequence-25',
        'FCtlEndConnection': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.endConnection-26',
        'FCtlCsCtlPriority': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.csCtlPriority-27',
        'FCtlSequenceInitiative': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.sequenceInitiative-28',
        'FCtlFcXidReassigned': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.fcXidReassigned-29',
        'FCtlFcInvalidateXid': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.fcInvalidateXid-30',
        'FCtlAckForm': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.ackForm-31',
        'FCtlFcDataCompression': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.fcDataCompression-32',
        'FCtlFcDataEncryption': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.fcDataEncryption-33',
        'FCtlRetransmittedSequence': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.retransmittedSequence-34',
        'FCtlUnidirectionalTransmit': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.unidirectionalTransmit-35',
        'FCtlContinueSeqCondition': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.continueSeqCondition-36',
        'FCtlAbortSeqCondition': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.abortSeqCondition-37',
        'FCtlRelativeOffsetPresent': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.relativeOffsetPresent-38',
        'FCtlExchangeReassembly': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.exchangeReassembly-39',
        'FCtlFillBytes': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelFCtl.fCtl.fillBytes-40',
        'FipElpFibreChannelFipElpDescriptorFibreChannelSeqId': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelSeqId-41',
        'FipElpFibreChannelFipElpDescriptorFibreChannelDfCtl': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelDfCtl-42',
        'FipElpFibreChannelFipElpDescriptorFibreChannelSeqCnt': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelSeqCnt-43',
        'FipElpFibreChannelFipElpDescriptorFibreChannelOxId': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelOxId-44',
        'FipElpFibreChannelFipElpDescriptorFibreChannelRxId': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelRxId-45',
        'FipElpFibreChannelFipElpDescriptorFibreChannelParameter': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpFibreChannel.fipElpDescriptorFibreChannelParameter-46',
        'FipElpDescriptorFcElpCommandCodeFipElpDescriptorFcElpCommandCodeExchangeLinkParametersReject': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpDescriptorFcElp.fipElpDescriptorFcElpRequestReply.fipElpDescriptorFcElpCommandCode.fipElpDescriptorFcElpCommandCodeExchangeLinkParametersReject-47',
        'FipElpDescriptorFcElpRequestReplyFipElpDescriptorFcElpSwRjtReasonCodes': 'fipElpSwRjtFcf.header.fipDescriptors.fipSelectFipDescriptor.fipElpDescriptor.fipElpDescriptorFcElp.fipElpDescriptorFcElpRequestReply.fipElpDescriptorFcElpSwRjtReasonCodes-48',
    }
    def __init__(self, parent, list_op=False):
        """Initialize the template node.

        Args:
            parent: parent object, forwarded unchanged to Base.__init__.
            list_op (bool): forwarded to Base.__init__; presumably marks a
                list-operation context -- confirm against Base documentation.
        """
        super(FipElpSwRjtFcf, self).__init__(parent, list_op)
@property
def HeaderFipVersion(self):
"""
Display Name: Version
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderFipVersion']))
@property
def HeaderFipReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderFipReserved']))
@property
def FipOperationCodeFipVirtualLinkInstantiation(self):
"""
Display Name: Virtual Link Instantiation
Default Value: 0x0002
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationCodeFipVirtualLinkInstantiation']))
@property
def FipOperationFipOperationReserved1(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipOperationReserved1']))
@property
def FipSubcodeFipSubcode02h(self):
"""
Display Name: Subcode 02h
Default Value: 0x02
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipSubcodeFipSubcode02h']))
@property
def FipOperationFipDescriptorListLength(self):
"""
Display Name: FIP Descriptor List Length
Default Value: 9
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipDescriptorListLength']))
@property
def FipOperationFipFp(self):
"""
Display Name: FP
Default Value: 1
Value Format: decimal
Available enum values: False, 0, True, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipFp']))
@property
def FipOperationFipSp(self):
"""
Display Name: SP
Default Value: 1
Value Format: decimal
Available enum values: False, 0, True, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipSp']))
@property
def FipOperationFipReserved2(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipReserved2']))
@property
def FipOperationFipABit(self):
"""
Display Name: A bit
Default Value: 0
Value Format: decimal
Available enum values: False, 0, True, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipABit']))
@property
def FipOperationFipSBit(self):
"""
Display Name: S bit
Default Value: 0
Value Format: decimal
Available enum values: False, 0, True, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipSBit']))
@property
def FipOperationFipFBit(self):
"""
Display Name: F bit
Default Value: 0
Value Format: decimal
Available enum values: False, 0, True, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipFBit']))
@property
def FipElpDescriptorFipElpDescriptorType(self):
"""
Display Name: ELP Descriptor Type
Default Value: 10
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipElpDescriptorFipElpDescriptorType']))
@property
def FipElpDescriptorFipElpDescriptorLength(self):
"""
Display Name: ELP Descriptor Length
Default Value: 9
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipElpDescriptorFipElpDescriptorLength']))
@property
def FipElpDescriptorFipElpDescriptorReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipElpDescriptorFipElpDescriptorReserved']))
@property
def FipElpDescriptorFibreChannelRCtlExchangeLinkParametersFipElpDescriptorFibreChannelRCtlExchangeLinkParametersRequestReply(self):
"""
Display Name: Request/Reply
Default Value: 3
Value Format: decimal
Available enum values: Request, 2, Reply, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipElpDescriptorFibreChannelRCtlExchangeLinkParametersFipElpDescriptorFibreChannelRCtlExchangeLinkParametersRequestReply']))
@property
def FipElpFibreChannelFipElpDescriptorFibreChannelDId(self):
"""
Display Name: D_ID
Default Value: 0xFFFFFD
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipElpFibreChannelFipElpDescriptorFibreChannelDId']))
@property
def FipElpFibreChannelFipElpDescriptorFibreChannelCsCtlPriority(self):
"""
Display Name: CS_CTL/Priority
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipElpFibreChannelFipElpDescriptorFibreChannelCsCtlPriority']))
@property
def FipElpFibreChannelFipElpDescriptorFibreChannelSId(self):
"""
Display Name: S_ID
Default Value: 0xFFFFFD
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipElpFibreChannelFipElpDescriptorFibreChannelSId']))
@property
def FipElpFibreChannelFipElpDescriptorFibreChannelType(self):
"""
Display Name: Type
Default Value: 0x22
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipElpFibreChannelFipElpDescriptorFibreChannelType']))
@property
def FCtlExchangeContext(self):
"""
Display Name: Exchange Context
Default Value: 0
Value Format: decimal
Available enum values: Originator, 0, Receipient, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlExchangeContext']))
@property
def FCtlSequenceContext(self):
"""
Display Name: Sequence Context
Default Value: 0
Value Format: decimal
Available enum values: Initiator, 0, Receipient, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlSequenceContext']))
@property
def FCtlFirstSequence(self):
"""
Display Name: First Sequence
Default Value: 0
Value Format: decimal
Available enum values: Other, 0, First, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlFirstSequence']))
@property
def FCtlLastSequence(self):
"""
Display Name: Last Sequence
Default Value: 0
Value Format: decimal
Available enum values: Other, 0, Last, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlLastSequence']))
@property
def FCtlEndSequence(self):
"""
Display Name: End Sequence
Default Value: 0
Value Format: decimal
Available enum values: Other, 0, Last, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlEndSequence']))
@property
def FCtlEndConnection(self):
"""
Display Name: End Connection
Default Value: 0
Value Format: decimal
Available enum values: Alive, 0, Pending, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlEndConnection']))
@property
def FCtlCsCtlPriority(self):
"""
Display Name: CS_CTL/Priority Enable
Default Value: 0
Value Format: decimal
Available enum values: CS_CTL, 0, Priority, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlCsCtlPriority']))
@property
def FCtlSequenceInitiative(self):
"""
Display Name: Sequence Initiative
Default Value: 0
Value Format: decimal
Available enum values: Hold, 0, Transfer, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlSequenceInitiative']))
@property
def FCtlFcXidReassigned(self):
"""
Display Name: FC XID Reassigned
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlFcXidReassigned']))
@property
def FCtlFcInvalidateXid(self):
"""
Display Name: FC Invalidate XID
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlFcInvalidateXid']))
@property
def FCtlAckForm(self):
"""
Display Name: ACK_Form
Default Value: 0
Value Format: decimal
Available enum values: No assistance provided, 0, ACK_1 Required, 1, reserved, 2, Ack_0 Required, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlAckForm']))
@property
def FCtlFcDataCompression(self):
"""
Display Name: FC Data Compression
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlFcDataCompression']))
@property
def FCtlFcDataEncryption(self):
"""
Display Name: FC Data Encryption
Default Value: 0
Value Format: decimal
Available enum values: No, 0, Yes, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlFcDataEncryption']))
@property
def FCtlRetransmittedSequence(self):
"""
Display Name: Retransmitted Sequence
Default Value: 0
Value Format: decimal
Available enum values: Original, 0, Retransmission, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlRetransmittedSequence']))
@property
def FCtlUnidirectionalTransmit(self):
"""
Display Name: Unidirectional Transmit
Default Value: 0
Value Format: decimal
Available enum values: Bi-directional, 0, Unidirectional, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlUnidirectionalTransmit']))
@property
def FCtlContinueSeqCondition(self):
"""
Display Name: Continue Sequence Condition
Default Value: 0
Value Format: decimal
Available enum values: No information, 0, Sequence to follow-immediately, 1, Squence to follow-soon, 2, Sequence to follow-delayed, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlContinueSeqCondition']))
@property
def FCtlAbortSeqCondition(self):
"""
Display Name: Abort Sequence Condition
Default Value: 0
Value Format: decimal
Available enum values: 0x00, 0, 0x01, 1, 0x10, 2, 0x11, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlAbortSeqCondition']))
@property
def FCtlRelativeOffsetPresent(self):
"""
Display Name: Relative offset present
Default Value: 0
Value Format: decimal
Available enum values: Parameter field defined, 0, Relative offset, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlRelativeOffsetPresent']))
@property
def FCtlExchangeReassembly(self):
"""
Display Name: Exchange Reassembly
Default Value: 0
Value Format: decimal
Available enum values: off, 0, on, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlExchangeReassembly']))
@property
def FCtlFillBytes(self):
"""
Display Name: Fill Bytes
Default Value: 0
Value Format: decimal
Available enum values: 0 bytes of fill, 0, 1 bytes of fill, 1, 2 bytes of fill, 2, 3 bytes of fill, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FCtlFillBytes']))
@property
def FipElpFibreChannelFipElpDescriptorFibreChannelSeqId(self):
"""
Display Name: SEQ_ID
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipElpFibreChannelFipElpDescriptorFibreChannelSeqId']))
@property
def FipElpFibreChannelFipElpDescriptorFibreChannelDfCtl(self):
"""
Display Name: DF_CTL
Default Value: 0x00
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipElpFibreChannelFipElpDescriptorFibreChannelDfCtl']))
@property
def FipElpFibreChannelFipElpDescriptorFibreChannelSeqCnt(self):
"""
Display Name: SEQ_CNT
Default Value: 0
Value Format: decimal
"""
| |
return self._entity_data.get('vehiclescript')
return "scripts/vehicles/jeep_test.txt"
@property
def actionScale(self):
if "actionScale" in self._entity_data:
return float(self._entity_data.get('actionScale'))
return float(1)
class BaseDriveableVehicle(BaseVehicle):
    """Base class for vehicles the player can drive."""

    @property
    def VehicleLocked(self):
        """Whether the vehicle starts locked; False when unset."""
        if "VehicleLocked" not in self._entity_data:
            return bool(0)
        return bool(self._entity_data.get('VehicleLocked'))
class prop_vehicle(BaseVehicle):
    """Generic (non-driveable) vehicle prop."""

    @property
    def spawnflags(self):
        """Names of the spawnflag bits set on this entity."""
        if "spawnflags" not in self._entity_data:
            return []
        value = self._entity_data.get("spawnflags", None)
        flag_map = {'Always Think (Run physics every frame)': (1, 0)}
        return [name for name, (bit, _) in flag_map.items() if value & bit > 0]
class prop_vehicle_driveable(BaseDriveableVehicle):
    """Driveable vehicle prop; inherits all behaviour from BaseDriveableVehicle."""
class point_apc_controller(Targetname):
    """Controller entity that aims an APC's gun at a target."""

    @property
    def spawnflags(self):
        """Names of the spawnflag bits set on this entity."""
        if "spawnflags" not in self._entity_data:
            return []
        value = self._entity_data.get("spawnflags", None)
        return [name for name, (bit, _) in {'Active': (1, 0)}.items()
                if value & bit > 0]

    @property
    def yawrate(self):
        if "yawrate" not in self._entity_data:
            return "30"
        return self._entity_data.get('yawrate')

    @property
    def yawtolerance(self):
        if "yawtolerance" not in self._entity_data:
            return "15"
        return self._entity_data.get('yawtolerance')

    @property
    def pitchrate(self):
        if "pitchrate" not in self._entity_data:
            return "0"
        return self._entity_data.get('pitchrate')

    @property
    def pitchtolerance(self):
        if "pitchtolerance" not in self._entity_data:
            return "20"
        return self._entity_data.get('pitchtolerance')

    @property
    def rotatestartsound(self):
        if "rotatestartsound" not in self._entity_data:
            return ""
        return self._entity_data.get('rotatestartsound')

    @property
    def rotatesound(self):
        if "rotatesound" not in self._entity_data:
            return ""
        return self._entity_data.get('rotatesound')

    @property
    def rotatestopsound(self):
        if "rotatestopsound" not in self._entity_data:
            return ""
        return self._entity_data.get('rotatestopsound')

    @property
    def minRange(self):
        if "minRange" not in self._entity_data:
            return "0"
        return self._entity_data.get('minRange')

    @property
    def maxRange(self):
        if "maxRange" not in self._entity_data:
            return "0"
        return self._entity_data.get('maxRange')

    @property
    def targetentityname(self):
        if "targetentityname" not in self._entity_data:
            return ""
        return self._entity_data.get('targetentityname')
class prop_vehicle_apc(BaseDriveableVehicle):
    """Driveable APC vehicle."""

    @property
    def missilehint(self):
        """Name of the missile hint target group; empty when unset."""
        if "missilehint" not in self._entity_data:
            return ""
        return self._entity_data.get('missilehint')
class info_apc_missile_hint(Targetname, EnableDisable):
    """Hint entity telling APC missiles where to strike."""

    @property
    def target(self):
        """Target entity name; empty when unset."""
        if "target" not in self._entity_data:
            return ""
        return self._entity_data.get('target')
class prop_vehicle_jeep(BaseDriveableVehicle):
    """Driveable jeep (buggy) vehicle."""

    @property
    def CargoVisible(self):
        """Whether the cargo model is shown; False when unset."""
        if "CargoVisible" not in self._entity_data:
            return bool(0)
        return bool(self._entity_data.get('CargoVisible'))

    @property
    def spawnflags(self):
        """Names of the spawnflag bits set on this entity."""
        if "spawnflags" not in self._entity_data:
            return []
        value = self._entity_data.get("spawnflags", None)
        flag_map = {'HUD Locator Precache': (1, 0)}
        return [name for name, (bit, _) in flag_map.items() if value & bit > 0]
class vehicle_viewcontroller(BaseDriveableVehicle):
    """Camera-controlling pseudo-vehicle; no extra keyvalues of its own."""
class prop_vehicle_airboat(BaseDriveableVehicle):
    """Driveable airboat vehicle."""

    @property
    def model(self):
        """Vehicle model path; defaults to the stock airboat model."""
        if "model" not in self._entity_data:
            return "models/airboat.vmdl"
        return self._entity_data.get('model')

    @property
    def vehiclescript(self):
        """Vehicle script path; defaults to the stock airboat script."""
        if "vehiclescript" not in self._entity_data:
            return "scripts/vehicles/airboat.txt"
        return self._entity_data.get('vehiclescript')

    @property
    def EnableGun(self):
        """Whether the airboat gun is mounted; False when unset."""
        if "EnableGun" not in self._entity_data:
            return bool(0)
        return bool(self._entity_data.get('EnableGun'))
class prop_vehicle_cannon(BaseDriveableVehicle):
    """Driveable cannon vehicle; no extra keyvalues of its own."""
class prop_vehicle_crane(BaseDriveableVehicle):
    """Driveable crane vehicle."""

    @property
    def magnetname(self):
        """Name of the crane's magnet entity; empty when unset."""
        if "magnetname" not in self._entity_data:
            return ""
        return self._entity_data.get('magnetname')
class prop_vehicle_prisoner_pod(BaseDriveableVehicle, Parentname):
    """Combine prisoner pod the player can ride in."""

    @property
    def model(self):
        """Pod model path; defaults to the stock prisoner pod model."""
        if "model" not in self._entity_data:
            return "models/vehicles/prisoner_pod.vmdl"
        return self._entity_data.get('model')

    @property
    def vehiclescript(self):
        """Vehicle script path; defaults to the prisoner pod script."""
        if "vehiclescript" not in self._entity_data:
            return "scripts/vehicles/prisoner_pod.txt"
        return self._entity_data.get('vehiclescript')
class env_speaker(BaseSpeaker):
    """Announcement speaker entity; shown with the ambient_generic sprite."""
    icon_sprite = "editor/ambient_generic.vmat"
class script_tauremoval(Targetname, Parentname):
    """Scripted tau-cannon removal sequence entity."""

    @property
    def vortigaunt(self):
        """Name of the vortigaunt actor; None when unset."""
        if "vortigaunt" not in self._entity_data:
            return None
        return self._entity_data.get('vortigaunt')
class script_intro(Targetname):
    """Intro sequence controller entity."""

    @property
    def alternatefovchange(self):
        """Whether to use the alternate FOV transition; False when unset."""
        if "alternatefovchange" not in self._entity_data:
            return bool(0)
        return bool(self._entity_data.get('alternatefovchange'))
class env_citadel_energy_core(Targetname, Parentname):
    """Citadel energy core visual effect."""

    @property
    def spawnflags(self):
        """Names of the spawnflag bits set on this entity."""
        if "spawnflags" not in self._entity_data:
            return []
        value = self._entity_data.get("spawnflags", None)
        flag_map = {'No small particles': (1, 0), 'Start on': (2, 0)}
        return [name for name, (bit, _) in flag_map.items() if value & bit > 0]

    @property
    def scale(self):
        """Effect scale; 1.0 when unset."""
        if "scale" not in self._entity_data:
            return float(1)
        return float(self._entity_data.get('scale'))
class env_alyxemp(Targetname, Parentname):
    """Alyx EMP tool effect entity."""

    @property
    def Type(self):
        """EMP effect type; "0" when unset."""
        if "Type" not in self._entity_data:
            return "0"
        return self._entity_data.get('Type')

    @property
    def EndTargetName(self):
        """Name of the beam end target; empty when unset."""
        if "EndTargetName" not in self._entity_data:
            return ""
        return self._entity_data.get('EndTargetName')
class test_sidelist:
    """Test entity exposing a parsed list of side ids from its entity data."""

    def __init__(self, entity_data: dict):
        self._entity_data = entity_data

    @property
    def sides(self):
        """Side ids parsed from the 'sides' key; parsed "None" when unset."""
        if "sides" not in self._entity_data:
            return parse_int_vector("None")
        return parse_int_vector(self._entity_data.get('sides'))
class info_teleporter_countdown(Targetname):
    """Teleporter countdown marker; shown with the info_target sprite."""
    icon_sprite = "editor/info_target.vmat"
class prop_vehicle_choreo_generic(BaseDriveableVehicle, Parentname):
    """Choreography-driven vehicle used in scripted scenes."""

    @property
    def model(self):
        """Vehicle model path; defaults to the prisoner pod model."""
        if "model" not in self._entity_data:
            return "models/vehicles/prisoner_pod.vmdl"
        return self._entity_data.get('model')

    @property
    def vehiclescript(self):
        """Vehicle script path; defaults to the choreo vehicle script."""
        if "vehiclescript" not in self._entity_data:
            return "scripts/vehicles/choreo_vehicle.txt"
        return self._entity_data.get('vehiclescript')

    @property
    def ignoremoveparent(self):
        """Whether to ignore the move parent; False when unset."""
        if "ignoremoveparent" not in self._entity_data:
            return bool(0)
        return bool(self._entity_data.get('ignoremoveparent'))

    @property
    def ignoreplayer(self):
        """Whether to ignore the player; False when unset."""
        if "ignoreplayer" not in self._entity_data:
            return bool(0)
        return bool(self._entity_data.get('ignoreplayer'))
class filter_combineball_type(BaseFilter):
    """Filter that matches combine balls by type."""
    icon_sprite = "editor/filter_class.vmat"

    @property
    def balltype(self):
        """Combine ball type to match; "1" when unset."""
        if "balltype" not in self._entity_data:
            return "1"
        return self._entity_data.get('balltype')
class env_entity_dissolver(Targetname):
    """Entity that dissolves a target entity."""

    @property
    def target(self):
        """Name of the entity to dissolve; empty when unset."""
        if "target" not in self._entity_data:
            return ""
        return self._entity_data.get('target')

    @property
    def magnitude(self):
        """Push magnitude applied to dissolved ragdolls; 250 when unset."""
        if "magnitude" not in self._entity_data:
            return int(250)
        return int(self._entity_data.get('magnitude'))

    @property
    def dissolvetype(self):
        """Dissolve effect type; "Energy" when unset."""
        if "dissolvetype" not in self._entity_data:
            return "Energy"
        return self._entity_data.get('dissolvetype')
class prop_coreball(Targetname):
    """Core ball prop; no extra keyvalues of its own."""
class prop_scalable(Targetname, Studiomodel, RenderFields):
    """Scalable studio-model prop; no extra keyvalues of its own."""
class point_push(Targetname):
    """Point entity that pushes objects within a radius."""

    @property
    def spawnflags(self):
        """Names of the spawnflag bits set on this entity."""
        if "spawnflags" not in self._entity_data:
            return []
        value = self._entity_data.get("spawnflags", None)
        flag_map = {
            'Test LOS before pushing': (1, 0),
            'Use angles for push direction': (2, 0),
            'No falloff (constant push at any distance)': (4, 0),
            'Push players': (8, 1),
            'Push physics': (16, 1),
        }
        return [name for name, (bit, _) in flag_map.items() if value & bit > 0]

    @property
    def enabled(self):
        """Whether the push starts enabled; True when unset."""
        if "enabled" not in self._entity_data:
            return bool(1)
        return bool(self._entity_data.get('enabled'))

    @property
    def magnitude(self):
        """Push strength; 100.0 when unset."""
        if "magnitude" not in self._entity_data:
            return float(100)
        return float(self._entity_data.get('magnitude'))

    @property
    def radius(self):
        """Outer push radius; 128.0 when unset."""
        if "radius" not in self._entity_data:
            return float(128)
        return float(self._entity_data.get('radius'))

    @property
    def inner_radius(self):
        """Inner full-strength radius; 0.0 when unset."""
        if "inner_radius" not in self._entity_data:
            return float(0)
        return float(self._entity_data.get('inner_radius'))

    @property
    def influence_cone(self):
        """Cone of influence in degrees; 0.0 when unset."""
        if "influence_cone" not in self._entity_data:
            return float(0)
        return float(self._entity_data.get('influence_cone'))

    @property
    def filtername(self):
        """Name of the filter entity; empty when unset."""
        if "filtername" not in self._entity_data:
            return ""
        return self._entity_data.get('filtername')
class npc_antlion_grub(Targetname, BaseFadeProp, Global):
    """Antlion grub NPC."""

    @property
    def spawnflags(self):
        """Names of the spawnflag bits set on this entity."""
        if "spawnflags" not in self._entity_data:
            return []
        value = self._entity_data.get("spawnflags", None)
        flag_map = {'Do not automatically attach to surface': (1, 0)}
        return [name for name, (bit, _) in flag_map.items() if value & bit > 0]
class weapon_striderbuster(BasePropPhysics):
    """Strider-buster (magnusson device) weapon prop."""

    @property
    def spawnflags(self):
        """Names of the spawnflag bits set on this entity."""
        if "spawnflags" not in self._entity_data:
            return []
        value = self._entity_data.get("spawnflags", None)
        flag_map = {"Don't use game_weapon_manager": (8388608, 0)}
        return [name for name, (bit, _) in flag_map.items() if value & bit > 0]

    @property
    def dud(self):
        """Whether this striderbuster is a dud; False when unset."""
        if "dud" not in self._entity_data:
            return bool(0)
        return bool(self._entity_data.get('dud'))
class point_flesh_effect_target(Targetname, Parentname):
    """Attraction target for flesh effects."""

    @property
    def radius(self):
        """Effect radius; 8.0 when unset."""
        if "radius" not in self._entity_data:
            return float(8)
        return float(self._entity_data.get('radius'))
class prop_door_rotating(BasePropDoorRotating):
    """Rotating door prop; no extra keyvalues of its own."""
class prop_door_rotating_physics(BasePropDoorRotating, ConstraintSoundInfo):
    """Physically simulated rotating door."""

    @property
    def LatchIsBreakable(self):
        """Whether the latch can be broken; False when unset."""
        if "LatchIsBreakable" not in self._entity_data:
            return bool(0)
        return bool(self._entity_data.get('LatchIsBreakable'))

    @property
    def HingeIsBreakable(self):
        """Whether the hinge can be broken; False when unset."""
        if "HingeIsBreakable" not in self._entity_data:
            return bool(0)
        return bool(self._entity_data.get('HingeIsBreakable'))

    @property
    def ForceFullyOpen(self):
        """Whether the door is forced fully open; False when unset."""
        if "ForceFullyOpen" not in self._entity_data:
            return bool(0)
        return bool(self._entity_data.get('ForceFullyOpen'))

    @property
    def friction(self):
        """Hinge friction; 0.001 when unset."""
        if "friction" not in self._entity_data:
            return float(0.001)
        return float(self._entity_data.get('friction'))

    @property
    def GrabAttachmentName(self):
        """Model attachment used for grabbing; "grab" when unset."""
        if "GrabAttachmentName" not in self._entity_data:
            return "grab"
        return self._entity_data.get('GrabAttachmentName')
class markup_volume(Targetname, Parentname, Global, EnableDisable):
    """Base markup volume; no extra keyvalues of its own."""
class markup_volume_tagged(markup_volume):
    """Markup volume carrying a set of tag fields."""

    @property
    def groupnames(self):
        # The generated flag map here is empty, so no group name can ever
        # match; the original loop over {} always produced an empty list.
        return []

    @property
    def tagFieldNames(self):
        """Names of the tag fields; "groupnames" when unset."""
        if "tagFieldNames" not in self._entity_data:
            return "groupnames"
        return self._entity_data.get('tagFieldNames')
class markup_group(markup_volume_tagged):
    """Markup volume that groups geometry."""

    @property
    def groupbyvolume(self):
        """Whether to group by volume; False when unset."""
        if "groupbyvolume" not in self._entity_data:
            return bool(0)
        return bool(self._entity_data.get('groupbyvolume'))

    @property
    def groupothergroups(self):
        """Whether to group other groups; False when unset."""
        if "groupothergroups" not in self._entity_data:
            return bool(0)
        return bool(self._entity_data.get('groupothergroups'))
class func_nav_markup(markup_volume_tagged):
    """Markup volume carrying navmesh generation attributes."""

    @property
    def navProperty_NavGen(self):
        """Nav generation tags set on this entity.

        NOTE(review): the flag keys here are strings ('WALKABLESEED',
        'NONAV'), so `value & key` will raise TypeError whenever the
        keyvalue is present — this generated check probably should be a
        membership test; confirm against the stored data format.
        """
        if "navProperty_NavGen" not in self._entity_data:
            return []
        value = self._entity_data.get("navProperty_NavGen", None)
        flag_map = {'Walkable Seed': ('WALKABLESEED', 0), 'No Nav': ('NONAV', 0)}
        return [name for name, (key, _) in flag_map.items() if value & key > 0]

    @property
    def navProperty_NavAttributes(self):
        """Nav attribute tags set on this entity.

        NOTE(review): same string-key `value & key` concern as
        navProperty_NavGen above.
        """
        if "navProperty_NavAttributes" not in self._entity_data:
            return []
        value = self._entity_data.get("navProperty_NavAttributes", None)
        flag_map = {'Avoid': ('AVOID', 0), 'Split': ('SPLIT', 0)}
        return [name for name, (key, _) in flag_map.items() if value & key > 0]

    @property
    def tagFieldNames(self):
        """Names of the tag fields; "navProperty_NavAttributes" when unset."""
        if "tagFieldNames" not in self._entity_data:
            return "navProperty_NavAttributes"
        return self._entity_data.get('tagFieldNames')
class markup_volume_with_ref(markup_volume_tagged):
    """Markup volume with an optional reference position."""

    @property
    def ref_position(self):
        """Reference position vector; origin when unset."""
        if "ref_position" not in self._entity_data:
            return parse_int_vector("0 0 0")
        return parse_int_vector(self._entity_data.get('ref_position'))

    @property
    def use_ref_position(self):
        """Whether the reference position is used; True when unset."""
        if "use_ref_position" not in self._entity_data:
            return bool(1)
        return bool(self._entity_data.get('use_ref_position'))
class post_processing_volume(Trigger):
    """Trigger volume applying post-processing and exposure settings."""

    @property
    def postprocessing(self):
        """Post-processing resource; None when unset."""
        if "postprocessing" not in self._entity_data:
            return None
        return self._entity_data.get('postprocessing')

    @property
    def fadetime(self):
        """Blend-in time in seconds; 1.0 when unset."""
        if "fadetime" not in self._entity_data:
            return float(1.0)
        return float(self._entity_data.get('fadetime'))

    @property
    def enableexposure(self):
        """Whether auto exposure is controlled; True when unset."""
        if "enableexposure" not in self._entity_data:
            return bool(1)
        return bool(self._entity_data.get('enableexposure'))

    @property
    def minexposure(self):
        """Minimum exposure; 0.25 when unset."""
        if "minexposure" not in self._entity_data:
            return float(0.25)
        return float(self._entity_data.get('minexposure'))

    @property
    def maxexposure(self):
        """Maximum exposure; 8.0 when unset."""
        if "maxexposure" not in self._entity_data:
            return float(8)
        return float(self._entity_data.get('maxexposure'))

    @property
    def exposurecompensation(self):
        """Exposure compensation; 0.0 when unset."""
        if "exposurecompensation" not in self._entity_data:
            return float(0)
        return float(self._entity_data.get('exposurecompensation'))

    @property
    def exposurespeedup(self):
        """Exposure adaptation speed up; 1.0 when unset."""
        if "exposurespeedup" not in self._entity_data:
            return float(1)
        return float(self._entity_data.get('exposurespeedup'))

    @property
    def exposurespeeddown(self):
        """Exposure adaptation speed down; 1.0 when unset."""
        if "exposurespeeddown" not in self._entity_data:
            return float(1)
        return float(self._entity_data.get('exposurespeeddown'))

    @property
    def master(self):
        """Whether this is the master volume; False when unset."""
        if "master" not in self._entity_data:
            return bool(0)
        return bool(self._entity_data.get('master'))
class worldspawn(worldbase):
    """World entity carrying map-wide baked lighting settings."""

    @property
    def baked_light_index_min(self):
        """Minimum baked light index; 0 when unset."""
        if "baked_light_index_min" not in self._entity_data:
            return int(0)
        return int(self._entity_data.get('baked_light_index_min'))

    @property
    def baked_light_index_max(self):
        """Maximum baked light index; 256 when unset."""
        if "baked_light_index_max" not in self._entity_data:
            return int(256)
        return int(self._entity_data.get('baked_light_index_max'))

    @property
    def max_lightmap_resolution(self):
        """Maximum lightmap resolution setting; "0" when unset."""
        if "max_lightmap_resolution" not in self._entity_data:
            return "0"
        return self._entity_data.get('max_lightmap_resolution')

    @property
    def lightmap_queries(self):
        """Whether lightmap queries are enabled; True when unset."""
        if "lightmap_queries" not in self._entity_data:
            return bool(1)
        return bool(self._entity_data.get('lightmap_queries'))
class shared_enable_disable:
    """Mixin-style holder exposing the 'start_enabled' entity keyvalue."""

    def __init__(self, entity_data: dict):
        self._entity_data = entity_data

    @property
    def start_enabled(self):
        """Whether the entity starts enabled; True when unset."""
        if "start_enabled" not in self._entity_data:
            return bool(1)
        return bool(self._entity_data.get('start_enabled'))
class trigger_traversal_modifier(Trigger):
pass
@property
def target_point(self):
if "target_point" in self._entity_data:
return parse_int_vector(self._entity_data.get('target_point'))
return parse_int_vector("0 0 0")
@property
def top_point(self):
if "top_point" in self._entity_data:
return parse_int_vector(self._entity_data.get('top_point'))
return parse_int_vector("0 64 0")
@property
def bottom_point(self):
if "bottom_point" in self._entity_data:
return parse_int_vector(self._entity_data.get('bottom_point'))
return parse_int_vector("64 0 0")
@property
def instant_traversal(self):
if "instant_traversal" in self._entity_data:
return | |
        # Case where performance data are already in kWh/yr and no further
# calculations are required
if orig_perf_units == "kWh/yr" and any([
x not in modes for x in orig_perf.keys()]):
perf_kwh_yr = orig_perf
# Case where performance data are in units of kWh/yr, but are
# broken out by operational mode (e.g, active, ready, sleep, off);
# convert to annual kWh/yr values
elif orig_perf_units == "kWh/yr" and any([
x in modes for x in orig_perf.keys()]):
# Pre-allocate converted performance dict
perf_kwh_yr = {}
# Loop through all operational modes and sum performance values
for mode in orig_perf.keys():
# First item in loop; set the first kWh/yr values
if len(perf_kwh_yr.keys()) == 0:
perf_kwh_yr = {key: orig_perf[mode][key]
for key in years_str}
# Subsequent items in loop; add to previous kWh/yr values
else:
perf_kwh_yr = {key: perf_kwh_yr[key] + orig_perf[mode][key]
for key in years_str}
# Case where performance data are in units of W and are
# broken out by operational mode (e.g, active, ready, sleep, off);
# convert to annual kWh/yr values
elif type(orig_perf_units) == list and all([
x in orig_perf_units for x in [
"W", "fraction annual operating hours"]]):
# Pre-allocate converted performance dict
perf_kwh_yr = {}
# Loop through all operational modes and sum performance values
for mode in orig_perf.keys():
# First item in loop; set the first kWh/yr value
# by multiplying W/mode by 8760 annual operational hours and
# dividing by 1000 (to convert from Wh to kWh)
if len(perf_kwh_yr.keys()) == 0:
perf_kwh_yr = {key: ((orig_perf[mode][key][0] *
orig_perf[mode][key][1] * 8760) /
1000)
for key in orig_perf[mode].keys()}
# Subsequent items; add to previous kWh/yr values
else:
perf_kwh_yr = {key: (perf_kwh_yr[key] + (
(orig_perf[mode][key][0] *
orig_perf[mode][key][1] * 8760) / 1000))
for key in orig_perf[mode].keys()}
# Case where other unexpected performance units are given (throw error)
else:
raise ValueError("Unexpected baseline performance units for MELs "
"baseline segment " + str(key_list) + "")
# Set final performance levels
the_perf['typical'] = perf_kwh_yr
# Set final performance units
the_perf["units"] = "kWh/yr"
# Set final performance source data
the_perf['source'] = specific_cpl_data['performance']['source']
# Extract lifetime data as-is
the_life['average'] = specific_cpl_data['lifetime']['average']
the_life['range'] = specific_cpl_data['lifetime']['range']
the_life['units'] = specific_cpl_data['lifetime']['units']
the_life['source'] = specific_cpl_data['lifetime']['source']
# Perform a final check to ensure there are no technologies with
# only partially complete information
if all([len(x) > 0 for x in [
the_cost.keys(), the_perf.keys(), the_life.keys()]]) and (
math.isnan(the_life['average']) is False and
the_life['average'] != 0) and all(
[all([(math.isnan(x) is False and x != 0) for x in y.values()])
for y in [the_cost['typical'], the_perf['typical']]]):
# Add the cost, performance, and lifetime dicts into a master dict
# for the microsegment and envelope component specified by key_list
tech_data_dict = {'installed cost': the_cost,
'performance': the_perf,
'lifetime': the_life}
# If there are missing/incomplete data, simply return 0
else:
tech_data_dict = 0
# If no data were found, simply return 0
else:
tech_data_dict = 0
return tech_data_dict
def cost_converter(cost, units, bldg_class, bldg_type, conversions):
"""Convert envelope cost data to uniform units of $/ft^2 floor.
The envelope cost data are provided in the units of the
original source of the data, To ensure that the conversion from
the original data to the desired cost units of dollars per square
foot floor area is always consistent across all envelope data,
the conversion from the original units to the common and desired
units is performed by this function. The cost in its original
form is input to this function and the relationships between e.g.,
window area and wall area (i.e., window-wall ratio) are used to
convert from the original form to the final desired units.
In some cases, the conversion data are specified for EnergyPlus
building types, and then the conversion must incorporate both
the units conversion and applying the appropriate weights to
convert from EnergyPlus to Annual Energy Outlook building types.
Additionally, some data require multiple conversions, thus this
function might call itself again to complete the conversion
process. For example, window data must be converted from window
area to wall area, and then from wall area to floor area.
Args:
cost (float): The cost value that requires conversion.
units (str): The units of the cost indicated.
bldg_class (str): The applicable building class (i.e., either
"residential" or "commercial").
bldg_type (str): THe applicable specific building type (i.e.,
"single family home" or "small office") from the building
types used in the AEO.
conversions (dict): Cost unit conversions for envelope (and
heating and cooling equipment, though those conversions are
not used in this function) components, as well as the
mapping from EnergyPlus building prototypes to AEO building
types (as used in the microsegments file) to convert cost
data from sources that use the EnergyPlus building types
to the AEO building types.
Outputs:
The updated cost in the final desired units of $/ft^2 floor and
the revised units to verify that the conversion is complete.
"""
# Record the year (as YYYY) in the cost units for later addition to
# the adjusted cost units and then strip the year off the units
the_year = units[:4]
units = units[4:]
# Obtain the dict of cost conversion factors for the envelope
# components (for those components for which a conversion factor is
# provided); note that the keys for the desired level of the dict
# are specified separately and the functools.reduce function is
# used to extract the dict at the specified level
dict_keys = ['cost unit conversions', 'heating and cooling', 'demand']
env_cost_factors = ft.reduce(dict.get, dict_keys, conversions)
# Obtain the dict of building type conversion factors specified
# for the particular building type passed to the function; these
# data might be needed later contingent on the particular cost
# being converted; note the same method as above for extracting
# the data from a deeply nested dict
dict_keys = ['building type conversions', 'conversion data', 'value',
bldg_class, bldg_type]
bldg_type_conversions = ft.reduce(dict.get, dict_keys, conversions)
# Loop through the cost conversion factors and compare their
# specified "original units" with the units passed to this
# function to determine the relevant envelope component; the
# approach applied here yields the last match, so if multiple
# data requiring conversion are specified with the same units,
# this matching approach might not work as expected
for key in env_cost_factors.keys():
if env_cost_factors[key]['original units'] == units:
env_component = key
# Extract the conversion factors associated with the particular
# envelope component identified in the previous step and for the
# building class passed to this function; this function will
# trigger an error if no matching envelope component was
# identified by the previous step
dict_keys = [env_component, 'conversion factor', 'value', bldg_class]
bldg_specific_cost_conv = ft.reduce(dict.get, dict_keys, env_cost_factors)
# Identify the units for the forthcoming adjusted cost
adj_cost_units = env_cost_factors[env_component]['revised units']
# Add the year onto the anticipated revised units from the conversion
adj_cost_units = the_year + adj_cost_units
# Preallocate a variable for the adjusted cost value
adj_cost = 0
# Explore any remaining structure in env_cost_factors based on the
# structure of the data and the specific data provided, which
# varies by building type, to calculate the correct adjusted cost
# (though this function does not explicitly use building type to
# determine the appropriate approach for the calculation)
# If there is any additional structure beyond the building class
# (i.e., residential or commercial) level, calculate the adjusted
# cost depending on whether any building type conversions (i.e.,
# conversions from EnergyPlus to AEO building types) are specified
if isinstance(bldg_specific_cost_conv, dict):
# if the cost unit conversions for the current envelope
# component are specified by building class but do not require
# conversion from the EnergyPlus reference buildings to the AEO
# buildings, complete the calculation accordingly
if bldg_type_conversions is None:
adj_cost = cost * bldg_specific_cost_conv[bldg_type]
# If building type conversion is required, loop through the
# EnergyPlus building types associated with | |
in enumerate(results[category]):
if not self.leaderboard and score.score == 0:
results[category][i] = ScoreMock(
target=score.target,
score='DNS',
hits='',
golds='',
xs='',
disqualified=False,
retired=False,
placing=None,
)
return results
class ByRoundAllShot(ByRound, BaseResultMode):
    """Round-by-round results that also count an archer's later shoots."""
    slug = 'all-shot'
    name = 'By round (include later shoots)'
    include_later_shoots_anyway = True
class ByRoundProgressional(ByRound, BaseResultMode):
    """By-round results truncated to a leading portion of the round.

    When the request carries an ``up_to`` parameter, each score is replaced
    by the sum of only the arrows shot up to that end, so the standings can
    be replayed progressively.
    """
    slug = 'by-round-progressional'
    name = 'By round (progressional)'

    def get_results(self, competition, scores, leaderboard=False, request=None):
        if request and request.GET.get('up_to') and scores:
            # ``up_to`` is a 0-based end index; convert it to the 1-based
            # arrow-of-round cutoff using the per-end arrow count of the
            # first score's session (assumes all sessions use the same
            # arrows-per-end — TODO confirm).
            arrow_of_round = (int(request.GET['up_to']) + 1) * scores[0].target.session_entry.session_round.session.arrows_entered_per_end
            cursor = connection.cursor()
            # Sum arrow values per score, restricted to this competition and
            # to arrows at or before the cutoff. Parameters are bound with
            # %s placeholders (no SQL injection from the request value,
            # which is also int()-converted above).
            cursor.execute('''
            SELECT "scores_arrow"."score_id", SUM("scores_arrow"."arrow_value")
            FROM "scores_arrow" WHERE "scores_arrow"."score_id" IN (
                SELECT "scores_score"."id"
                FROM "scores_score"
                INNER JOIN "entries_targetallocation" ON ( "scores_score"."target_id" = "entries_targetallocation"."id" )
                INNER JOIN "entries_sessionentry" ON ( "entries_targetallocation"."session_entry_id" = "entries_sessionentry"."id" )
                INNER JOIN "entries_competitionentry" ON ( "entries_sessionentry"."competition_entry_id" = "entries_competitionentry"."id" )
                WHERE "entries_competitionentry"."competition_id" = %s
            ) AND "scores_arrow"."arrow_of_round" <= %s GROUP BY "scores_arrow"."score_id";
            ''', (
                competition.pk,
                arrow_of_round,
            ))
            rows = cursor.fetchall()
            partial_scores = dict(rows)
            cursor.close()
            # Overwrite each score in place with its partial total; keep the
            # full total visible as "(N)" in final_score when it differs.
            for score in scores:
                partial = partial_scores.get(score.pk)
                if partial is not None:
                    if not score.score == partial:
                        score.final_score = '(%s)' % score.score
                    else:
                        score.final_score = ''
                    score.partial_score = partial
                    score.score = partial
        self.leaderboard = leaderboard
        rounds = self.get_rounds(competition)
        return OrderedDict((
            self.get_section_for_round(round, competition),
            self.get_round_results(competition, round, scores)
        ) for round in rounds)
class DoubleRound(BaseResultMode):
    """Result mode combining each archer's best-two shoots of a round."""
    slug = 'double-round'
    name = 'Double round'

    def get_results(self, competition, scores, leaderboard=False, request=None):
        """Get the results for each category, by round.

        Strategy:
        - find all the rounds shot
        - order by the first session they're shot in
        - go through scores, adding to each category specific sets
        - need to add a duck-typed score object which is the double
        """
        self.leaderboard = leaderboard
        rounds, valid_session_rounds = self.get_rounds(competition)
        return OrderedDict((
            self.get_section_for_round(round, competition),
            self.get_round_results(competition, round, valid_session_rounds, scores)
        ) for round in rounds)

    def get_rounds(self, competition):
        """Return (distinct shot rounds, session rounds counting for ranking)."""
        from entries.models import SessionRound
        session_rounds = SessionRound.objects.filter(session__competition=competition).order_by('session__start').exclude(
            olympicsessionround__exclude_ranking_rounds=True,
        )
        rounds = []
        valid_session_rounds = []
        for session_round in session_rounds:
            if session_round.shot_round not in rounds:
                rounds.append(session_round.shot_round)
            valid_session_rounds.append(session_round)
        return rounds, valid_session_rounds

    def get_round_results(self, competition, round, valid_session_rounds, scores):
        """Build per-category doubled scores for one shot round."""
        results = OrderedDict()
        for score in scores:
            session_entry = score.target.session_entry
            # BUG FIX: compare the ids with !=, not `is not` — identity
            # comparison of ints is unreliable outside the small-int cache.
            if session_entry.session_round.shot_round.id != round.id:
                continue
            if session_entry.session_round not in valid_session_rounds:
                continue
            categories = self.get_categories_for_entry(competition, session_entry.competition_entry)
            for category in categories:
                if category not in results:
                    results[category] = {}
                if session_entry.competition_entry not in results[category]:
                    results[category][session_entry.competition_entry] = []
                results[category][session_entry.competition_entry].append(score)
        # Iterate over a snapshot so categories with no doubles can be
        # removed. BUG FIX: popping from `results` while iterating
        # results.items() raises RuntimeError on Python 3.
        for category, entry_scores in list(results.items()):
            # Keep only entries that shot the round at least twice.
            entry_scores = OrderedDict(
                (entry, rounds) for entry, rounds in entry_scores.items() if len(rounds) >= 2)
            if not entry_scores:
                results.pop(category)
                continue
            # Use each entry's first two shoots, in session order.
            for entry in entry_scores:
                entry_scores[entry] = sorted(
                    entry_scores[entry],
                    key=lambda s: s.target.session_entry.session_round.session.start,
                )[:2]
            new_scores = [ScoreMock(
                disqualified=any(s.disqualified for s in sub_scores),
                # BUG FIX: the retired flag previously checked
                # `s.disqualified` (copy-paste error).
                retired=any(s.retired for s in sub_scores),
                target=sub_scores[0].target,
                score=sum(s.score for s in sub_scores),
                hits=sum(s.hits for s in sub_scores),
                golds=sum(s.golds for s in sub_scores),
                xs=sum(s.xs for s in sub_scores),
            ) for entry, sub_scores in entry_scores.items()]
            if not self.leaderboard:
                # Drop zero doubles from final results (kept on leaderboards).
                new_scores = filter(lambda s: s.score > 0, new_scores)
            results[category] = self.sort_results(new_scores)
        return results

    def label_for_round(self, round):
        """Label a round as its double, e.g. 'Double York'."""
        return 'Double %s' % str(round)
class Team(BaseResultMode):
slug = 'team'
name = 'Teams'
def __init__(self, **kwargs):
super(Team, self).__init__(**kwargs)
self.include_distance_breakdown = False # always for teams
def get_results(self, competition, scores, leaderboard=False, request=None):
"""
Strategy:
- split by team
- find the top scores in each team
- filter out incomplete teams
- aggregate and order
- repeat for each team type
"""
clubs, round = self.split_by_club(scores, competition, leaderboard)
if not clubs:
return {}
results = OrderedDict()
for type in self.get_team_types(competition):
type_results = self.get_team_scores(competition, clubs, type)
if type_results:
results[type] = type_results
return {self.get_section_for_round(round, competition): results}
    def split_by_club(self, scores, competition, leaderboard, valid_rounds=None):
        """Group ranking-round scores by team name.

        Returns ``(clubs, round)`` where ``clubs`` maps team name to a list
        of eligible scores and ``round`` is the shot round of the first
        eligible score (None when no score qualifies). ``competition`` may
        be a Competition or a league leg (a holder of competitions);
        ``valid_rounds`` overrides the session-round lookup when given.
        """
        from entries.models import Competition, SessionRound
        if isinstance(competition, Competition) and not valid_rounds:
            session_rounds = SessionRound.objects.filter(
                session__competition=competition,
            ).exclude(
                olympicsessionround__exclude_ranking_rounds=True,
            ).order_by('session__start').select_related('shot_round')
        elif not valid_rounds:
            # We have a league leg
            session_rounds = SessionRound.objects.filter(
                session__competition__in=competition.competitions.all(),
            ).exclude(
                olympicsessionround__exclude_ranking_rounds=True,
            ).order_by('session__start').select_related('shot_round')
        else:
            session_rounds = valid_rounds
        round = None
        clubs = {}
        for score in scores:
            # Zero scores only count on leaderboards (live display).
            if not leaderboard and not score.score:
                continue
            session_entry = score.target.session_entry
            if session_entry.session_round not in session_rounds:
                continue
            # The round reported back is that of the first eligible score.
            if round is None:
                round = session_entry.session_round.shot_round
            club = session_entry.competition_entry.team_name()
            # Entries with no team (e.g. guests) are excluded.
            if not club:
                continue
            # Optionally only the archer's first two shoots count.
            if session_entry.index > 1 and competition.exclude_later_shoots:
                continue
            # DSQ/retired scores never contribute to a team total.
            if score.disqualified or score.retired:
                continue
            if club not in clubs:
                clubs[club] = []
            clubs[club].append(score)
        return clubs, round
def get_team_types(self, competition):
# TODO: support team types properly
if competition.use_county_teams:
return [
'Recurve',
'Compound',
'Barebow',
'Longbow',
'Junior Recurve',
'Junior Compound',
]
team_types = []
if competition.team_size:
if competition.split_gender_teams:
team_types += ['Gents non-compound', 'Ladies non-compound']
else:
team_types.append('Non-compound')
if competition.recurve_team_size is not None:
team_types.append('Recurve')
if competition.barebow_team_size is not None:
team_types.append('Barebow')
if competition.compound_team_size is not None:
team_types.append('Compound')
if competition.junior_team_size is not None:
team_types.append('Junior')
if competition.novice_team_size is not None:
team_types.append('Novice')
return team_types
def get_team_scores(self, competition, clubs, type):
club_results = []
for club, club_scores in clubs.items():
club_scores = [s for s in club_scores if self.is_valid_for_type(s, type, competition)]
if competition.combine_rounds_for_team_scores:
club_scores = self.combine_rounds(club_scores)
club_scores = sorted(club_scores, key=lambda s: (s.score, s.hits, s.golds, s.xs), reverse=True)
team_size = competition.team_size
if type == 'Novice' and competition.novice_team_size:
team_size = competition.novice_team_size
if type == 'Compound' and competition.compound_team_size:
team_size = competition.compound_team_size
if type in ['Longbow', 'Barebow'] and competition.use_county_teams:
# bit of a hack to treat compound team size as "minor team size"
team_size = competition.compound_team_size
if type == 'Recurve' and competition.recurve_team_size:
team_size = competition.recurve_team_size
if type == 'Barebow' and competition.barebow_team_size:
team_size = competition.barebow_team_size
if type == 'Junior' and competition.junior_team_size:
team_size = competition.junior_team_size
if competition.force_mixed_teams or (competition.force_mixed_teams_recurve_only and type == 'Recurve'):
gent_found = False
lady_found = False
mixed_team_found = False
for i, score in enumerate(club_scores):
if score.target.session_entry.competition_entry.archer.gender == 'G':
gent_found = True
else:
lady_found = True
if gent_found and lady_found:
if i >= team_size:
club_scores = club_scores[:team_size - 1] + [score]
else:
club_scores = club_scores[:team_size]
mixed_team_found = True
break
if not mixed_team_found:
club_scores = []
else:
club_scores = club_scores[:team_size]
if not club_scores:
continue
if len(club_scores) < team_size and not competition.allow_incomplete_teams:
continue
if getattr(club_scores[0], 'components', None):
club_scores = sum((s.components for s in club_scores), [])
if competition.combine_rounds_for_team_scores and not competition.allow_incomplete_teams:
sessions = competition.session_set.filter(sessionround__isnull=False).distinct()
if len(club_scores) < (team_size * len(sessions)):
continue
team = ScoreMock(
score=sum(s.score for s in club_scores),
hits=sum(s.hits for s in club_scores),
golds=sum(s.golds for s in club_scores),
xs=sum(s.xs for s in club_scores),
club=club,
team=club_scores,
)
club_results.append((club, team))
return self.sort_results([c[1] for c in club_results])
def is_valid_for_type(self, score, type, competition):
if score.target.session_entry.competition_entry.guest:
return False
if competition.use_county_teams:
bowstyle = score.target.session_entry.competition_entry.bowstyle.name
is_junior = score.target.session_entry.competition_entry.age == 'J'
if type in ['Recurve', 'Compound', 'Barebow', 'Longbow']:
return not is_junior and bowstyle == type
if type in ['Junior Recurve', 'Junior Compound']:
return is_junior and 'Junior %s' % bowstyle == type
is_non_compound = not score.target.session_entry.competition_entry.bowstyle.name == 'Compound'
if type == 'Non-compound':
if not competition.novices_in_experienced_teams:
return is_non_compound and score.target.session_entry.competition_entry.novice == 'E'
return is_non_compound
if type == 'Gents non-compound':
if not competition.novices_in_experienced_teams:
return (is_non_compound and
score.target.session_entry.competition_entry.novice == 'E' and
score.target.session_entry.competition_entry.archer.gender == 'G')
return is_non_compound and score.target.session_entry.competition_entry.archer.gender == 'G'
if type == 'Ladies non-compound':
if not competition.novices_in_experienced_teams:
return (is_non_compound and
score.target.session_entry.competition_entry.novice == 'E' and
score.target.session_entry.competition_entry.archer.gender == 'L')
return is_non_compound and score.target.session_entry.competition_entry.archer.gender == 'L'
if type in ['Recurve', 'Compound', 'Barebow', 'Longbow']:
bowstyle = score.target.session_entry.competition_entry.bowstyle.name
return bowstyle == type
if type == 'Novice':
return is_non_compound and score.target.session_entry.competition_entry.novice == 'N'
if type == 'Junior':
is_junior = score.target.session_entry.competition_entry.age == 'J'
if not competition.novices_in_experienced_teams:
return is_junior and is_non_compound and score.target.session_entry.competition_entry.novice == 'E'
return is_junior and is_non_compound
def get_main_headers(self, competition):
return ['County' if competition.use_county_teams else 'Club']
def label_for_round(self, round):
return 'Team'
def combine_rounds(self, club_scores):
combined_scores = []
for competition_entry, scores in itertools.groupby(club_scores,
lambda s: s.target.session_entry.competition_entry):
scores = list(scores)
combined_scores.append(ScoreMock(
score=sum(s.score for s in scores),
hits=sum(s.hits for s in scores),
golds=sum(s.golds for s in scores),
xs=sum(s.xs for s in scores),
target=scores[0].target,
components=scores,
))
return combined_scores
class H2HSeedings(ByRound, Team, BaseResultMode):
slug = 'seedings'
name = 'Seedings'
def get_rounds(self, competition):
from olympic.models import OlympicSessionRound
session_rounds = OlympicSessionRound.objects.filter(session__competition=competition).select_related('category').prefetch_related('ranking_rounds', 'category__bowstyles').order_by('id')
self.categories = [session_round.category for session_round in session_rounds]
return session_rounds
def get_results(self, competition, scores, leaderboard=False, request=None):
self.leaderboard = leaderboard
rounds = self.get_rounds(competition)
results = OrderedDict()
for round in rounds:
section = self.get_section_for_round(round, competition)
section.seedings_confirmed = round.seeding_set.exists()
section_scores = self.filter_scores(competition, scores, round.category)
section_results = self.get_round_results(competition, round, section_scores, section.seedings_confirmed, leaderboard)
results[section] = section_results
return results
def get_round_results(self, competition, round, scores, seedings_confirmed, leaderboard):
if not round.shot_round.team_type:
if seedings_confirmed:
results = []
score_lookup = {score.target.session_entry.competition_entry: score for score in scores}
| |
"""The tests for the MQTT siren platform."""
import copy
from unittest.mock import patch
import pytest
from homeassistant.components import siren
from homeassistant.components.siren.const import ATTR_VOLUME_LEVEL
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_ENTITY_ID,
ENTITY_MATCH_ALL,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
STATE_UNKNOWN,
)
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_encoding_subscribable_topics,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_publishing_with_custom_encoding,
help_test_reloadable,
help_test_reloadable_late,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_setting_blocked_attribute_via_mqtt_json_message,
help_test_setup_manual_entity_from_yaml,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.common import async_fire_mqtt_message
# Minimal MQTT siren configuration reused by the generic help_test_* helpers.
DEFAULT_CONFIG = {
    siren.DOMAIN: {"platform": "mqtt", "name": "test", "command_topic": "test-topic"}
}
async def async_turn_on(hass, entity_id=ENTITY_MATCH_ALL, parameters=None) -> None:
    """Turn all or specified siren on.

    :param hass: Home Assistant test instance.
    :param entity_id: entity to target, or ENTITY_MATCH_ALL.
    :param parameters: optional extra service data (tone, duration, ...).
    """
    # A mutable default ({}) is a Python pitfall (shared between calls);
    # use None and substitute an empty mapping at call time instead.
    data = {ATTR_ENTITY_ID: entity_id} if entity_id else {}
    data.update(parameters or {})
    await hass.services.async_call(siren.DOMAIN, SERVICE_TURN_ON, data, blocking=True)
async def async_turn_off(hass, entity_id=ENTITY_MATCH_ALL) -> None:
    """Turn all or specified siren off."""
    data = {}
    if entity_id:
        data[ATTR_ENTITY_ID] = entity_id
    await hass.services.async_call(siren.DOMAIN, SERVICE_TURN_OFF, data, blocking=True)
async def test_controlling_state_via_topic(hass, mqtt_mock_entry_with_yaml_config):
    """Test the controlling state via topic."""
    assert await async_setup_component(
        hass,
        siren.DOMAIN,
        {
            siren.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "payload_on": 1,
                "payload_off": 0,
            }
        },
    )
    await hass.async_block_till_done()
    await mqtt_mock_entry_with_yaml_config()
    # No retained message yet: entity starts unknown, and since a state_topic
    # is configured the state must not be flagged as assumed.
    state = hass.states.get("siren.test")
    assert state.state == STATE_UNKNOWN
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    # Numeric payloads arrive on the wire as strings.
    async_fire_mqtt_message(hass, "state-topic", "1")
    state = hass.states.get("siren.test")
    assert state.state == STATE_ON
    async_fire_mqtt_message(hass, "state-topic", "0")
    state = hass.states.get("siren.test")
    assert state.state == STATE_OFF
async def test_sending_mqtt_commands_and_optimistic(
    hass, mqtt_mock_entry_with_yaml_config
):
    """Test the sending MQTT commands in optimistic mode."""
    assert await async_setup_component(
        hass,
        siren.DOMAIN,
        {
            siren.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "command_topic": "command-topic",
                "payload_on": "beer on",
                "payload_off": "beer off",
                "qos": "2",
            }
        },
    )
    await hass.async_block_till_done()
    mqtt_mock = await mqtt_mock_entry_with_yaml_config()
    # No state_topic configured -> optimistic mode: state is assumed.
    state = hass.states.get("siren.test")
    assert state.state == STATE_OFF
    assert state.attributes.get(ATTR_ASSUMED_STATE)
    # Commands are published as JSON with the configured payload and QoS 2.
    await async_turn_on(hass, entity_id="siren.test")
    mqtt_mock.async_publish.assert_called_once_with(
        "command-topic", '{"state": "beer on"}', 2, False
    )
    mqtt_mock.async_publish.reset_mock()
    state = hass.states.get("siren.test")
    assert state.state == STATE_ON
    await async_turn_off(hass, entity_id="siren.test")
    mqtt_mock.async_publish.assert_called_once_with(
        "command-topic", '{"state": "beer off"}', 2, False
    )
    state = hass.states.get("siren.test")
    assert state.state == STATE_OFF
async def test_controlling_state_via_topic_and_json_message(
    hass, mqtt_mock_entry_with_yaml_config, caplog
):
    """Test the controlling state via topic and JSON message."""
    assert await async_setup_component(
        hass,
        siren.DOMAIN,
        {
            siren.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "payload_on": "beer on",
                "payload_off": "beer off",
                "state_value_template": "{{ value_json.val }}",
            }
        },
    )
    await hass.async_block_till_done()
    await mqtt_mock_entry_with_yaml_config()
    state = hass.states.get("siren.test")
    assert state.state == STATE_UNKNOWN
    # The template extracts "val"; matching payload_on turns the siren on.
    async_fire_mqtt_message(hass, "state-topic", '{"val":"beer on"}')
    state = hass.states.get("siren.test")
    assert state.state == STATE_ON
    # A JSON null value resets the entity to unknown.
    async_fire_mqtt_message(hass, "state-topic", '{"val": null }')
    state = hass.states.get("siren.test")
    assert state.state == STATE_UNKNOWN
    async_fire_mqtt_message(hass, "state-topic", '{"val":"beer off"}')
    state = hass.states.get("siren.test")
    assert state.state == STATE_OFF
async def test_controlling_state_and_attributes_with_json_message_without_template(
    hass, mqtt_mock_entry_with_yaml_config, caplog
):
    """Test the controlling state via topic and JSON message without a value template."""
    assert await async_setup_component(
        hass,
        siren.DOMAIN,
        {
            siren.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "payload_on": "beer on",
                "payload_off": "beer off",
                "available_tones": ["ping", "siren", "bell"],
            }
        },
    )
    await hass.async_block_till_done()
    await mqtt_mock_entry_with_yaml_config()
    # Initially unknown, with no siren attributes set.
    state = hass.states.get("siren.test")
    assert state.state == STATE_UNKNOWN
    assert state.attributes.get(siren.ATTR_TONE) is None
    assert state.attributes.get(siren.ATTR_DURATION) is None
    assert state.attributes.get(siren.ATTR_VOLUME_LEVEL) is None
    # A full JSON payload sets state plus tone/duration/volume attributes.
    async_fire_mqtt_message(
        hass,
        "state-topic",
        '{"state":"beer on", "tone": "bell", "duration": 10, "volume_level": 0.5 }',
    )
    state = hass.states.get("siren.test")
    assert state.state == STATE_ON
    assert state.attributes.get(siren.ATTR_TONE) == "bell"
    assert state.attributes.get(siren.ATTR_DURATION) == 10
    assert state.attributes.get(siren.ATTR_VOLUME_LEVEL) == 0.5
    async_fire_mqtt_message(
        hass,
        "state-topic",
        '{"state":"beer off", "duration": 5, "volume_level": 0.6}',
    )
    state = hass.states.get("siren.test")
    assert state.state == STATE_OFF
    # Attributes omitted from a payload keep their previous values.
    assert state.attributes.get(siren.ATTR_TONE) == "bell"
    assert state.attributes.get(siren.ATTR_DURATION) == 5
    assert state.attributes.get(siren.ATTR_VOLUME_LEVEL) == 0.6
    # Test validation of received attributes, invalid
    async_fire_mqtt_message(
        hass,
        "state-topic",
        '{"state":"beer on", "duration": 6, "volume_level": 2 }',
    )
    state = hass.states.get("siren.test")
    assert (
        "Unable to update siren state attributes from payload '{'duration': 6, 'volume_level': 2}': value must be at most 1 for dictionary value @ data['volume_level']"
        in caplog.text
    )
    # The invalid payload is rejected wholesale: state and attributes unchanged.
    assert state.state == STATE_OFF
    assert state.attributes.get(siren.ATTR_TONE) == "bell"
    assert state.attributes.get(siren.ATTR_DURATION) == 5
    assert state.attributes.get(siren.ATTR_VOLUME_LEVEL) == 0.6
    # An empty JSON object is ignored with a log message.
    async_fire_mqtt_message(
        hass,
        "state-topic",
        "{}",
    )
    assert state.state == STATE_OFF
    assert state.attributes.get(siren.ATTR_TONE) == "bell"
    assert state.attributes.get(siren.ATTR_DURATION) == 5
    assert state.attributes.get(siren.ATTR_VOLUME_LEVEL) == 0.6
    assert (
        "Ignoring empty payload '{}' after rendering for topic state-topic"
        in caplog.text
    )
async def test_filtering_not_supported_attributes_optimistic(
    hass, mqtt_mock_entry_with_yaml_config
):
    """Test setting attributes with support flags optimistic."""
    # Three entities sharing one base config, each with a different
    # capability removed: duration, volume, and tones respectively.
    config = {
        "platform": "mqtt",
        "command_topic": "command-topic",
        "available_tones": ["ping", "siren", "bell"],
    }
    config1 = copy.deepcopy(config)
    config1["name"] = "test1"
    config1["support_duration"] = False
    config2 = copy.deepcopy(config)
    config2["name"] = "test2"
    config2["support_volume_set"] = False
    config3 = copy.deepcopy(config)
    config3["name"] = "test3"
    del config3["available_tones"]
    assert await async_setup_component(
        hass,
        siren.DOMAIN,
        {siren.DOMAIN: [config1, config2, config3]},
    )
    await hass.async_block_till_done()
    await mqtt_mock_entry_with_yaml_config()
    # test1: duration unsupported -> the duration parameter is dropped.
    state1 = hass.states.get("siren.test1")
    assert state1.state == STATE_OFF
    assert siren.ATTR_DURATION not in state1.attributes
    assert siren.ATTR_AVAILABLE_TONES in state1.attributes
    assert siren.ATTR_TONE in state1.attributes
    assert siren.ATTR_VOLUME_LEVEL in state1.attributes
    await async_turn_on(
        hass,
        entity_id="siren.test1",
        parameters={
            siren.ATTR_DURATION: 22,
            siren.ATTR_TONE: "ping",
            ATTR_VOLUME_LEVEL: 0.88,
        },
    )
    state1 = hass.states.get("siren.test1")
    assert state1.attributes.get(siren.ATTR_TONE) == "ping"
    assert state1.attributes.get(siren.ATTR_DURATION) is None
    assert state1.attributes.get(siren.ATTR_VOLUME_LEVEL) == 0.88
    # test2: volume unsupported -> the volume parameter is dropped.
    state2 = hass.states.get("siren.test2")
    assert siren.ATTR_DURATION in state2.attributes
    assert siren.ATTR_AVAILABLE_TONES in state2.attributes
    assert siren.ATTR_TONE in state2.attributes
    assert siren.ATTR_VOLUME_LEVEL not in state2.attributes
    await async_turn_on(
        hass,
        entity_id="siren.test2",
        parameters={
            siren.ATTR_DURATION: 22,
            siren.ATTR_TONE: "ping",
            ATTR_VOLUME_LEVEL: 0.88,
        },
    )
    state2 = hass.states.get("siren.test2")
    assert state2.attributes.get(siren.ATTR_TONE) == "ping"
    assert state2.attributes.get(siren.ATTR_DURATION) == 22
    assert state2.attributes.get(siren.ATTR_VOLUME_LEVEL) is None
    # test3: no tones configured -> the tone parameter is dropped.
    state3 = hass.states.get("siren.test3")
    assert siren.ATTR_DURATION in state3.attributes
    assert siren.ATTR_AVAILABLE_TONES not in state3.attributes
    assert siren.ATTR_TONE not in state3.attributes
    assert siren.ATTR_VOLUME_LEVEL in state3.attributes
    await async_turn_on(
        hass,
        entity_id="siren.test3",
        parameters={
            siren.ATTR_DURATION: 22,
            siren.ATTR_TONE: "ping",
            ATTR_VOLUME_LEVEL: 0.88,
        },
    )
    state3 = hass.states.get("siren.test3")
    assert state3.attributes.get(siren.ATTR_TONE) is None
    assert state3.attributes.get(siren.ATTR_DURATION) == 22
    assert state3.attributes.get(siren.ATTR_VOLUME_LEVEL) == 0.88
async def test_filtering_not_supported_attributes_via_state(
    hass, mqtt_mock_entry_with_yaml_config
):
    """Test setting attributes with support flags via state."""
    # Same three-entity setup as the optimistic variant, but attributes
    # arrive via state messages instead of service-call parameters.
    config = {
        "platform": "mqtt",
        "command_topic": "command-topic",
        "available_tones": ["ping", "siren", "bell"],
    }
    config1 = copy.deepcopy(config)
    config1["name"] = "test1"
    config1["state_topic"] = "state-topic1"
    config1["support_duration"] = False
    config2 = copy.deepcopy(config)
    config2["name"] = "test2"
    config2["state_topic"] = "state-topic2"
    config2["support_volume_set"] = False
    config3 = copy.deepcopy(config)
    config3["name"] = "test3"
    config3["state_topic"] = "state-topic3"
    del config3["available_tones"]
    assert await async_setup_component(
        hass,
        siren.DOMAIN,
        {siren.DOMAIN: [config1, config2, config3]},
    )
    await hass.async_block_till_done()
    await mqtt_mock_entry_with_yaml_config()
    # test1: duration unsupported -> duration in the payload is ignored.
    state1 = hass.states.get("siren.test1")
    assert state1.state == STATE_UNKNOWN
    assert siren.ATTR_DURATION not in state1.attributes
    assert siren.ATTR_AVAILABLE_TONES in state1.attributes
    assert siren.ATTR_TONE in state1.attributes
    assert siren.ATTR_VOLUME_LEVEL in state1.attributes
    async_fire_mqtt_message(
        hass,
        "state-topic1",
        '{"state":"ON", "duration": 22, "tone": "ping", "volume_level": 0.88}',
    )
    await hass.async_block_till_done()
    state1 = hass.states.get("siren.test1")
    assert state1.attributes.get(siren.ATTR_TONE) == "ping"
    assert state1.attributes.get(siren.ATTR_DURATION) is None
    assert state1.attributes.get(siren.ATTR_VOLUME_LEVEL) == 0.88
    # test2: volume unsupported -> volume_level in the payload is ignored.
    state2 = hass.states.get("siren.test2")
    assert siren.ATTR_DURATION in state2.attributes
    assert siren.ATTR_AVAILABLE_TONES in state2.attributes
    assert siren.ATTR_TONE in state2.attributes
    assert siren.ATTR_VOLUME_LEVEL not in state2.attributes
    async_fire_mqtt_message(
        hass,
        "state-topic2",
        '{"state":"ON", "duration": 22, "tone": "ping", "volume_level": 0.88}',
    )
    await hass.async_block_till_done()
    state2 = hass.states.get("siren.test2")
    assert state2.attributes.get(siren.ATTR_TONE) == "ping"
    assert state2.attributes.get(siren.ATTR_DURATION) == 22
    assert state2.attributes.get(siren.ATTR_VOLUME_LEVEL) is None
    # test3: no tones configured -> tone in the payload is ignored.
    state3 = hass.states.get("siren.test3")
    assert siren.ATTR_DURATION in state3.attributes
    assert siren.ATTR_AVAILABLE_TONES not in state3.attributes
    assert siren.ATTR_TONE not in state3.attributes
    assert siren.ATTR_VOLUME_LEVEL in state3.attributes
    async_fire_mqtt_message(
        hass,
        "state-topic3",
        '{"state":"ON", "duration": 22, "tone": "ping", "volume_level": 0.88}',
    )
    await hass.async_block_till_done()
    state3 = hass.states.get("siren.test3")
    assert state3.attributes.get(siren.ATTR_TONE) is None
    assert state3.attributes.get(siren.ATTR_DURATION) == 22
    assert state3.attributes.get(siren.ATTR_VOLUME_LEVEL) == 0.88
async def test_availability_when_connection_lost(
    hass, mqtt_mock_entry_with_yaml_config
):
    """Test availability after MQTT disconnection."""
    # Delegates to the shared MQTT helper with the siren default config.
    await help_test_availability_when_connection_lost(
        hass, mqtt_mock_entry_with_yaml_config, siren.DOMAIN, DEFAULT_CONFIG
    )
async def test_availability_without_topic(hass, mqtt_mock_entry_with_yaml_config):
    """Test availability without defined availability topic."""
    # Delegates to the shared MQTT helper with the siren default config.
    await help_test_availability_without_topic(
        hass, mqtt_mock_entry_with_yaml_config, siren.DOMAIN, DEFAULT_CONFIG
    )
async def test_default_availability_payload(hass, mqtt_mock_entry_with_yaml_config):
    """Test availability by default payload with defined topic."""
    config = {
        siren.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "payload_on": 1,
            "payload_off": 0,
        }
    }
    # True/"state-topic"/"1" let the helper also verify state handling
    # while the entity toggles between available and unavailable.
    await help_test_default_availability_payload(
        hass,
        mqtt_mock_entry_with_yaml_config,
        siren.DOMAIN,
        config,
        True,
        "state-topic",
        "1",
    )
async def test_custom_availability_payload(hass, mqtt_mock_entry_with_yaml_config):
    """Test availability by custom payload with defined topic."""
    config = {
        siren.DOMAIN: {
            "platform": "mqtt",
            "name": "test",
            "state_topic": "state-topic",
            "command_topic": "command-topic",
            "payload_on": 1,
            "payload_off": 0,
        }
    }
    # True/"state-topic"/"1" let the helper also verify state handling
    # while the entity toggles between available and unavailable.
    await help_test_custom_availability_payload(
        hass,
        mqtt_mock_entry_with_yaml_config,
        siren.DOMAIN,
        config,
        True,
        "state-topic",
        "1",
    )
async def test_custom_state_payload(hass, mqtt_mock_entry_with_yaml_config):
    """Test the state payload."""
    assert await async_setup_component(
        hass,
        siren.DOMAIN,
        {
            siren.DOMAIN: {
                "platform": "mqtt",
                "name": "test",
                "state_topic": "state-topic",
                "command_topic": "command-topic",
                "payload_on": 1,
                "payload_off": 0,
                # state_on/state_off override payload_on/payload_off for
                # incoming state messages only.
                "state_on": "HIGH",
                "state_off": "LOW",
            }
        },
    )
    await hass.async_block_till_done()
    await mqtt_mock_entry_with_yaml_config()
    state = hass.states.get("siren.test")
    assert state.state == STATE_UNKNOWN
    assert not state.attributes.get(ATTR_ASSUMED_STATE)
    async_fire_mqtt_message(hass, "state-topic", "HIGH")
    state = hass.states.get("siren.test")
    assert state.state == STATE_ON
    async_fire_mqtt_message(hass, "state-topic", "LOW")
    state = hass.states.get("siren.test")
    assert state.state == STATE_OFF
async def test_setting_attribute_via_mqtt_json_message(
    hass, mqtt_mock_entry_with_yaml_config
):
    """Test the setting of attribute via MQTT with JSON payload."""
    # Delegates to the shared MQTT helper with the siren default config.
    await help_test_setting_attribute_via_mqtt_json_message(
        hass, mqtt_mock_entry_with_yaml_config, siren.DOMAIN, DEFAULT_CONFIG
    )
async def test_setting_blocked_attribute_via_mqtt_json_message(
    hass, mqtt_mock_entry_no_yaml_config
):
    """Test that blocked attributes are not set via MQTT with JSON payload."""
    await help_test_setting_blocked_attribute_via_mqtt_json_message(
        hass, mqtt_mock_entry_no_yaml_config, siren.DOMAIN, DEFAULT_CONFIG, {}
    )
async def test_setting_attribute_with_template(hass, mqtt_mock_entry_with_yaml_config):
    """Test the setting of attribute via MQTT with a JSON attributes template."""
    await help_test_setting_attribute_with_template(
        hass, mqtt_mock_entry_with_yaml_config, siren.DOMAIN, DEFAULT_CONFIG
    )
async def test_update_with_json_attrs_not_dict(
hass, mqtt_mock_entry_with_yaml_config, caplog
):
| |
"""
A simple logging system for Python based around the power of sqlite3.
Advantages over python's built in logging module:
1. With logging I've often found myself going back to the config dict constantly to increase verbosity or
remove noise, and having to restart the program every time to find out what's going on. That system can be
less productive than just print statements being commented in/out.
With lg36 you can log often and in detail, and then just create views to for example:
- exclude a noisy file.
- exclude a noisy file but re-include warn+ msgs from it.
- append logs from multiple program runs and separate them into sessions.
- view only the last session.
- compare current session to one before it, and see the extra stuff that happened now.
- You can dump the sql db into a text file and text search.
- sqlite3 offers FTS to top it all up.
2. It turns out, 90% of the time, people don't use logging because they want to write custom handlers that do all
sorts of exotic behavior. If you want that then go ahead and use logging. Most often people use logging to achieve one
very simple thing: to log to stdout (possibly not so verbosely) and to log a more complete version to a file somewhere.
sqlite3 is just a superior version of that log file.
3. code over configuration. lg36 is a small file that you can drag and drop into your project. a huge amount
of functionality is achieved in roughly 200 lines, ignoring comments/whitespace and some formalities.
You can customize lg36 too even more easily.
----------
TODO: add a periodic job system, that would run, say, every 1000 msgs (user knob), or every 600 seconds (user knob),
that would clean up the DSS DB. i.e. by trimming old msgs to prevent the db from growing unbounded or would export a
JSON or dump into log stash, or s3 or something like that.
TODO: maybe add a db file sync feature for each log record that is > INFO
TODO: integrate the knobs here with "knob man"
TODO: to improve/solve the knobs handling:
make it a class and take all the knobs as init args. if they are optional put default args, document them in the
docstring, ... now the knobs section can go into project knobs, ... and be given out during init.
TODO: add a throttle option, in case the queue is filling up much faster than can be flushed.
something like a short delay to each log.dbg(), log.info(), ....
depending on q size.
the delay can also rise if things dont improve, while adding more delay to dbg than to warn err crit.
"""
import os
import sys
import time
import shutil
from dataclasses import dataclass
import typing
from pathlib import Path
import enum
import inspect
import traceback
import multiprocessing
import threading
import queue
import sqlite3
# ======================================================================================================================
# ======================================================================================================================
# ================================================================================================================ Knobs
# Enable/Disable any log sinks.
_STDOUT_LOGGING_ENABLED = True
_DSS_ENABLED = True
# ******************** level filters
_STDOUT_LVL_FILTER_STRING = "INFO"
# DSS is supposed to be a full detail dump into sqlite3. This should generally be set to dbug always, and feel free
# to be very verbose. If you want a filtered version, add views under the deep knobs section.
_DSS_LVL_FILTER_STRING = "DBUG"
# ******************** log file/dir, disk or ram, must be string.
#_DSS_LOG_DIR = f'/tmp/lg36/app_{int(time.time())}_{os.urandom(4).hex()}/'
# NOTE(review): hard-coded developer path — switch back to the /tmp line above before sharing/deploying.
_DSS_LOG_DIR = f'/home/zu/x1ws/lg36p/ignoreme/'
# _DSS_LOG_FILE = ':memory:'
_DSS_LOG_FILE = os.path.join(_DSS_LOG_DIR, 'lg36.db')
# ******************** additional options.
_SQLITE_PRAGMAS = [
    # if sqlite performance is poor, don't change isolation_level=None (autocommit essentially),
    # instead disable synchronous writes. I believe it's issuing a sync syscall after each transaction.
    # I once measured 2 seconds to flush ~ 500 msgs if this is left ON.
    "PRAGMA synchronous = OFF"
]
# Comment/Uncomment to start with fresh files or possibly append existing logs.
# ignore_errors=True already suppresses missing-directory and permission
# errors, so the former bare ``try/except: pass`` wrapper was redundant —
# and a bare except also swallows KeyboardInterrupt/SystemExit.
shutil.rmtree(_DSS_LOG_DIR, ignore_errors=True)
# ======================================================================================================================
# ======================================================================================================================
# ======================================================================================== HIGHLY COOL Knobs (SQL views)
# This is the most magical thing about lg36. Instead of writing file handlers,
# subsystem filters, level this, subclass that, format this format that, Declare any views you want here.
# All of these views will be created during init.
# These views would correspond to the different ways you could've configured logging with various config dicts,
# except you dont have to constantly change these to see whats going on. All the facts are saved, you just choose
# which view you want to look at at any moment in time. The idea is you can declare lots of views during dev and just
# leave them there later on also. No need to drop the ones you are not using, because its just a free view.
# You can control dev/prod/release differences with debug functions at the bottom of this file or somewhere else.
# It just a matter of dumping a view to stdout during dev maybe and not doing that later.
# For lg36 table schema look below this section.
_DEEP_VIEWS = [
# ******************************************************************************************************************
# *********************************************************************************************** DEFAULT LG36 Views
# ******************************************************************************************************************
# these are just some common/basic views provided by lg36. You can add/remove to this list as needed.
# half verbose
""" CREATE VIEW IF NOT EXISTS lg36_med AS
SELECT mid, msg_lvl, session_id, unix_time, SUBSTR(caller_filename, -20) AS fname_last20, caller_lineno,
caller_funcname, pname, tname, log_msg
FROM lg36;
""",
# short, most useful columns
""" CREATE VIEW IF NOT EXISTS lg36_shrt AS
SELECT mid, msg_lvl, SUBSTR(caller_filename, -20) AS fname_last20, caller_lineno, caller_funcname, log_msg
FROM lg36;
""",
# short last session only (most recent session), dont use select *, column names will be lost at least in DB brwser
""" CREATE VIEW IF NOT EXISTS lg36_shrt_ls AS
SELECT mid, msg_lvl, SUBSTR(caller_filename, -20) AS fname_last20, caller_lineno, caller_funcname, log_msg
FROM lg36
WHERE session_id IN (SELECT session_id FROM lg36 ORDER BY mid DESC LIMIT 1);
""",
# warn level or higher
""" CREATE VIEW IF NOT EXISTS lg36_shrt_ls_warn AS
SELECT mid, msg_lvl, SUBSTR(caller_filename, -20) AS fname_last20, caller_lineno, caller_funcname, log_msg
FROM lg36
WHERE session_id IN (SELECT session_id FROM lg36 ORDER BY mid DESC LIMIT 1)
AND msg_lvl NOT IN ('DBUG', 'INFO');
""",
# exclude some files, LIKE rules:
# wildcard char % matches zero or more of any char
# wildcard char _ matches exactly one single of any char
# """
# CREATE VIEW IF NOT EXISTS lg36_shrt_ls_no_x_file AS
# SELECT mid, msg_lvl, SUBSTR(caller_filename, -1, -20), caller_lineno, caller_funcname, log_msg
# FROM lg36
# WHERE session_id IN (SELECT session_id FROM lg36 ORDER BY mid DESC LIMIT 1)
# AND caller_filename NOT LIKE "%my_demo_xcluded_file.py";
# """,
# ********** stats and distinct info threads and process
""" CREATE VIEW IF NOT EXISTS stats_tname AS SELECT tname, count(tname) FROM lg36 GROUP BY tname; """,
""" CREATE VIEW IF NOT EXISTS stats_tid AS SELECT tid, count(tid) FROM lg36 GROUP BY tid; """,
""" CREATE VIEW IF NOT EXISTS stats_pname AS SELECT pname, count(pname) FROM lg36 GROUP BY pname; """,
""" CREATE VIEW IF NOT EXISTS stats_pid AS SELECT pid, count(pid) FROM lg36 GROUP BY pid; """,
# ******************************************************************************************************************
# **************************************************************************************************#* DBG/DEV views
# ******************************************************************************************************************
# Add additional views here that can help inspect each subsystem separately.
]
# ======================================================================================================================
# ======================================================================================================================
# ======================================================================================================================
# ======================================================================================================================
# ======================================================================================================================
# ======================================================================================================================
# ======================================================================================================================
# ======================================================================================================================
# =============================================================================================== CONSTANTS, ENUMS, MISC
# ******************** lg 36 table
# SQLite natively supports only the types TEXT, INTEGER, REAL, BLOB and NULL.
# mid: message id.
# session_id: unique id created at init time, therefore unique to each init.
# unix_time: unix time stamp with max available precision cast to string.
# msg_lvl: log level of the msg. i.e. DBUG, INFO, WARN, ERRR, CRIT (yes all are 4 chars).
# caller_filename: reflection derived information on who made the log call.
# caller_lineno: reflection derived information on who made the log call.
# pname: process name
# tname: thread name
_LG36_SCHEMA = """
CREATE TABLE IF NOT EXISTS lg36(
mid INTEGER PRIMARY KEY AUTOINCREMENT,
session_id TEXT,
unix_time TEXT,
msg_lvl TEXT,
caller_filename TEXT,
caller_lineno TEXT,
caller_funcname TEXT,
pname TEXT,
pid TEXT,
tname TEXT,
tid TEXT,
log_msg TEXT);
"""
# ANSI color sequences
_ANSI_RED = "\u001b[31m"
_ANSI_GREEN = "\u001b[32m"
_ANSI_YELLOW = "\u001b[33m"
_ANSI_BLUE = "\u001b[34m"
_ANSI_MAGENTA = "\u001b[35m"
_ANSI_CYAN = "\u001b[36m"
_ANSI_RESET = "\u001b[0m"
# You can do > < >= <= == comparison on these like so:
# lvl_1.value >= lvl_2.value:
class LGLVL(enum.Enum):
DBUG = 10 # A filter set to LGLVL.DEBUG means filter nothing.
INFO = 20
WARN = | |
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Helpers to store analysis reports.
"""
import base64
from datetime import datetime
from hashlib import sha256
import os
import zlib
import sqlalchemy
import codechecker_api_shared
from codechecker_api.codeCheckerDBAccess_v6 import ttypes
from codechecker_common.logger import get_logger
from codechecker_common.util import load_json_or_empty
from ..database.run_db_model import AnalyzerStatistic, \
BugPathEvent, BugReportPoint, File, Run, RunHistory, Report, FileContent, \
ExtendedReportData
from .thrift_enum_helper import report_extended_data_type_str
LOG = get_logger('system')
def metadata_info(metadata_file):
    """Extract analysis information from a metadata.json file.

    Returns a 5-tuple of (check commands, check durations, CodeChecker
    version, analyzer statistics, checkers). Empty/None defaults are
    returned when the file does not exist.
    """
    check_commands = []
    check_durations = []
    cc_version = None
    analyzer_statistics = {}
    checkers = {}

    if not os.path.isfile(metadata_file):
        return check_commands, check_durations, cc_version, \
            analyzer_statistics, checkers

    metadata_dict = load_json_or_empty(metadata_file, {})

    if 'command' in metadata_dict:
        check_commands.append(metadata_dict['command'])

    if 'timestamps' in metadata_dict:
        stamps = metadata_dict['timestamps']
        check_durations.append(float(stamps['end'] - stamps['begin']))

    # CodeChecker version that produced the metadata.
    cc_version = metadata_dict.get('versions', {}).get('codechecker')

    # Per-analyzer statistics and the enabled checkers.
    analyzer_statistics = metadata_dict.get('analyzer_statistics', {})
    checkers = metadata_dict.get('checkers', {})

    return (check_commands, check_durations, cc_version,
            analyzer_statistics, checkers)
def _item_locations(item):
    # Return (start_loc, end_loc) for a plist bug-path item. Ranges, when
    # present, give more precise position information than the single
    # 'location' entry, so the first range takes precedence.
    start_loc = item['location']
    end_loc = item['location']

    ranges = item.get("ranges")
    if ranges:
        start_loc = ranges[0][0]
        end_loc = ranges[0][1]

    return start_loc, end_loc


def collect_paths_events(report, file_ids, files):
    """
    This function creates the BugPathPos and BugPathEvent objects which belong
    to a report.

    report -- A report object from the parsed plist file.
    file_ids -- A dictionary which maps the file paths to file IDs in the
                database.
    files -- A list containing the file paths from the parsed plist file. The
             order of this list must be the same as in the plist file.

    #TODO Multiple ranges could belong to an event or control node.
          Only the first range from the list of ranges is stored into the
          database. Further improvement can be to store and view all ranges
          if there are more than one.
    """
    bug_paths = []
    bug_events = []
    bug_extended_data = []

    events = [i for i in report.bug_path if i.get('kind') == 'event']

    # In the plist file the source and target of the arrows are provided as
    # starting and ending ranges of the arrow. The path A->B->C is given as
    # A->B and B->C, thus range B is provided twice. So in the loop only
    # target points of the arrows are stored, and an extra insertion is done
    # for the source of the first arrow before the loop.
    report_path = [i for i in report.bug_path if i.get('kind') == 'control']

    if report_path:
        start_range = report_path[0]['edges'][0]['start']
        source_file_path = files[start_range[1]['file']]
        bug_paths.append(ttypes.BugPathPos(
            start_range[0]['line'],
            start_range[0]['col'],
            start_range[1]['line'],
            start_range[1]['col'],
            file_ids[source_file_path]))

    for path in report_path:
        try:
            end_range = path['edges'][0]['end']
            source_file_path = files[end_range[1]['file']]
            bug_paths.append(ttypes.BugPathPos(
                end_range[0]['line'],
                end_range[0]['col'],
                end_range[1]['line'],
                end_range[1]['col'],
                file_ids[source_file_path]))
        except IndexError:
            # Edges might be empty, nothing can be stored.
            continue

    for event in events:
        file_path = files[event['location']['file']]
        start_loc, end_loc = _item_locations(event)
        bug_events.append(ttypes.BugPathEvent(
            start_loc['line'],
            start_loc['col'],
            end_loc['line'],
            end_loc['col'],
            event['message'],
            file_ids[file_path]))

    for macro in report.macro_expansions:
        if not macro['expansion']:
            continue

        file_path = files[macro['location']['file']]
        start_loc, end_loc = _item_locations(macro)
        bug_extended_data.append(ttypes.ExtendedReportData(
            ttypes.ExtendedReportDataType.MACRO,
            start_loc['line'],
            start_loc['col'],
            end_loc['line'],
            end_loc['col'],
            macro['expansion'],
            file_ids[file_path]))

    for note in report.notes:
        if not note['message']:
            continue

        file_path = files[note['location']['file']]
        start_loc, end_loc = _item_locations(note)
        bug_extended_data.append(ttypes.ExtendedReportData(
            ttypes.ExtendedReportDataType.NOTE,
            start_loc['line'],
            start_loc['col'],
            end_loc['line'],
            end_loc['col'],
            note['message'],
            file_ids[file_path]))

    return bug_paths, bug_events, bug_extended_data
def store_bug_events(session, bugevents, report_id):
    """Add the bug path events of a report to the database session,
    preserving their order within the path.
    """
    for order, event in enumerate(bugevents):
        session.add(BugPathEvent(event.startLine,
                                 event.startCol,
                                 event.endLine,
                                 event.endCol,
                                 order,
                                 event.msg,
                                 event.fileId,
                                 report_id))
def store_bug_path(session, bugpath, report_id):
    """Add the bug path points of a report to the database session,
    preserving their order within the path.
    """
    for order, piece in enumerate(bugpath):
        session.add(BugReportPoint(piece.startLine,
                                   piece.startCol,
                                   piece.endLine,
                                   piece.endCol,
                                   order,
                                   piece.fileId,
                                   report_id))
def store_extended_bug_data(session, extended_data, report_id):
    """
    Add extended bug data objects (macro expansions, notes) to the database
    session.
    """
    for data in extended_data:
        # The thrift enum value is stored as its string representation.
        session.add(ExtendedReportData(data.startLine,
                                       data.startCol,
                                       data.endLine,
                                       data.endCol,
                                       data.message,
                                       data.fileId,
                                       report_id,
                                       report_extended_data_type_str(data.type)))
def is_same_event_path(report_id, events, session):
    """
    Check whether the bug path events stored for the given report are the
    same as the ones in the `events` argument.

    Returns True only if the two paths have the same length and every point
    matches in column positions, file name and message.

    NOTE(review): line numbers are deliberately not compared here (only
    columns, file name and message) — confirm that this is intended.
    Raises RequestFailed wrapping any database error.
    """
    try:
        q = session.query(BugPathEvent) \
            .filter(BugPathEvent.report_id == report_id) \
            .order_by(BugPathEvent.order)

        point_count = 0
        for point2 in q:
            if point_count == len(events):
                # The stored path is longer than the new one.
                return False

            point1 = events[point_count]
            point_count += 1

            file1name = os.path.basename(session.query(File).
                                         get(point1.fileId).filepath)
            file2name = os.path.basename(session.query(File).
                                         get(point2.file_id).filepath)

            if point1.startCol != point2.col_begin or \
                    point1.endCol != point2.col_end or \
                    file1name != file2name or \
                    point1.msg != point2.msg:
                return False

        # Fix: the original returned True even when the stored path was a
        # strict prefix of `events`; require equal lengths as well.
        return point_count == len(events)
    except Exception as ex:
        raise codechecker_api_shared.ttypes.RequestFailed(
            codechecker_api_shared.ttypes.ErrorCode.GENERAL,
            str(ex))
def addCheckerRun(session, command, name, tag, username,
                  run_history_time, version, force, codechecker_version,
                  statistics, description):
    """
    Store checker run related data to the database.
    By default updates the results if name already exists.
    Using the force flag removes existing analysis results for a run.

    session -- database session to store into (flushed, not committed here).
    command -- the analysis command line; stored zlib-compressed in history.
    name -- unique run name; decides between create/update/force-recreate.
    tag -- optional version tag; a pre-existing history entry with the same
           tag has its tag cleared so the tag stays unique per run.
    statistics -- per-analyzer dict with 'version', 'successful', 'failed'
                  and 'failed_sources' entries.
    Returns the run id. Raises RequestFailed wrapping any error.
    """
    try:
        LOG.debug("adding checker run")
        run = session.query(Run).filter(Run.name == name).one_or_none()

        if run and force:
            # Clean already collected results.
            if not run.can_delete:
                # Deletion is already in progress.
                msg = "Can't delete " + str(run.id)
                LOG.debug(msg)
                raise codechecker_api_shared.ttypes.RequestFailed(
                    codechecker_api_shared.ttypes.ErrorCode.DATABASE,
                    msg)

            LOG.info('Removing previous analysis results ...')
            session.delete(run)
            # Not flushing after delete leads to a constraint violation error
            # later, when adding run entity with the same name as the old one.
            session.flush()

            checker_run = Run(name, version, command)
            session.add(checker_run)
            session.flush()
            run_id = checker_run.id
        elif run:
            # There is already a run, update the results.
            run.date = datetime.now()
            run.command = command
            run.duration = -1
            session.flush()
            run_id = run.id
        else:
            # There is no run create new.
            checker_run = Run(name, version, command)
            session.add(checker_run)
            session.flush()
            run_id = checker_run.id

        # Add run to the history.
        LOG.debug("adding run to the history")

        if tag is not None:
            # A tag must be unique for a run: clear it from any older
            # history entry that currently carries it.
            run_history = session.query(RunHistory) \
                .filter(RunHistory.run_id == run_id,
                        RunHistory.version_tag == tag) \
                .one_or_none()

            if run_history:
                run_history.version_tag = None
                session.add(run_history)

        compressed_command = zlib.compress(command.encode("utf-8"),
                                           zlib.Z_BEST_COMPRESSION)
        run_history = RunHistory(run_id, tag, username, run_history_time,
                                 compressed_command, codechecker_version,
                                 description)
        session.add(run_history)
        session.flush()

        LOG.debug("command store done")

        # Create entry for analyzer statistics.
        for analyzer_type, res in statistics.items():
            analyzer_version = res.get('version', None)
            successful = res.get('successful')
            failed = res.get('failed')
            failed_sources = res.get('failed_sources')

            if analyzer_version:
                LOG.debug(analyzer_version)
                # Version strings are stored compressed, like the command.
                analyzer_version \
                    = zlib.compress(analyzer_version.encode('utf-8'),
                                    zlib.Z_BEST_COMPRESSION)
                LOG.debug("analyzer version compressed")

            compressed_files = None
            if failed_sources:
                # 6.9.0 clients could not provide the failed source list.
                if version == '6.9.0':
                    failed_sources = ['Unavailable in CodeChecker 6.9.0!']

                compressed_files = zlib.compress(
                    '\n'.join(failed_sources).encode('utf-8'),
                    zlib.Z_BEST_COMPRESSION)
                LOG.debug("failed source compressed")

            analyzer_statistics = AnalyzerStatistic(run_history.id,
                                                    analyzer_type,
                                                    analyzer_version,
                                                    successful,
                                                    failed,
                                                    compressed_files)
            LOG.debug("stats added to session")
            session.add(analyzer_statistics)
            session.flush()

        LOG.debug("stats store done")
        return run_id
    except Exception as ex:
        raise codechecker_api_shared.ttypes.RequestFailed(
            codechecker_api_shared.ttypes.ErrorCode.GENERAL,
            str(ex))
def finishCheckerRun(session, run_id):
    """
    Mark the given run as finished.

    Returns True on success, False if the run does not exist or the
    database operation fails (the error is logged, not raised).
    """
    try:
        LOG.debug("Finishing checker run")
        run = session.query(Run).get(run_id)
        if run is None:
            return False

        run.mark_finished()
        return True
    except Exception as ex:
        LOG.error(ex)

    return False
def setRunDuration(session, run_id, duration):
    """
    Set the duration of the given run.

    Returns True on success, False if the run does not exist or the
    database operation fails (the error is logged, not raised).
    """
    try:
        run = session.query(Run).get(run_id)
        if run is None:
            return False

        run.duration = duration
        return True
    except Exception as ex:
        LOG.error(ex)

    return False
def addReport(session,
              run_id,
              file_id,
              main_section,
              bugpath,
              events,
              bug_extended_data,
              detection_status,
              detection_time,
              severity_map):
    """
    Store a report together with its bug path, events and extended data.

    Returns the id of the newly created report row.
    Raises RequestFailed wrapping any error.
    """
    try:
        checker_name = main_section['check_name']
        # Map the checker's severity name to its thrift enum value.
        severity = ttypes.Severity._NAMES_TO_VALUES[
            severity_map.get(checker_name)]

        report = Report(run_id,
                        main_section['issue_hash_content_of_line_in_context'],
                        file_id,
                        main_section['description'],
                        checker_name or 'NOT FOUND',
                        main_section['category'],
                        main_section['type'],
                        main_section['location']['line'],
                        main_section['location']['col'],
                        severity,
                        detection_status,
                        detection_time,
                        len(events))

        session.add(report)
        # Flush so report.id is assigned before storing child rows.
        session.flush()

        LOG.debug("storing bug path")
        store_bug_path(session, bugpath, report.id)
        LOG.debug("storing events")
        store_bug_events(session, events, report.id)
        LOG.debug("storing extended report data")
        store_extended_bug_data(session, bug_extended_data, report.id)

        return report.id
    except Exception as ex:
        raise codechecker_api_shared.ttypes.RequestFailed(
            codechecker_api_shared.ttypes.ErrorCode.GENERAL,
            str(ex))
def changePathAndEvents(session, run_id, report_path_map):
    """
    Replace the stored bug paths and bug path events of the given reports.

    report_path_map -- maps report ids to (bug_path, events) pairs; the old
    points/events of every report in the map are bulk-deleted and the new
    ones stored in their place.

    NOTE(review): run_id is currently unused here — confirm whether it can
    be dropped or should be used to validate the report ids.
    """
    report_ids = list(report_path_map.keys())

    # Bulk delete without session synchronization: the deleted rows are not
    # touched again through this session before the next flush.
    session.query(BugPathEvent) \
        .filter(BugPathEvent.report_id.in_(report_ids)) \
        .delete(synchronize_session=False)

    session.query(BugReportPoint) \
        .filter(BugReportPoint.report_id.in_(report_ids)) \
        .delete(synchronize_session=False)

    for report_id, (bug_path, events) in report_path_map.items():
        store_bug_path(session, bug_path, report_id)
        store_bug_events(session, events, report_id)
def get_file_content(filepath, encoding):
    """Return the raw content of the given file as bytes.

    If the client sent the file content encoded, decode it based on the
    encoding method. This encoding is optionally used during network
    transfer between the client and the server.
    """
    with open(filepath, 'rb') as source_file:
        content = source_file.read()

    if encoding == ttypes.Encoding.BASE64:
        return base64.b64decode(content)

    return content
def addFileContent(session, filepath, source_file_name, content_hash,
encoding):
"""
Add the necessary file contents. If the file is already stored in the
database then its ID returns. If content_hash in None then this function
calculates the content hash. Or if is available at the caller and | |
# coding=utf-8
# Copyright © 2018 Computational Molecular Biology Group,
# Freie Universität Berlin (GER)
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above
# copyright notice, this list of conditions and the
# following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials
# provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
# CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Created on 26.09.17
@author: clonker
"""
import numpy as _np
from readdy.api.utils import vec3_of as _v3_of
class PotentialRegistry(object):
def __init__(self, context_top_registry, context_type_registry, units):
    # Handle to the context's potential registry; the add_* helpers below
    # forward to it after converting quantities to internal units.
    self._registry = context_top_registry
    # Particle type registry of the context — not referenced by the add_*
    # helpers visible here.
    self._types = context_type_registry
    # Unit system used to convert user-supplied quantities.
    self._units = units
def add_harmonic_geometry(self, particle_type, force_constant, geometry, inclusion):
    r"""
    Adds harmonic inclusion or exclusion based on geometry (accessible via
    `system.geometry`).

    :param particle_type: The particle type.
    :param force_constant: The force constant.
    :param geometry: Geometry.
    :param inclusion: Whether inclusion (True) or exclusion (False).
    """
    units = self._units
    self._registry.add_harmonic_geometry(
        particle_type,
        units.convert(force_constant, units.force_constant_unit),
        geometry, inclusion)
def add_box(self, particle_type, force_constant, origin, extent):
    """
    Adds a box potential acting with a harmonic force on particles of the
    given type once they leave the area spanned by the cuboid that has
    `origin` as its front lower left and `origin+extent` as its back upper
    right vertex, respectively.

    :param particle_type: the particle type for which the potential is registered
    :param force_constant: the force constant [energy/length**2]
    :param origin: the origin of the box [length]
    :param extent: the extent of the box [length]
    """
    units = self._units
    k = units.convert(force_constant, units.force_constant_unit)
    lower_corner = units.convert(origin, units.length_unit)
    box_size = units.convert(extent, units.length_unit)
    self._registry.add_box(particle_type, k, _v3_of(lower_corner), _v3_of(box_size))
def add_harmonic_repulsion(self, particle_type1, particle_type2, force_constant, interaction_distance):
    """
    Adds harmonic repulsion between particles of type `particle_type1` and
    `particle_type2`. It is possible to have the same type specified for
    both. The interaction distance specifies the distance at which
    particles begin to interact with one another.

    :param particle_type1: first particle type
    :param particle_type2: second particle type
    :param force_constant: the force constant [energy/length**2]
    :param interaction_distance: the interaction distance [length]
    """
    units = self._units
    self._registry.add_harmonic_repulsion(
        particle_type1, particle_type2,
        units.convert(force_constant, units.force_constant_unit),
        units.convert(interaction_distance, units.length_unit))
def add_weak_interaction_piecewise_harmonic(self, particle_type1, particle_type2, force_constant, desired_distance,
                                            depth, cutoff):
    """
    Adds a weak interaction potential between particles of the specified
    types. This weak interaction is defined by three harmonic potential
    terms and described by a `desired_distance`, i.e., a distance at which
    the potential energy is lowest inside the interaction radius, a
    `depth`, denoting the depth of the potential well, and a `cutoff`,
    denoting the distance at which particles begin to interact.

    :param particle_type1: first particle type
    :param particle_type2: second particle type
    :param force_constant: the force constant [energy/length**2]
    :param desired_distance: the desired distance, i.e., smallest potential energy [length]
    :param depth: depth of the potential well [energy]
    :param cutoff: the cutoff radius [length]
    """
    units = self._units
    k = units.convert(force_constant, units.force_constant_unit)
    equilibrium = units.convert(desired_distance, units.length_unit)
    well_depth = units.convert(depth, units.energy_unit)
    interaction_radius = units.convert(cutoff, units.length_unit)
    self._registry.add_weak_interaction_piecewise_harmonic(
        particle_type1, particle_type2, k, equilibrium, well_depth,
        interaction_radius)
def add_lennard_jones(self, particle_type1, particle_type2, epsilon, sigma, cutoff=None, m=12, n=6, shift=True):
    """
    Adds a m-n-LJ potential with specified cutoff, epsilon, and sigma.
    `shift` is bool and denotes whether the potential energy should be
    shifted to bridge the gap at `cutoff`.

    :param particle_type1: first particle type
    :param particle_type2: second particle type
    :param epsilon: epsilon value [energy]
    :param sigma: sigma value [length]
    :param cutoff: the cutoff radius [length], default value results in 2.5*sigma
    :param m: first exponent, default=12
    :param n: second exponent, default=6
    :param shift: whether to shift the potential energy, default=True
    """
    assert isinstance(shift, bool), "shift can only be bool"
    # The default cutoff is derived from sigma *before* unit conversion,
    # matching the user-supplied units.
    if cutoff is None:
        cutoff = 2.5 * sigma
    units = self._units
    rc = units.convert(cutoff, units.length_unit)
    eps = units.convert(epsilon, units.energy_unit)
    sig = units.convert(sigma, units.length_unit)
    self._registry.add_lennard_jones(particle_type1, particle_type2, m, n, rc, shift, eps, sig)
def add_screened_electrostatics(self, particle_type1, particle_type2, electrostatic_strength,
                                inverse_screening_depth, repulsion_strength, repulsion_distance, exponent, cutoff):
    """
    Adds a screened electrostatics potential between pairs of particles of
    type `particle_type1` and `particle_type2`.

    :param particle_type1: first particle type
    :param particle_type2: second particle type
    :param electrostatic_strength: the electrostatic strength [energy * length]
    :param inverse_screening_depth: the inverse screening depth [1 / length]
    :param repulsion_strength: the repulsion strength [energy]
    :param repulsion_distance: the repulsion distance [length]
    :param exponent: the exponent
    :param cutoff: the cutoff radius [length]
    """
    units = self._units
    self._registry.add_screened_electrostatics(
        particle_type1, particle_type2,
        units.convert(electrostatic_strength,
                      units.energy_unit * units.length_unit),
        units.convert(inverse_screening_depth, 1 / units.length_unit),
        units.convert(repulsion_strength, units.energy_unit),
        units.convert(repulsion_distance, units.length_unit),
        exponent,
        units.convert(cutoff, units.length_unit))
def add_sphere(self, particle_type, force_constant, origin, radius, inclusion: bool):
    """
    Adds a spherical potential that keeps particles of a certain type
    restrained to the inside or outside of the specified sphere.

    :param particle_type: the particle type
    :param force_constant: strength of the potential [energy/length**2]
    :param origin: origin of the sphere [length]
    :param radius: radius of the sphere [length]
    :param inclusion: if true, the potential will include particles, otherwise exclude them from the volume
    """
    units = self._units
    k = units.convert(force_constant, units.force_constant_unit)
    center = units.convert(origin, units.length_unit)
    r = units.convert(radius, units.length_unit)
    assert r > 0, "radius has to be positive"
    self._registry.add_sphere(particle_type, k, _v3_of(center), r, inclusion)
def add_capsule(self, particle_type, force_constant, center, direction, length, radius):
    """
    Adds a capsule potential for particles of the given type.

    NOTE(review): judging by the converted quantities this confines
    particles harmonically with respect to a capsule (a cylinder segment
    with a given radius) — confirm the inclusion/exclusion semantics
    against the underlying registry implementation.

    :param particle_type: the particle type
    :param force_constant: strength of the potential [energy/length**2]
    :param center: center of the capsule [length]
    :param direction: direction of the capsule's axis [length]
    :param length: length of the capsule [length]
    :param radius: radius of the capsule [length]
    """
    force_constant = self._units.convert(force_constant, self._units.force_constant_unit)
    center = self._units.convert(center, self._units.length_unit)
    direction = self._units.convert(direction, self._units.length_unit)
    length = self._units.convert(length, self._units.length_unit)
    radius = self._units.convert(radius, self._units.length_unit)
    self._registry.add_capsule(particle_type, force_constant, _v3_of(center), _v3_of(direction), length, radius)
def add_spherical_barrier(self, particle_type, height, width, origin, radius):
    """
    A potential that forms a concentric barrier at a certain radius around
    a given origin. It is given a height (in terms of energy) and a width.
    Note that the height can also be negative, then this potential acts as
    a 'sticky' sphere. The potential consists of harmonic snippets, such
    that the energy landscape is continuous and differentiable, the force
    is only continuous and not differentiable.

    :param particle_type: the particle type
    :param height: the height of the barrier [energy]
    :param width: the width of the barrier [length]
    :param origin: the origin of the sphere [length]
    :param radius: the radius of the sphere [length]
    """
    units = self._units
    barrier_height = units.convert(height, units.energy_unit)
    barrier_width = units.convert(width, units.length_unit)
    center = units.convert(origin, units.length_unit)
    r = units.convert(radius, units.length_unit)
    assert r > 0, "radius has to be positive"
    assert _np.abs(barrier_height) > 0, "magnitude of height has to be positive"
    assert barrier_width > 0, "width has to be positive"
    self._registry.add_spherical_barrier(particle_type, barrier_height,
                                         barrier_width, _v3_of(center), r)
def add_cylinder(self, particle_type, force_constant, origin, normal, radius, inclusion: bool):
    """
    A potential that keeps particles inside or outside a cylindrical
    volume. Particles penetrating the boundary are harmonically pushed
    back again.

    :param particle_type: the particle type
    :param force_constant: the strength of the confining force [energy/length**2]
    :param origin: any point on the axis of the cylinder [length]
    :param normal: direction of the axis of the cylinder [length]
    :param radius: radius of the cylinder [length]
    :param inclusion: if true, the potential will include particles, otherwise exclude them from the volume
    """
    units = self._units
    k = units.convert(force_constant, units.force_constant_unit)
    axis_point = units.convert(origin, units.length_unit)
    axis_direction = units.convert(normal, units.length_unit)
    r = units.convert(radius, units.length_unit)
    assert k > 0, "force_constant has to be positive"
    assert r > 0, "radius has to be positive"
    self._registry.add_cylinder(particle_type, k, _v3_of(axis_point),
                                _v3_of(axis_direction), r, inclusion)
def add_custom_external(self, particle_type, clazz, *args):
"""
This method allows to add a custom potential | |
# <gh_stars>10-100  (dataset artifact; commented out — not valid Python)
from .preprocessing import *
from os.path import dirname, join, expanduser
from joblib import Parallel, delayed
from .utils import *
from tqdm import tqdm
import pandas as pd
import numpy as np
import censusdata
import json
import sys
import os
import re
def pad_logrecno(data):
    """Left-pad the LOGRECNO column with zeros to the canonical 7 digits.

    Mutates the column in place and returns the same dataframe.
    """
    data["LOGRECNO"] = data["LOGRECNO"].str.zfill(7)
    return data
class ACS_Parser():
"""
Parses the raw ACS data
Parameters:
----------
support_files_path: str
Support files path pointing to where the raw ACS data is stored
year: str (default 2019)
Year of ACS data.
span: str (default 5)
Span of ACS data. The ACS data is available in 1 or 5 year spans. The 5yr ACS data is the most comprehensive & is available at more granular levels than 1yr data.
state_level: str
State to parse
n_jobs: int
Number of jobs in parallel
"""
def __init__(self, support_files_path, state_level, n_jobs=-1, year='2019', span='5'):
self.support_files_path = support_files_path
self.year = year
self.span = span
self.state_level = state_level
self.n_jobs = n_jobs
self.raw_acs_path = os.path.join(self.support_files_path,
f"raw/acs/{self.year}/{self.span}yr")
self.out_acs_path = os.path.join(self.support_files_path,
"parsed/acs",
self.year,
f"{self.span}yr")
self.fol_acs_tmp = f"{self.year}_{self.span}yr_Summary_FileTemplates/"
def fit(self):
return self
def acs_geo(self, data):
"""
Merges input data with ACS data
Parameters:
----------
data: dataframe
Original input dataframe
"""
geo_file_path = os.path.join(self.raw_acs_path,
self.fol_acs_tmp,
f'{self.span}_year_Mini_Geo.xlsx')
ag_df = pd.read_excel(geo_file_path,
sheet_name=self.state_level,
dtype=str)
ag_df = ag_df.rename(columns={'Logical Record Number': 'LOGRECNO',
'Geography ID': 'GEOID'})
data = data.merge(ag_df,
on=["LOGRECNO"])
return (data)
def acs_track_5yr(self):
"""
Initializes 5 year sequence dictionary
"""
file_path_list = []
sequence_dict = {}
for root, dirs, files in os.walk(os.path.join(self.raw_acs_path,
"Tracts_Block_Groups")):
for file in files:
if ('000.txt' in file) & \
('e' in file) & \
(f'{self.span}{self.state_level}' in file):
file_path_list.append("".join([root, file]))
sequence = file.split('000.txt')[0].split(self.state_level)[1]
sequence = str(int(sequence))
sequence_dict[sequence] = {}
sequence_dict[sequence]["data"] = None
sequence_dict[sequence]["sequence"] = None
sequence_dict[sequence]["headers"] = None
sequence_dict[sequence]['Tracts_Block_Groups'] = {}
sequence_dict[sequence]['Not_Tracts_Block_Groups'] = {}
sequence_dict[sequence]['Tracts_Block_Groups']["file"] = file
sequence_dict[sequence]['Not_Tracts_Block_Groups']["file"] = None
sequence_dict[sequence]['Tracts_Block_Groups']["data"] = None
sequence_dict[sequence]['Not_Tracts_Block_Groups']["data"] = None
for root, dirs, files in os.walk(os.path.join(self.raw_acs_path, "Not_Tracts_Block_Groups")):
for file in files:
if ('000.txt' in file) & ('e' in file) & \
(f'{self.span}{self.state_level}' in file):
file_path_list.append("".join([root, file]))
sequence = file.split('000.txt')[0].split(self.state_level)[1]
sequence = str(int(sequence))
sequence_dict[sequence]['Not_Tracts_Block_Groups']["file"] = file
return (sequence_dict)
def acs_track_1yr(self):
"""
Initializes 1 year sequence dictionary
"""
file_path_list = []
sequence_dict = {}
for root, dirs, files in os.walk(os.path.join(self.raw_acs_path,
"All_Geographies")):
for file in files:
if ('000.txt' in file) & \
('e' in file) & \
(f'{self.span}{self.state_level}' in file):
file_path_list.append("".join([root, file]))
sequence = file.split('000.txt')[0].split(self.state_level)[1]
sequence = str(int(sequence))
sequence_dict[sequence] = {}
sequence_dict[sequence]["file"] = file
sequence_dict[sequence]["sequence"] = None
sequence_dict[sequence]["data"] = None
sequence_dict[sequence]["headers"] = None
return (sequence_dict)
def acs_parse_5yr(self, sequence_dict, i, save_table):
"""
Parses 5 year ACS data
Parameters:
----------
sequence_dict: dict
State level dictionary that contains file paths & sequence names
i: int
Sequence iteration
save_table: bool
Indicating is table will be saved to file
"""
print("... Generating ACS table", self.state_level, "sequence", i)
dat_file_1 = os.path.join(self.raw_acs_path,
"Tracts_Block_Groups",
sequence_dict[str(i)]['Tracts_Block_Groups']['file'])
dat_file_2 = os.path.join(self.raw_acs_path,
"Not_Tracts_Block_Groups",
sequence_dict[str(i)]['Not_Tracts_Block_Groups']['file'])
try:
tmp_data_1 = pd.read_csv(dat_file_1, sep=",", header=None, dtype=str)
tmp_data_2 = pd.read_csv(dat_file_2, sep=",", header=None, dtype=str)
seq_file = os.path.join(self.raw_acs_path,
sequence_dict[str(i)]['sequence'])
tmp_headers = pd.read_excel(seq_file, sheet_name='e', dtype=str)
sequence_dict[str(i)]["headers"] = tmp_headers
feature_mapping = sequence_dict[str(i)]['headers'].to_dict('records')
sequence_dict[str(i)]['description'] = feature_mapping[0]
new_col_names = list(tmp_headers.columns)
tmp_data_1.columns = new_col_names
tmp_data_2.columns = new_col_names
except pd.errors.EmptyDataError:
print(f' Note: {dat_file_1} was empty.')
tmp_data = pd.read_csv(dat_file_2, sep=",", header=None, dtype=str)
seq_file = os.path.join(self.raw_acs_path,
sequence_dict[str(i)]['sequence'])
tmp_headers = pd.read_excel(seq_file, sheet_name='e', dtype=str)
sequence_dict[str(i)]["headers"] = tmp_headers
feature_mapping = sequence_dict[str(i)]['headers'].to_dict('records')
sequence_dict[str(i)]['description'] = feature_mapping[0]
new_col_names = list(tmp_headers.columns)
tmp_data.columns = new_col_names
tmp_data = pad_logrecno(tmp_data)
tmp_data = self.acs_geo(tmp_data)
sequence_dict[str(i)]["data"] = tmp_data
if save_table:
file_name = "".join(["Zest_ACS_", self.state_level, "_seq",
str(i), "_", self.year, '_', self.span,
'yr.parquet'])
save_dataframe(tmp_data, self.out_acs_path, file_name)
return (sequence_dict[str(i)])
def acs_parse_1yr(self, sequence_dict, i, save_table):
"""
Parses 1 year ACS data
Parameters:
----------
sequence_dict: dict
State level dictionary that contains file paths & sequence names
i: int
Sequence iteration
save_table: bool
Indicating is table will be saved to file
"""
print("... Generating ACS table", self.state_level, "sequence", i)
dat_file = os.path.join(self.raw_acs_path,
"All_Geographies",
sequence_dict[str(i)]['file'])
tmp_data = pd.read_csv(dat_file, sep=",", header=None, dtype=str)
seq_file = os.path.join(self.raw_acs_path,
sequence_dict[str(i)]['sequence'])
tmp_headers = pd.read_excel(seq_file, sep=",",
sheet_name="e", dtype=str)
sequence_dict[str(i)]["headers"] = tmp_headers
feature_mapping = sequence_dict[str(i)]['headers'].to_dict('records')
sequence_dict[str(i)]['description'] = feature_mapping[0]
new_col_names = list(tmp_headers.columns)
tmp_data.columns = new_col_names
tmp_data = pad_logrecno(tmp_data)
tmp_data = self.acs_geo(tmp_data)
sequence_dict[str(i)]["data"] = tmp_data
if save_table:
file_name = "".join(["Zest_ACS_", self.state_level, "_seq", str(i),
"_", self.year, '_', self.span, 'yr.parquet'])
save_dataframe(tmp_data, self.out_acs_path, file_name)
return (sequence_dict[str(i)])
def transform(self, save_table=True):
    """
    Parses every tracked ACS sequence for this state, in parallel.

    Parameters
    ----------
    save_table: bool
        Optional save; when True, output directories are created and
        each parsed sequence is written to parquet.

    Returns
    -------
    dict
        Parsed sequence entries keyed by sequence number.

    Raises
    ------
    ValueError
        If ``self.span`` is not '1' or '5'.
    """
    if save_table:
        make_directory(output_directory=self.raw_acs_path)
        make_directory(output_directory=self.out_acs_path)
        make_directory(output_directory=os.path.join(self.raw_acs_path,
                                                     self.fol_acs_tmp))
    # The 1yr and 5yr paths only differ in the tracker and parser used;
    # resolve those once instead of duplicating the discovery loop below.
    if self.span == '5':
        sequence_dict = self.acs_track_5yr()
        parse_fn = self.acs_parse_5yr
    elif self.span == '1':
        sequence_dict = self.acs_track_1yr()
        parse_fn = self.acs_parse_1yr
    else:
        raise ValueError('Improper ACS span provided. The only accepted values are 1 & 5')
    # Attach the header workbook path for every sequence found on disk
    for root, dirs, files in os.walk(os.path.join(self.raw_acs_path,
                                                  self.fol_acs_tmp)):
        for file in files:
            if ('.xlsx' in file) and ('Geo' not in file):
                sequence = str(int(file.split('seq')[1].split('.xlsx')[0]))
                seq_str = "".join([self.fol_acs_tmp, "seq", sequence, ".xlsx"])
                sequence_dict[sequence]["sequence"] = seq_str
    results = Parallel(n_jobs=self.n_jobs, verbose=1)(
        delayed(parse_fn)(sequence_dict, sni, save_table)
        for sni in tqdm(list(sequence_dict.keys())))
    # Re-key results by the sequence number embedded in the workbook name
    results_out = {}
    for d in results:
        seqn = re.findall('[0-9]{1,3}', d['sequence'])[-1]
        results_out[seqn] = {}
        results_out[seqn].update(d)
    return (results_out)
def acs_census_data(support_files_path, level, year=2019):
    """Create ACS Lookup Tables using the censusdata package.

    Parameters:
    -----------
    support_files_path:
        Path to support files (currently unused; kept for interface
        compatibility with existing callers).
    level:
        Geographic level to return ACS for, options include:
        ['block group', 'tract', 'zip', 'county', 'state']
    year: int (default 2019)
        ACS 5-year vintage to download.

    Returns:
    --------
    pd.DataFrame
        Downloaded ACS data for the requested level.
    """
    curpath = dirname(__file__)
    data_path = join(curpath, '../data/')
    misc_args = load_json(join(data_path, 'misc_args.json'))
    states = load_json(join(data_path, 'states.json'))
    # Collect partial downloads and concatenate once at the end;
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0.
    frames = [pd.DataFrame(columns=misc_args)]
    if level == 'zip':
        frames.append(censusdata.download(
            "acs5",
            year,
            censusdata.censusgeo([
                ('zip code tabulation area', '*')]),
            misc_args,
        ))
    # NOTE(review): the state loop below also runs when level == 'zip',
    # appending county-level rows after the ZCTA download; this matches
    # the original control flow — confirm it is intended.
    for i in range(len(states)):
        print("State:", i, "\n")
        if level == 'tract':
            geo = censusdata.censusgeo([
                ('state', states[i]),
                ('county', '*'),
                ('tract', '*')
            ])
        elif level == 'block group':
            geo = censusdata.censusgeo([
                ('state', states[i]),
                ('county', '*'),
                ('block group', '*')
            ])
        else:
            # Fallback covers county/state level requests
            geo = censusdata.censusgeo([
                ('state', states[i]),
                ('county', '*')
            ])
        frames.append(censusdata.download("acs5", year, geo, misc_args))
    return (pd.concat(frames))
class ACS_LookupBuilder():
"""Creates a core ACS lookup table by geo level
Parameter
--------
geo: str
Geo key to identify which geographic level the ACS table will be made at.
Three levels are currently supported zip, tract, or block group
year: str (default 2019)
Year of ACS data.
span: str
Span of ACS data. The ACS data is available in 1 or 5 year spans. The 5yr ACS data is the most comprehensive & is available at more granular levels than 1yr data.
n_jobs: int
Number of jobs in parallel
required_tables: list
List of ACS table names to select data to include in the Lookup table
"""
def __init__(self, support_files_path, geo, year='2019', span='5', n_jobs=-1, required_tables=None):
self.support_files_path = support_files_path
self.geo = geo
self.year = year
self.span = span
self.n_jobs = n_jobs
self.support_files_path = support_files_path
curpath = dirname(__file__)
parsed_path = join(support_files_path, f'../data/parsed/acs/{self.year}/{self.span}yr')
processed_path = join(curpath, f'../data/processed/acs/{self.year}/{self.span}yr')
self.raw_acs_path = parsed_path
self.out_acs_path = processed_path
self.required_tables = required_tables
if self.required_tables is None:
self.required_tables = ['B01003', 'B02001', 'B03001', 'B04004', 'B04006',
'B04007', 'B05011', 'B05012', 'B06009', 'B07101',
'B08301', 'B10051', 'B11017', 'B16001', 'B19001',
'B23020', 'B25004', 'B25075', 'B99021', 'B99162', 'C16001']
def acs_select_features(self, data):
"""Selects predefined ACS tables to create a lookup table"""
prts = "|".join(self.required_tables + ['GEO', 'Geography'])
data = data.filter(regex=prts)
return (data)
def acs_join_lookup(self, data, mco):
"""Appends new geo level to feature column"""
tbl_dict = {}
for rn in required_names:
data = data.filter(regex="|".join([rn, 'GEO']))
tbl_dict[rn] = data
return (tbl_dict)
def parsed_acs_proc(self, file, geo_pattern):
    """Creates a dataframe of geo-specific ACS data by sequence.

    Parameters
    ----------
    file: str
        Filename, resolved relative to ``self.raw_acs_path``.
    geo_pattern: str
        Regex matched against the upper-cased geography name column to
        keep only rows for the target geo level.

    Returns
    -------
    pd.DataFrame
        Frame indexed by (GEOID, GEO_NAME, EXT_GEOID), sorted by index;
        an empty frame when fewer than four columns survive filtering.
    """
    drop_cols = []
    long_name = 'GEO_NAME'
    long_id = 'EXT_GEOID'
    tmp_data = load_file(os.path.join(self.raw_acs_path, file))
    tmp_data = self.acs_select_features(tmp_data)
    tmp_data = tmp_data.rename(columns={'GEOID': long_id,
                                        'Geography Name': long_name})
    # Keep only rows whose geography name matches the requested level
    tmp_data = tmp_data[tmp_data[long_name].str.upper().str.contains(geo_pattern,
                                                                     regex=True)]
    # Drop columns that are entirely null after the row filter
    na_cols = list(tmp_data.columns[tmp_data.isnull().all()])
    tmp_data = tmp_data.drop(drop_cols + na_cols,
                             axis=1)
    # Short GEOID is the portion after the 'US' marker in the extended id
    # (assumes every EXT_GEOID contains 'US' — TODO confirm upstream)
    tmp_data['GEOID'] = None
    tmp_data['GEOID'] = tmp_data[long_id].apply(lambda x: x.split('US')[1]).astype(str)
    if tmp_data.shape[1] < 4:
        # Nothing informative left beyond the three id columns
        tmp_data = pd.DataFrame()
    else:
        tmp_data = tmp_data.set_index(['GEOID',
                                       'GEO_NAME',
                                       'EXT_GEOID']
                                      ).sort_index()
    return (tmp_data)
def transform(self, save_table):
| |
<filename>flappy/envs/fwmav/controllers/arc_xy_arc_z.py<gh_stars>100-1000
import numpy as np
class pid:
    """Mutable state bag for a PID loop: gains, error memory, term outputs.

    All fields start at zero and are expected to be set/updated by the
    owning controller.
    """

    # error memory, integral accumulator/limit, gains, and last P/I/D terms
    _FIELDS = ('old_error', 'integral', 'int_max',
               'Kp', 'Ki', 'Kd',
               'p', 'i', 'd')

    def __init__(self):
        for name in self._FIELDS:
            setattr(self, name, 0)
class ARCController():
def __init__(self,dt):
self.dt_ = dt
# desired target
self.desired_accel_x_ = 0
self.desired_accel_y_ = 0
self.pos_target_x_ = 0
self.pos_target_y_ = 0
self.pos_target_z_ = 0
self.vel_target_z_ = 0
self.ang_ef_target_z_ = 0
self.rate_ef_target_z_ = 0
####################################### z controller #######################################
# error
self.z_eq_dot_old = 0
# position target filter
self.pos_target_z_alt_filtered_ = 0
self.pos_target_z_alt_filtered_old_ = 0
self.vel_target_z_alt_filtered_ = 0
tau_z = 1.5 # time constant
self.alpha_z_target = dt/(tau_z+dt)
tau_z_dot = 0.05 # time constant
self.alpha_z_dot_target = dt/(tau_z_dot+dt)
# system parameters
self.K_Fz = 0.0165 # Fz = K_Fz*(u_z - V_s) from force mapping
self.V_s = 2.5115
self.mass = 0.012
# parameter estimate
self.theta_hat_z = np.zeros([3,1])
self.theta_hat_z[0,0] = self.mass
self.theta_hat_z[1,0] = 1 # constant and not updated
self.theta_hat_z[2,0] = 0.0001 # disturbance in z direction in the unit of Newton
self.theta_hat_z_min = np.zeros([3,1])
self.theta_hat_z_min[0,0] = 0.011 # 11 gram
self.theta_hat_z_min[1,0] = 1 # not update
self.theta_hat_z_min[2,0] = -0.049 # 5 gram = 0.049 N
self.theta_hat_z_max = np.zeros([3,1])
self.theta_hat_z_max[0,0] = 0.018 # 11 gram
self.theta_hat_z_max[1,0] = 1 # not update
self.theta_hat_z_max[2,0] = 0.049 # 5 gram = 0.049 N
self.h_z = (0.018-0.012) + (0.098) + 0.01 #18-12 gram for mass estimation + +-5gram for disturbance + 0.01N(~1gram) for uncertainty
self.h_z = np.sum(self.theta_hat_z_max-self.theta_hat_z_min) + 0.01 #18-12 gram for mass estimation + +-5gram for disturbance + 0.01N(~1gram) for uncertainty
# print('h_z = %.4f' % self.h_z, end="\n\r")
# regressor
self.phi_z = np.zeros([3,1])
self.phi_z[0,0] = -9.8
self.phi_z[1,0] = -self.K_Fz*self.V_s
self.phi_z[2,0] = 1
# gains
self.k_1_z = 1.5#*3 #
self.k_s1_z = 0.10#*3
self.epsilon_z = 0.12
self.Gamma_z = np.eye(3)
# gain tuning
k_eq_z = 1 # controls I, large k_eq will result in oscilating theta and oscilating performance
h_M_z = self.h_z
self.epsilon_z = 1/(4*k_eq_z-self.k_1_z)*h_M_z*2
#print('epsilon_z = %.4f' % self.epsilon_z, end="\n\r")
#self.epsilon_z = 0.115
zeta_z = 0.707
k_i_z = k_eq_z**2/(4*zeta_z**2)
#print('k_i_z = %.4f' % k_i_z, end="\n\r")
W_z = np.diag([0.029,0,0.098]) # diag{max(|theta_max+theta_min|, theta_max-theta_min)}
s_phi_z = np.asscalar(self.phi_z.transpose().dot(np.square(W_z)).dot(self.phi_z))
#print('s_phi_z = %.4f' % s_phi_z, end="\n\r")
gamma_z = k_i_z/s_phi_z
gamma_z = 5.5343
#print('gamma_z = %.4f' % gamma_z, end="\n\r")
self.Gamma_z = gamma_z*np.square(W_z)
self.Gamma_z[2,2] = self.Gamma_z[2,2]
#print('Gamma_z=', end="\n\r")
#print(self.Gamma_z, end="\n\r")
####################################### xy controller #######################################
# position target filter
self.pos_target_x_filtered_ = 0
self.pos_target_x_filtered_old_ = 0
self.pos_target_x_filtered_old_2_ = 0
self.vel_target_x_filtered_ = 0;
self.acc_target_x_ = 0;
self.acc_target_x_filtered_ = 0;
self.jer_target_x_ = 0;
self.jer_target_x_filtered_ = 0;
self.pos_target_y_filtered_ = 0
self.pos_target_y_filtered_old_ = 0
self.pos_target_y_filtered_old_2_ = 0
self.vel_target_y_filtered_ = 0;
self.acc_target_y_ = 0;
self.acc_target_y_filtered_ = 0;
self.jer_target_y_ = 0;
self.jer_target_y_filtered_ = 0;
self.pos_target_z_filtered_ = 0
self.pos_target_z_filtered_old_ = 0
self.pos_target_z_filtered_old_2_ = 0
self.vel_target_z_filtered_ = 0;
self.acc_target_z_ = 0;
self.acc_target_z_filtered_ = 0;
self.jer_target_z_ = 0;
self.jer_target_z_filtered_ = 0;
ts_xy = 0.02 # settling time of xy
zeta_xy = 1 # damping ratio of xy
omega_n_xy = 5/(zeta_xy*ts_xy) # natural frequency of xy
self.a_2_xy_target = -1/(1 + 2*zeta_xy*omega_n_xy*dt + omega_n_xy**2*dt**2)
self.b_1_xy_target = omega_n_xy**2*dt**2/(1 + 2*zeta_xy*omega_n_xy*dt + omega_n_xy**2*dt**2)
self.a_1_xy_target = 1 - self.b_1_xy_target - self.a_2_xy_target
tau_xy = 0.002 # time constant
self.alpha_xy_target = dt/(tau_xy+dt)
# system parameters
self.tau_x = -0.2788e-3 # roll torque (Nm)
self.K_Tx = 0.9068e-3 # (Nm/V)
self.tau_y = 0.1009e-3 # pitch torque (Nm)
self.K_Ty = 0.3199e-3 # (Nm/V)
self.tau_z = 0.099e-3 # yaw torque (Nm)
self.K_Tz = 1.8401e-3 # (Nm/splitcycle)
self.I_x = 5000e-9 # kgm^2
self.I_y = 3500e-9 # kgm^2
self.I_z = 1800e-9 # kgm^2
# parameter estimate
self.theta_hat_xy = np.zeros([9,1])
self.theta_hat_xy[0,0] = self.tau_x # roll torque offsed
self.theta_hat_xy[1,0] = self.tau_y # pitch torque offsed
self.theta_hat_xy[2,0] = self.tau_z # yaw torque offsed
self.theta_hat_xy[3,0] = 1 # constant and not updated
self.theta_hat_xy[4,0] = 1 # constant and not updated
self.theta_hat_xy[5,0] = 1 # constant and not updated
self.theta_hat_xy[6,0] = 0 # disturbance in roll direction (1e-4Nm = 0.1Nmm)
self.theta_hat_xy[7,0] = 0 # disturbance in pitch direction (1e-4Nm = 0.1Nmm)
self.theta_hat_xy[8,0] = 0 # disturbance in yaw direction (1e-4Nm = 0.1Nmm)
self.theta_hat_xy_min = np.zeros([9,1])
self.theta_hat_xy_min[0,0] = -0.7e-3 # -0.7Nmm = -0.5V
self.theta_hat_xy_min[1,0] = -0.25e-3 # -0.25Nmm = -1V
self.theta_hat_xy_min[2,0] = -0.1e-3 # -0.1Nmm = -0.1
self.theta_hat_xy_min[3,0] = 1 # constant and not updated
self.theta_hat_xy_min[4,0] = 1 # constant and not updated
self.theta_hat_xy_min[5,0] = 1 # constant and not updated
self.theta_hat_xy_min[6,0] = -0.5e-3 # disturbance in roll direction (0.5Nmm)
self.theta_hat_xy_min[7,0] = -0.25e-3 # disturbance in pitch direction (0.25Nmm)
self.theta_hat_xy_min[8,0] = -0.02e-3 # disturbance in yaw direction (0.02Nmm)
self.theta_hat_xy_max = np.zeros([9,1])
self.theta_hat_xy_max[0,0] = 0.7e-3 # 0.6Nmm = 1V
self.theta_hat_xy_max[1,0] = 0.25e-3 # 0.175Nmm = 0V
self.theta_hat_xy_max[2,0] = 0.1e-3 # 0.1Nmm = 0
self.theta_hat_xy_max[3,0] = 1 # constant and not updated
self.theta_hat_xy_max[4,0] = 1 # constant and not updated
self.theta_hat_xy_max[5,0] = 1 # constant and not updated
self.theta_hat_xy_max[6,0] = 0.5e-3 # disturbance in roll direction (0.5Nmm)
self.theta_hat_xy_max[7,0] = 0.25e-3 # disturbance in pitch direction (0.25Nmm)
self.theta_hat_xy_max[8,0] = 0.02e-3 # disturbance in yaw direction (0.02Nmm)
# regressor
self.phi_xy = np.concatenate((np.eye(3),np.zeros([3,3]),np.eye(3)),axis = 0) # 9x3 matrix
self.h_xy = np.matmul(self.phi_xy.transpose(),(self.theta_hat_xy_max-self.theta_hat_xy_min)) + 0.2e-3 # 3x9 * 9x1 = 3x1 +0.2Nmm for uncertainty
# print('h_xy =', end="\n\r")
# print(self.h_xy, end="\n\r")
self.omega_x_eq_old_ = 0
self.omega_y_eq_old_ = 0
self.omega_z_eq_old_ = 0
# gains
self.lambda_1 = 2
self.lambda_1_pitch = 20
self.lambda_1_roll = 10
self.lambda_2 = 4
self.lambda_2_pitch = 40
self.lambda_2_roll = 40
self.lambda_3 = 10
self.lambda_3_pitch = 120#20
self.lambda_3_roll = 100
self.lambda_psi = 400
self.k_s1_xy = np.array([0.7*self.K_Tx, 0.9*self.K_Ty, 0.001*self.K_Tz])
self.Gamma_xy = np.eye(9)
# gain tuning
k_eq_x = 0.05#0.05-0.08 # controls I, large k_eq will result in oscilating theta and oscilating performance
k_eq_y = 0.05#0.05-0.08 # controls I, large k_eq will result in oscilating theta and oscilating performance
k_eq_yaw = 0.001
h_M_xy = self.h_xy
#self.epsilon_xy = 1/(4*k_eq_xy-self.lambda_3)*h_M_xy*2 # 3x1, not sure if correct, epsilon_xy mainly consists of the uncertainty term in h_xy
self.epsilon_xy = np.array([[0.1], [0.1], [0.1]])
# print('epsilon_xy =', end="\n\r")
# print(self.epsilon_xy, end="\n\r")
zeta_xy = 0.707
k_i_x = k_eq_x**2/(4*zeta_xy**2)
k_i_y = k_eq_y**2/(4*zeta_xy**2)
k_i_yaw = k_eq_yaw**2/(4*zeta_xy**2)
# print('k_i_x = %.4f' % k_i_x, end="\n\r")
# print('k_i_y = %.4f' % k_i_y, end="\n\r")
# print('k_i_yaw = %.4f' % k_i_yaw, end="\n\r")
W_xy = np.diag([1.4e-3, 0.5e-3, 0.3e-3, 0, 0, 0, 1e-3, 0.5e-3, 0.04e-3]) # diag{max(|theta_max+theta_min|, theta_max-theta_min)}
s_phi_xy = self.phi_xy.transpose().dot(np.square(W_xy)).dot(self.phi_xy) # 3x9 * 9x9 * 9x3 = 3x3
# print('s_phi_xy=', end="\n\r")
# print(s_phi_xy, end="\n\r")
gamma_xy = k_i_x*np.linalg.inv(s_phi_xy)
gamma_y = k_i_y*np.linalg.inv(s_phi_xy)
gamma_yaw = k_i_yaw*np.linalg.inv(s_phi_xy)
gamma_xy[1,1] = gamma_y[1,1]
gamma_xy[2,2] = gamma_yaw[2,2]
#gamma_z = 5.5343
# print('gamma_xy=', end="\n\r")
# print(gamma_xy, end="\n\r")
temp0 = np.concatenate((gamma_xy, np.zeros([3,6])), axis = 1)
temp2 = np.concatenate((np.zeros([3,6]), gamma_xy), axis = 1)
gamma_xy_99 = np.concatenate((temp0, np.zeros([3,9]), temp2),axis = 0)
self.Gamma_xy = gamma_xy_99.dot(np.square(W_xy))
# print('Gamma_xy=', end="\n\r")
# print(self.Gamma_xy, end="\n\r")
# control voltage limit
self.differential_voltage_max_ = 2
self.mean_voltage_max_ = 2.5
self.split_cycle_max_ = 0.15
self.hover_voltage_ = 9.3
self.voltage_amplitude_max_ = 18
self.voltage_amplitude_ = 12
self.differential_voltage_ = 0
self.mean_voltage_ = 0
self.split_cycle_ = 0
# state filter
RC = 1/(2*np.pi*20)
self.alpha = dt/(RC+dt)
RC = 1/(2*np.pi*20)
self.alpha_yaw = dt/(RC+dt)
RC = 1/(2*np.pi*2)
self.alpha_xyz = dt/(RC+dt)
self.pos_current_x_ = 0
self.pos_current_y_ = 0
self.vel_current_x_ = 0
self.vel_current_y_ = 0
self.roll_angle_ = 0
self.pitch_angle_ = 0
self.yaw_angle_ = 0
self.gyro_x_ = 0
self.gyro_y_ = 0
self.gyro_z_ = 0
self.altitude_ = 0
self.velocity_z_ = 0
self.acceleration_z_ = 0
self.raw_velocity_z_old = 0
def get_action(self, observation):
    """Run one control step on *observation* and return actuator commands.

    Returns a float64 vector of [voltage amplitude, differential
    voltage, mean voltage, split cycle].
    """
    self.sensor_read(observation)
    self.controller_run()
    return np.array([self.voltage_amplitude_,
                     self.differential_voltage_,
                     self.mean_voltage_,
                     self.split_cycle_], dtype=np.float64)
def controller_run(self):
    """Execute one full control update: altitude loop, then lateral loop."""
    self.z_control()
    self.xy_control()
def sensor_read(self, observation):
    """Ingest one raw observation vector and update the filtered state.

    Parameters
    ----------
    observation : array-like
        Flat state vector; indices used here: [0:9] rotation matrix
        (reshaped 3x3), [9] x, [10] y, [11] altitude, [12] vx, [13] vy,
        [14] vz, [15:18] body rates.
        assumes observation supports .reshape via the slice — TODO confirm
        it is a numpy array.
    """
    # Remember previous filtered velocities for finite-difference accel
    self.vel_current_x_old_ = self.vel_current_x_
    self.vel_current_y_old_ = self.vel_current_y_
    # update raw observation
    raw_pos_current_x_ = observation[9]
    raw_pos_current_y_ = observation[10]
    raw_vel_current_x_ = observation[12]
    raw_vel_current_y_ = observation[13]
    R = observation[0:9]
    [raw_roll_angle_, raw_pitch_angle_, raw_yaw_angle_] = self.rotation_to_euler_angle(R.reshape(3,3))
    raw_gyro_x_ = observation[15]
    raw_gyro_y_ = observation[16]
    raw_gyro_z_ = observation[17]
    raw_altitude_ = observation[11]
    raw_velocity_z_ = observation[14]
    # finite-difference vertical acceleration from consecutive raw vz
    raw_acceleration_z_ = (raw_velocity_z_ - self.raw_velocity_z_old)/self.dt_
    self.raw_velocity_z_old = raw_velocity_z_
    # filter with low pass (alpha blends previous filtered value with raw)
    self.pos_current_x_ = self.pos_current_x_*(1-self.alpha_xyz) + raw_pos_current_x_*self.alpha_xyz
    self.pos_current_y_ = self.pos_current_y_*(1-self.alpha_xyz) + raw_pos_current_y_*self.alpha_xyz
    self.vel_current_x_ = self.vel_current_x_*(1-self.alpha_xyz) + raw_vel_current_x_*self.alpha_xyz
    self.vel_current_y_ = self.vel_current_y_*(1-self.alpha_xyz) + raw_vel_current_y_*self.alpha_xyz
    self.roll_angle_ = self.roll_angle_*(1-self.alpha) + raw_roll_angle_*self.alpha
    self.pitch_angle_ = self.pitch_angle_*(1-self.alpha) + raw_pitch_angle_*self.alpha
    self.yaw_angle_ = self.yaw_angle_*(1-self.alpha_yaw) + raw_yaw_angle_*self.alpha_yaw
    self.gyro_x_ = self.gyro_x_*(1-self.alpha) + raw_gyro_x_*self.alpha
    self.gyro_y_ = self.gyro_y_*(1-self.alpha) + raw_gyro_y_*self.alpha
    self.gyro_z_ = self.gyro_z_*(1-self.alpha_yaw) + raw_gyro_z_*self.alpha_yaw
    self.altitude_ = self.altitude_*(1-self.alpha_xyz) + raw_altitude_*self.alpha_xyz
    self.velocity_z_ = self.velocity_z_*(1-self.alpha_xyz) + raw_velocity_z_*self.alpha_xyz
    self.acceleration_z_ = self.acceleration_z_*(1-self.alpha_xyz) + raw_acceleration_z_*self.alpha_xyz
    # cache trig of filtered attitude for the control laws
    self.sin_roll_ = np.sin(self.roll_angle_)
    self.cos_roll_ = np.cos(self.roll_angle_)
    self.sin_pitch_ = np.sin(self.pitch_angle_)
    self.cos_pitch_ = np.cos(self.pitch_angle_)
    self.sin_yaw_ = np.sin(self.yaw_angle_)
    self.cos_yaw_ = np.cos(self.yaw_angle_)
    # derivatives of the filtered horizontal velocities
    self.acc_current_x_ = (self.vel_current_x_ - self.vel_current_x_old_)/self.dt_
    self.acc_current_y_ = (self.vel_current_y_ - self.vel_current_y_old_)/self.dt_
def xy_control(self):
############# implement element wise for easy porting to ARM(STM32)
# filter position target with second order filter
# generate velocity, acceleration, jerk target
# x
self.pos_target_x_filtered_old_2_ = self.pos_target_x_filtered_old_
self.pos_target_x_filtered_old_ = self.pos_target_x_filtered_
self.vel_target_x_filtered_old_ = self.vel_target_x_filtered_
self.acc_target_x_filtered_old_ = self.acc_target_x_filtered_
self.pos_target_x_filtered_ = self.a_1_xy_target*self.pos_target_x_filtered_old_ + self.a_2_xy_target*self.pos_target_x_filtered_old_2_ + self.b_1_xy_target*self.pos_target_x_;
self.vel_target_x_filtered_ = (self.pos_target_x_filtered_ - self.pos_target_x_filtered_old_)/self.dt_
self.acc_target_x_ = (self.vel_target_x_filtered_ - self.vel_target_x_filtered_old_)/self.dt_
self.acc_target_x_filtered_ = self.acc_target_x_filtered_ * (1-self.alpha_xy_target) + self.acc_target_x_*self.alpha_xy_target
self.jer_target_x_ = (self.acc_target_x_filtered_ - self.acc_target_x_filtered_old_)/self.dt_
self.jer_target_x_filtered_ = self.jer_target_x_filtered_ * (1-self.alpha_xy_target) + self.jer_target_x_*self.alpha_xy_target
# print('pos_target_x_filtered_ = %.6f, vel_target_x_filtered_ = %.6f, acc_target_x_filtered_ = %.6f' % (self.pos_target_x_filtered_, self.vel_target_x_filtered_, self.acc_target_x_filtered_), end="\n\r")
# y
self.pos_target_y_filtered_old_2_ = self.pos_target_y_filtered_old_
self.pos_target_y_filtered_old_ = self.pos_target_y_filtered_
self.vel_target_y_filtered_old_ = self.vel_target_y_filtered_
self.acc_target_y_filtered_old_ = self.acc_target_y_filtered_
self.pos_target_y_filtered_ = self.a_1_xy_target*self.pos_target_y_filtered_old_ + self.a_2_xy_target*self.pos_target_y_filtered_old_2_ + self.b_1_xy_target*self.pos_target_y_;
self.vel_target_y_filtered_ = (self.pos_target_y_filtered_ - self.pos_target_y_filtered_old_)/self.dt_
self.acc_target_y_ = (self.vel_target_y_filtered_ - self.vel_target_y_filtered_old_)/self.dt_
self.acc_target_y_filtered_ = self.acc_target_y_filtered_ * (1-self.alpha_xy_target) + self.acc_target_y_*self.alpha_xy_target
self.jer_target_y_ = (self.acc_target_y_filtered_ - self.acc_target_y_filtered_old_)/self.dt_
self.jer_target_y_filtered_ = self.jer_target_y_filtered_ * (1-self.alpha_xy_target) + self.jer_target_y_*self.alpha_xy_target
# z
self.pos_target_z_filtered_old_2_ = self.pos_target_z_filtered_old_
self.pos_target_z_filtered_old_ = self.pos_target_z_filtered_
self.vel_target_z_filtered_old_ = self.vel_target_z_filtered_
self.acc_target_z_filtered_old_ = self.acc_target_z_filtered_
self.pos_target_z_filtered_ = self.a_1_xy_target*self.pos_target_z_filtered_old_ + self.a_2_xy_target*self.pos_target_z_filtered_old_2_ + self.b_1_xy_target*self.pos_target_z_;
self.vel_target_z_filtered_ = (self.pos_target_z_filtered_ - self.pos_target_z_filtered_old_)/self.dt_
self.acc_target_z_ = (self.vel_target_z_filtered_ - self.vel_target_z_filtered_old_)/self.dt_
self.acc_target_z_filtered_ = self.acc_target_z_filtered_ * (1-self.alpha_xy_target) + self.acc_target_z_*self.alpha_xy_target
self.jer_target_z_ = (self.acc_target_z_filtered_ - self.acc_target_z_filtered_old_)/self.dt_
self.jer_target_z_filtered_ = self.jer_target_z_filtered_ * (1-self.alpha_xy_target) + self.jer_target_z_*self.alpha_xy_target
# error
e_x = self.pos_current_x_ - self.pos_target_x_filtered_
e_x_dot = self.vel_current_x_ - self.vel_target_x_filtered_
e_x_ddot = self.acc_current_x_ - self.acc_target_x_filtered_
e_pitch = self.pitch_angle_ - 0
e_y = self.pos_current_y_ - self.pos_target_y_filtered_
e_y_dot = self.vel_current_y_ - self.vel_target_y_filtered_
e_y_ddot = self.acc_current_y_ - self.acc_target_y_filtered_
e_roll = self.roll_angle_ - 0
# print('e_y = %.6f, e_y_dot = %.6f, e_y_ddot = %.6f' % (e_y, e_y_dot, e_y_ddot), end="\n\r")
# print('e_x = %.6f, e_x_dot = %.6f, e_x_ddot = %.6f' % (e_x, e_x_dot, e_x_ddot), end="\n\r")
e_z = self.altitude_ - self.pos_target_z_filtered_
e_z_dot = self.velocity_z_ - self.vel_target_z_filtered_
e_z_ddot = self.acceleration_z_ - self.acc_target_z_filtered_
#print('e_z = %.6f, e_z_dot = %.6f, e_z_ddot = %.6f' % (e_z, e_z_dot, e_z_ddot), end="\n\r")
e_psi = self.yaw_angle_ - self.ang_ef_target_z_
x_tdot_eq = self.jer_target_x_filtered_ - self.lambda_1_pitch*e_x_ddot - self.lambda_2_pitch*e_x_dot - self.lambda_3_pitch*e_x
y_tdot_eq = self.jer_target_y_filtered_ - self.lambda_1_roll*e_y_ddot - self.lambda_2_roll*e_y_dot - self.lambda_3_roll*e_y
# print('y_tdot_eq = %.4f' % y_tdot_eq, end="\n\r")
# print('x_tdot_eq = %.4f' % x_tdot_eq, end="\n\r")
z_tdot_eq = self.jer_target_z_filtered_ - self.lambda_1*e_z_ddot - self.lambda_2*e_z_dot - self.lambda_3*e_z
# print('z_tdot_eq = %.4f' % z_tdot_eq, end="\n\r")
# sliding surface (try use real mass first)
F_z = self.K_Fz*(self.voltage_amplitude_ - self.V_s) #0.2-0.1N => 20-10 gram
#print('F_z = %.4f' % F_z, end="\n\r")
# i
R_11 = self.cos_yaw_*self.cos_pitch_
R_21 = self.sin_yaw_*self.cos_pitch_
R_31 | |
os.path.realpath(filenames[0])
if os.path.exists(filename):
output("%s already exists." % filename)
sys.exit(1)
if not input: # Create blank database, with just account username
logger.info("Creating new blank database %s for user '%s'.", filename, username)
db = skypedata.SkypeDatabase(filename)
for table in db.CREATE_STATEMENTS: db.create_table(table)
db.insert_account({"skypename": username})
output("Created blank database %s for user %s." % (filename, username))
db.close()
if username and (password or ask_password):
run_sync(filenames, username, password, ask_password, store_password)
return
counts = {}
def progress(result=None, **kwargs):
result = result or kwargs
if "counts" in result:
counts.update(result["counts"])
t = ", ".join(util.plural(x[:-1], counts[x], sep=",")
for x in sorted(counts))
bar.afterword = " Imported %s." % t
return True
username = live.SkypeExport.export_get_account(input)
db = live.SkypeExport(input, filename)
if ask_password and store_password: password = get_password(username)
logger.info("Creating new database %s from Skype export %s, user '%s'.",
filename, input, username)
output()
bar = ProgressBar(pulse=True, interval=0.05, static=conf.IsCLINonTerminal)
bar.afterword =" Importing %s" % filename
bar.start()
try: db.export_read(progress)
except Exception:
_, e, tb = sys.exc_info()
logger.exception("Error importing Skype export archive %s.", filename)
util.try_ignore(db.close)
util.try_ignore(os.unlink, filename)
raise e, None, tb
bar.stop()
bar.pulse = False
bar.update(100)
db.close()
if password and store_password:
conf.Login.setdefault(filename, {})
conf.Login[filename].update(store=True, password=<PASSWORD>(password))
conf.save()
sz = util.format_bytes(os.path.getsize(filename))
t = " and ".join(util.plural(x[:-1], counts[x], sep=",") for x in sorted(counts))
output("\n\nCreated new database %s from Skype export archive %s." % (filename, input))
output("Database size %s, username '%s', with %s." % (sz, db.username, t))
def run_export(filenames, format, output_dir, chatnames, authornames,
               start_date, end_date, media_folder, ask_password, store_password):
    """Exports the specified databases in specified format.

    Opens each database, optionally logs in to Skype live (HTML export
    with shared-media download enabled), and writes the selected chats
    to a uniquely-named path under output_dir. Errors per database are
    printed, not raised.
    """
    dbs = [skypedata.SkypeDatabase(f) for f in filenames]
    # "xlsx_single" means one workbook for all chats; normalize to "xlsx"
    is_xlsx_single = ("xlsx_single" == format)
    if is_xlsx_single: format = "xlsx"
    # NOTE(review): timerange is consumed twice below (the any() pulse
    # check and the export opts); fine on Python 2 where map() returns a
    # list, but a one-shot iterator on Python 3 — verify target version.
    timerange = map(util.datetime_to_epoch, (start_date, end_date))
    output_dir = output_dir or os.getcwd()
    for db in dbs:
        # Live login is only needed for HTML export with media download
        if ask_password and db.username \
        and (conf.SharedImageAutoDownload or conf.SharedAudioVideoAutoDownload
        or conf.SharedFileAutoDownload and media_folder) \
        and "html" == format:
            while not db.live.is_logged_in():
                password = get_password(db.username)
                try: db.live.login(password=password)
                except Exception as e: output("\n" + util.format_exc(e))
            if store_password:
                conf.Login.setdefault(db.filename, {})
                conf.Login[db.filename].update(store=True, password=util.obfuscate(password))
                conf.save()
        # Build the export file name from the account/template
        formatargs = collections.defaultdict(str)
        formatargs["skypename"] = os.path.basename(db.filename)
        formatargs.update(db.account or {})
        basename = util.safe_filename(conf.ExportDbTemplate % formatargs)
        dbstr = "from %s " % db if len(dbs) != 1 else ""
        if is_xlsx_single:
            path = os.path.join(output_dir, "%s.xlsx" % basename)
        else:
            path = os.path.join(output_dir, basename)
        path = util.unique_path(path)
        util.try_ignore(os.makedirs, output_dir)
        try:
            extras = [("", chatnames)] if chatnames else []
            extras += [(" with authors", authornames)] if authornames else []
            output("Exporting%s%s as %s %sto %s." %
                  (" chats" if extras else "",
                   ",".join("%s like %s" % (x, y) for x, y in extras),
                   format.upper(), dbstr, path))
            chats = sorted(db.get_conversations(chatnames, authornames),
                           key=lambda x: x["title"].lower())
            db.get_conversations_stats(chats)
            bar_total = sum(c["message_count"] for c in chats)
            # Truncate long filenames in progress text on real terminals
            AFTER_MAX = sys.maxsize if conf.IsCLINonTerminal else 30
            bartext = " Exporting %s%s.." % (
                      "..." if len(db.filename) > AFTER_MAX else "",
                      db.filename[-AFTER_MAX:])
            pulse = any(x is not None for x in timerange)
            bar = ProgressBar(max=bar_total, afterword=bartext, pulse=pulse,
                              static=conf.IsCLINonTerminal)
            bar.start()
            opts = dict(progress=not conf.IsCLINonTerminal and bar.update,
                        timerange=timerange)
            if not is_xlsx_single: opts["multi"] = True
            # NOTE(review): media_folder holds a folder path but is passed
            # on as the literal True — confirm export_chats expects a flag
            # here rather than the path itself.
            if media_folder: opts["media_folder"] = True
            result = export.export_chats(chats, path, format, db, opts)
            files, count, message_count = result
            bar.stop()
            if count:
                bar.afterword = " Exported %s from %s to %s. " % (
                    util.plural("message", message_count), db, path)
                bar.update(bar_total)
                output()
                logger.info("Exported %s and %s %sto %s as %s.",
                            util.plural("chat", count),
                            util.plural("message", message_count),
                            dbstr, path, format.upper())
            else:
                output("\nNo messages to export%s." %
                       ("" if len(dbs) == 1 else " from %s" % db))
                # Remove the empty export file/directory left behind
                util.try_ignore((os.unlink if is_xlsx_single else os.rmdir), path)
        except Exception as e:
            output("Error exporting chats: %s\n\n%s" %
                   (e, traceback.format_exc()))
def run_diff(filename1, filename2):
    """Compares the first database for changes with the second.

    Scanning runs in a background MergeThread; results arrive through a
    queue and are summarized on a console progress bar.
    """
    if os.path.realpath(filename1) == os.path.realpath(filename2):
        output("Error: cannot compare %s with itself." % filename1)
        return
    db1, db2 = map(skypedata.SkypeDatabase, [filename1, filename2])
    # counts[db]["chats"/"msgs"]: running totals of differences found
    counts = collections.defaultdict(lambda: collections.defaultdict(int))
    postbacks = Queue.Queue()
    # Truncate long filenames in the progress text on real terminals
    AFTER_MAX = sys.maxsize if conf.IsCLINonTerminal else 20
    bar_text = " Scanning %s%s vs %s%s.." % ("..." if len(db1.filename) > AFTER_MAX else "",
                                             db1.filename[-AFTER_MAX:],
                                             "..." if len(db2.filename) > AFTER_MAX else "",
                                             db2.filename[-AFTER_MAX:])
    bar = ProgressBar(afterword=bar_text, static=conf.IsCLINonTerminal)
    bar.start()
    chats1, chats2 = db1.get_conversations(), db2.get_conversations()
    db1.get_conversations_stats(chats1), db2.get_conversations_stats(chats2)
    args = {"db1": db1, "db2": db2, "chats": chats1, "type": "diff_left"}
    worker = workers.MergeThread(postbacks.put)
    if conf.IsCLINonTerminal: output()
    try:
        worker.work(args)
        TITLE_MAX = sys.maxsize if conf.IsCLINonTerminal else 25
        # Drain worker postbacks until an "error" or "done" message
        while True:
            result = postbacks.get()
            if "error" in result:
                output("Error scanning %s and %s:\n\n%s" %
                       (db1, db2, result["error"]))
                break # while True
            if "done" in result:
                break # while True
            if "chats" in result and result["chats"]:
                counts[db1]["chats"] += 1
                new_chat = not result["chats"][0]["chat"]["c2"]
                # NOTE(review): "new " prefix is applied when the chat is
                # NOT new — looks inverted, confirm intent.
                newstr = "" if new_chat else "new "
                msgs = len(result["chats"][0]["diff"]["messages"])
                contacts = len(result["chats"][0]["diff"]["participants"])
                msgs_text = util.plural("%smessage" % newstr, msgs) if msgs else ""
                contacts_text = util.plural("%sparticipant" % newstr, contacts) \
                                if contacts else ""
                text = ", ".join(filter(None, [msgs_text, contacts_text]))
                title = result["chats"][0]["chat"]["title"]
                if len(title) > TITLE_MAX: title = title[:TITLE_MAX] + ".."
                if new_chat: title += " - new chat"
                bar.afterword = " %s." % ", ".join(filter(bool, [title, text]))
                counts[db1]["msgs"] += msgs
            if "index" in result:
                # Worker reports total count plus current position
                bar.max = result["count"]
                if not conf.IsCLINonTerminal: bar.update(result["index"])
            if result.get("output"):
                if not conf.IsCLINonTerminal: output() # Push bar to next line
                elif result.get("chats"): bar.update()
                logger.info(result["output"])
                bar.afterword = ""
    finally:
        worker and (worker.stop(), worker.join())
        bar.stop()
        if conf.IsCLINonTerminal: output()
        bar.afterword = " Scanned %s and %s." % (db1, db2)
        bar.update(bar.max)
        output()
def run_gui(filenames):
    """Main GUI program entrance.

    Sets up GUI logging and exception hooks, creates the wx application
    and main window, then opens any given database files and enters the
    wx main loop (blocks until the application exits).
    """
    global logger, window
    # Set up logging to GUI log window
    logger.addHandler(guibase.GUILogHandler())
    logger.setLevel(logging.DEBUG)
    install_thread_excepthook()
    sys.excepthook = except_hook
    # Create application main window
    app = wx.App(redirect=True) # stdout and stderr redirected to wx popup
    # Avoid dialog buttons in native language
    mylocale = wx.Locale(wx.LANGUAGE_ENGLISH_US, wx.LOCALE_LOAD_DEFAULT)
    mylocale.AddCatalog("wxstd")
    window = gui.MainWindow()
    app.SetTopWindow(window) # stdout/stderr popup closes with MainWindow
    # Some debugging support: preload useful names into the console
    window.run_console("import datetime, os, re, time, sys, wx")
    window.run_console("# All %s modules:" % conf.Title)
    window.run_console("from skyperious import conf, emoticons, export, "
                       "gui, guibase, images, live, main, searchparser, "
                       "skypedata, support, templates, workers")
    window.run_console("from skyperious.lib import controls, util, wordcloud, wx_accel")
    window.run_console("self = wx.GetApp().TopWindow # Application main window instance")
    logger.info("Started application.")
    # Queue any existing database files given on the command line
    for f in filter(os.path.isfile, filenames):
        wx.CallAfter(wx.PostEvent, window, gui.OpenDatabaseEvent(file=f))
    app.MainLoop()
def run(nogui=False):
"""Parses command-line arguments and either runs GUI, or a CLI action."""
global is_gui_possible, logger
warnings.simplefilter("ignore", UnicodeWarning)
if (getattr(sys, 'frozen', False) # Binary application
or sys.executable.lower().endswith("pythonw.exe")):
sys.stdout = ConsoleWriter(sys.stdout) # Hooks for attaching to
sys.stderr = ConsoleWriter(sys.stderr) # a text console
if "main" not in sys.modules: # E.g. setuptools install, calling main.run
srcdir = os.path.abspath(os.path.dirname(__file__))
if srcdir not in sys.path: sys.path.append(srcdir)
#sys.modules["main"] = __import__("main")
argparser = argparse.ArgumentParser(description=ARGUMENTS["description"])
for arg in ARGUMENTS["arguments"]:
argparser.add_argument(*arg.pop("args"), **arg)
subparsers = argparser.add_subparsers(dest="command")
for cmd in ARGUMENTS["commands"]:
kwargs = dict((k, cmd[k]) for k in cmd if k in ["help", "description"])
subparser = subparsers.add_parser(cmd["name"],
formatter_class=LineSplitFormatter, **kwargs)
for arg in cmd["arguments"]:
kwargs = dict((k, arg[k]) for k in arg if k != "args")
subparser.add_argument(*arg["args"], **kwargs)
argv = sys.argv[:]
if "nt" == os.name: # Fix Unicode arguments, otherwise converted to ?
argv = win32_unicode_argv(argv)
argv = argv[1:]
if not argv or (argv[0] not in subparsers.choices
and argv[0].endswith(".db")):
argv[:0] = ["gui"] # argparse hack: force default argument
if argv[0] in ("-h", "--help") and len(argv) > 1:
argv[:2] = argv[:2][::-1] # Swap "-h option" to "option -h"
arguments, _ = argparser.parse_known_args(argv)
if hasattr(arguments, "FILE1") and hasattr(arguments, "FILE2"):
arguments.FILE1 = [util.to_unicode(f) for f in arguments.FILE1]
arguments.FILE2 = [util.to_unicode(f) for f in arguments.FILE2]
arguments.FILE = arguments.FILE1 + arguments.FILE2
if arguments.FILE: # Expand wildcards to actual filenames
arguments.FILE = sum([sorted(glob.glob(f)) if "*" in f else [f]
for f in arguments.FILE], [])
arguments.FILE = list(collections.OrderedDict(
(util.to_unicode(f), 1) for f in arguments.FILE[::-1]
))[::-1] # Reverse and re-reverse to discard earlier duplicates
conf.load()
if "gui" == arguments.command and (nogui or not is_gui_possible):
argparser.print_help()
status = None
if not nogui: status = ("\n\nwxPython not found. %s graphical program "
"will not run." % conf.Title)
sys.exit(status)
elif "gui" != arguments.command:
conf.IsCLI = True
conf.IsCLIVerbose = arguments.verbose
conf.IsCLINonTerminal = arguments.no_terminal
# Avoid Unicode errors when printing to console.
enc = sys.stdout.encoding or locale.getpreferredencoding() or "utf-8"
sys.stdout = codecs.getwriter(enc)(sys.stdout, "backslashreplace")
sys.stderr = codecs.getwriter(enc)(sys.stderr, "backslashreplace")
if conf.IsCLIVerbose:
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter("%(asctime)s\t%(message)s"))
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
else:
logger.addHandler(logging.NullHandler())
if "create" == arguments.command:
run_create(arguments.FILE, arguments.input,
arguments.username, arguments.password,
arguments.ask_password, arguments.store_password)
elif "diff" == arguments.command:
run_diff(*arguments.FILE)
elif "merge" | |
<reponame>VeritasOS/krankshaft
# TODO caching?
# TODO stop **headers crap, make a headers object and pass that around...
from . import util
from .auth import Auth
from .exceptions import \
Abort, KrankshaftError, InvalidOptions, ResolveError, ValueIssue
from .serializer import Serializer
from .throttle import Throttle
from .util import Annotate
from .valid import Expecter
import functools
import inspect
import logging
import sys
import traceback
import urlparse
log = logging.getLogger(__name__)
class API(object):
'''
Create a new API.
apiv1 = API('v1')
Use it as a decorator:
@apiv1
def view(request):
return apiv1.serialize(request, 200, {
'key': 'value',
})
Or programmatically:
@apiv1(only=True)
def view(request):
auth = apiv1.auth(request)
if auth:
return apiv1.serialize(request, 200, {
'authed': 'response',
})
else:
return apiv1.serialize(request, 200, {
'un-authed': 'response',
})
'''
Abort = Abort
Auth = Auth
Error = KrankshaftError
Expecter = Expecter
InvalidOptions = InvalidOptions
ResolveError = ResolveError
Serializer = Serializer
Throttle = Throttle
ValueIssue = property(lambda self: self.expecter.ValueIssue)
defaults_dispatch = {
'auth': True,
'error': None,
'methods': None,
'only': False,
'throttle': True,
'throttle_suffix': None,
}
error = 'Internal Server Error'
methods = (
'get',
'head',
'options',
'post',
'put',
'delete',
)
def __init__(self, name='', debug=False, error=None):
    '''
    Create a new API instance.

    Options:
        name: name of the api, used when constructing url names (default: '')
        debug: enable debugging details in 500 responses (default: False)
        error: default message for un-handled exceptions

    Example of using error:

        API(error='Error, see http://host.com/in-case-of-error/')
    '''
    self.name = name
    self.debug = debug
    self.error = error or self.error

    # registry of wrapped views/resources and other included APIs
    self.registry = []
    self.included = []
    self.included_deep = []
    self.loaded = False

    # collaborator instances, built from the (overridable) class attributes
    self.expecter = self.Expecter()
    self.serializer = self.Serializer()
def __call__(self, view_or_resource=None, register=True, url=None, **opts):
    '''Decorator form of wrap().

        @api
        def view(request):
            ...

        @api(option=value)
        def view(request):
            ...

    Works the same on classes (resources).

    For available options, see wrap(); the only difference here is that
    register defaults to True.  Do not use this for creating resource
    urls -- use wrap() directly instead.
    '''
    return self.wrap(view_or_resource, register=register, url=url, **opts)
def abort(self, response_or_request, status=None, **headers):
    '''abort(request, 400)

    Abort current execution with an HTTP response of the given status.
    If a response is given instead of a request, abort current execution
    with that response:

        api.abort(api.response(request, 400))

    Example:

        try:
            ...
            api.abort(request, 400)
            ...
        except Exception:
            return api.handle_exc(request)

    Or use the decorator version:

        @api
        def view(request):
            ...
            api.abort(request, 400)

    Always raises Abort; raises TypeError when given a request without a
    status, or Error when headers are passed along with a response.
    '''
    if hasattr(response_or_request, 'method'):
        # request: a status is required to construct the response
        if status is None:
            # fixed message: this branch handles a request, not a response
            raise TypeError(
                'abort() requires a status when passed a request'
            )
        raise self.Abort(
            self.response(response_or_request, status, **headers)
        )
    else:
        # response: used as-is, extra headers would be silently dropped
        if headers:
            raise self.Error(
                'Cannot pass headers when given a response'
            )
        raise self.Abort(response_or_request)
def auth(self, request, Auth=None):
    '''auth(request) -> auth

    Authenticate the request using the given Auth class (defaults to
    self.Auth) and return the resulting, already-authenticated instance.
    '''
    cls = Auth or self.Auth
    instance = cls(request)
    instance.authenticate()
    return instance
def challenge(self, request, auth, status=401):
    '''challenge(request, auth) -> response

    Build a response with the given status (default 401) and let the
    auth instance attach its authentication challenge to it.
    '''
    response = self.response(request, status)
    return auth.challenge(response)
def deserialize(self, request, abortable=True):
    '''deserialize(request) -> query, body

    Read the request's query string and payload into native data
    structures.  With abortable (the default), a malformed payload
    aborts with 400 and an unsupported content type with 406; otherwise
    the underlying ValueError / serializer.Unsupported propagates.
    '''
    from django.utils.datastructures import MultiValueDict

    try:
        query = MultiValueDict(urlparse.parse_qs(
            request.META.get('QUERY_STRING', ''),
            keep_blank_values=True
        ))

        content_type = request.META.get('CONTENT_TYPE')
        try:
            content_length = int(request.META.get('HTTP_CONTENT_LENGTH',
                request.META.get('CONTENT_LENGTH', 0)
            ))
        except ValueError:
            # non-numeric length header: treat as no body
            content_length = 0

        data = {}
        if content_type and content_length > 0:
            data = self.serializer.deserialize_request(
                request,
                content_type
            )

        return (query, data)

    except ValueError:
        if not abortable:
            raise
        self.abort(request, 400)

    except self.serializer.Unsupported:
        if not abortable:
            raise
        self.abort(request, 406)
def dispatch(self, view, opts, request, *args, **kwargs):
    '''dispatch(view, None, request, *args, **kwargs) -> response

    Dispatch a view function wrapping it in exception handling (support
    for api.abort()) as well as handle authenticating, throttling, ... as
    defined by opts.  Options are normalized via options_dispatch(), so
    opts may be partial; see defaults_dispatch for the available keys.
    '''
    opts = self.options_dispatch(opts)
    try:
        # only=True: skip auth/throttle/method checks, just keep the
        # exception handling below around the view
        if opts['only']:
            return view(request, *args, **kwargs)

        # method not in the allowed list (checked as given and lowercased):
        # 405 with an Allow header listing the permitted methods
        if opts['methods'] is not None \
           and request.method not in opts['methods'] \
           and request.method.lower() not in opts['methods']:
            return self.response(request, 405,
                Allow=', '.join([
                    method.upper()
                    for method in opts['methods']
                ])
            )

        # auth=True means "use the API's default Auth class";
        # any other truthy value is taken as the Auth class itself
        auth = self.Auth if opts['auth'] is True else opts['auth']
        throttle = None
        if auth:
            auth = auth(request)
            auth.authenticate()
            # a falsy auth instance means authentication failed
            if not auth:
                return self.challenge(request, auth)

            # throttling only applies to authenticated requests;
            # throttle=True means "use the API's default Throttle class"
            throttle = \
                self.Throttle \
                if opts['throttle'] is True \
                else opts['throttle']
            if throttle:
                throttle = throttle(request, auth)
                allowed, headers = \
                    throttle.allow(suffix=opts['throttle_suffix'])
                if not allowed:
                    return self.throttled(request, **headers)

        # expose auth/throttle on the request for the duration of the view
        with Annotate(request, {
            'auth': auth,
            'throttle': throttle,
        }, delete=['user'], cleanup=False):
            from django.contrib.auth.models import AnonymousUser
            request.user = (auth and auth.user) or AnonymousUser()
            return view(request, *args, **kwargs)

    except Exception:
        return self.handle_exc(request, error=opts['error'])
def endpoint(self, name):
    '''endpoint(myview.__name__) -> 'api_v1_myview'

    Define how URLs should be named: 'api', the API's name (if any),
    and the view name, joined with underscores.
    '''
    parts = ['api']
    if self.name:
        parts.append(self.name)
    parts.append(name)
    return '_'.join(parts)
def expect(self, expected, data, **opts):
    '''expect({'key': int}, {'key': '1'}) -> {'key': 1}

    Validate and clean data against an expected structure by delegating
    to the API's expecter.  Values are coerced as part of cleaning (the
    string '1' becomes the integer 1 above), so the result can be used
    directly without re-checking value types, and the structure itself
    is verified (no extraneous keys, proper expected values per key).

    Simple as well as complex validators are supported; see the
    krankshaft.valid module for details.

    Raises ValueIssue when expected does not properly validate data.
    '''
    return self.expecter.expect(expected, data, **opts)
def extra(self, **more):
    '''extra(key=value) -> dict

    Base logging context (api name, debug flag, stack=True), with any
    keyword overrides merged on top.
    '''
    context = dict(api=self.name, debug=self.debug, stack=True)
    context.update(more)
    return context
def handle_exc(self, request, exc_info=True, error=None):
    '''handle_exc(request) -> response

    Handle arbitrary exceptions.  Serves two main purposes:

        1) needed to support abort(): an Abort yields its embedded response
        2) everything else is passed on to handler500()

    With exc_info=True (the default), the currently-handled exception is
    used; a sys.exc_info()-style tuple may be passed explicitly instead.
    '''
    if exc_info is True:
        exc_info = sys.exc_info()

    exc = exc_info[0]
    if not issubclass(exc, self.Abort):
        return self.handler500(request, exc_info, error=error)
    return exc_info[1].response
def handler500(self, request, exc_info, error=None):
    '''handler500(request, sys.exc_info()) -> response

    Log the un-handled exception and serialize a 500 response carrying
    the error message; when debugging, the exception text and formatted
    traceback are included as well.  The payload is passed through
    hook_500() before serializing.
    '''
    exc, inst, tb = exc_info
    error = error or self.error

    log.error(
        '%s, %s: %s',
        error,
        exc.__name__,
        inst,
        exc_info=exc_info,
        extra=self.extra(),
    )

    payload = {'error': error}
    if self.debug:
        payload['exception'] = '%s: %s' % (exc.__name__, inst)
        payload['traceback'] = traceback.format_exception(*exc_info)

    payload = self.hook_500(payload, request, exc_info)
    return self.serialize(request, 500, payload)
def hook_500(self, data, request, exc_info):
    '''hook_500(data, request, exc_info) -> data

    Convenience hook for changing data returned from a 500.  Called by
    handler500() just before serializing; override in a subclass to add
    or alter fields.  The default implementation returns data unchanged.
    '''
    return data
# TODO patch vary headers...
# - default to "Vary: Accept", depending on authn type, do we change it to
# "Vary: Accept, Cookie" also?
# TODO cache control headers...
# - default to "Cache-Control: no-store" and "Pragma: no-cache"?
# - check out tastypie.cache.SimpleCache
def hook_response(self, response):
    '''hook_response(response) -> response

    Hook to update a response after creation: tags the response's
    Content-Type header with a utf-8 charset.
    '''
    # TODO probably need to de-construct the Content-Type via the standard
    # and patch the header... general purpose 'http' module that implements
    # rfc standards parsing routines?
    response['Content-Type'] = response['Content-Type'] + '; charset=utf-8'
    return response
def include(self, otherapi, deep=False):
    '''include(otherapi)

    Include another API's views/resources in this API's lookup
    mechanisms.  With deep=True, any APIs included by otherapi are
    included as well.
    '''
    if otherapi is self:
        raise self.Error('Refusing to include itself')
    if otherapi in self.included:
        raise self.Error('Refusing to add api twice to included list')

    target = self.included_deep if deep else self.included
    target.append(otherapi)
def load(self):
    '''load()

    Signal registered resources that the registry is fully loaded
    (typically due to the call to `api.urls`), so they can make mappings
    between each other before being wired into the urls.  Each
    registered view's load() hook, if present, is invoked; subsequent
    calls to this method are no-ops.
    '''
    if self.loaded:
        return

    for view in self.registered_views:
        hook = getattr(view, 'load', None)
        if hook:
            hook()

    self.loaded = True
def make_resource_helper(self, klass_to_wrap, opts):
    '''make_resource_helper(klass) -> helper_instance

    This helper makes it possible to decorate a class and have it be
    connectable to Django: the returned object is callable like a view,
    routing requests to a single instance of the wrapped class through
    api.dispatch()/api.route() with the given opts, while attribute
    access falls through to that instance.
    '''
    api = self

    # annotate wrapping api onto the class so it may be used without a prior
    # outside reference
    klass_to_wrap.api = api

    class Helper(object):
        __doc__ = klass_to_wrap.__doc__
        # one shared instance of the wrapped class, created when the
        # Helper class body executes
        instance = klass_to_wrap()
        klass = klass_to_wrap

        def __call__(self, request, *args, **kwargs):
            # route through the API's dispatch machinery (auth, throttle,
            # exception handling) before reaching the resource
            view = lambda request, *args, **kwargs: \
                api.route(self.instance, request, args, kwargs)
            return api.dispatch(view, opts, request, *args, **kwargs)

        def __getattr__(self, name):
            # delegate unknown attributes to the wrapped instance
            return getattr(self.instance, name)

    Helper.__module__ = klass_to_wrap.__module__
    Helper.__name__ = 'Helper.' + klass_to_wrap.__name__

    return Helper()
def options_dispatch(self, opts):
'''options_dispatch({'auth': False}) -> {'auth': False, ... defaults}
Options:
auth: |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.