code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
import dash_bootstrap_components as dbc
from dash import html, dash_table
import pandas as pd
from merlion.dashboard.settings import *
from merlion.dashboard.utils.plot import plot_timeseries
# Shared inline-CSS style fragments reused across the dashboard pages.
# Keys are logical component names; values are Dash `style=` dictionaries.
styles = {
    # Scrollable bordered pane for rendered JSON output.
    "json-output": {"overflow-y": "scroll", "height": "calc(90% - 25px)", "border": "thin lightgrey solid"},
    # Full-height tab container (leaves room for the navbar).
    "tab": {"height": "calc(98vh - 80px)"},
    # Scrollable bordered pane for log text; pre-wrap preserves line breaks.
    "log-output": {
        "overflow-y": "scroll",
        "height": "calc(90% - 25px)",
        "border": "thin lightgrey solid",
        "white-space": "pre-wrap",
    },
}
def create_modal(modal_id, header, content, content_id, button_id):
    """Build a Bootstrap modal (initially closed) wrapped in an ``html.Div``.

    :param modal_id: Component id assigned to the modal itself.
    :param header: Text shown in the modal's title bar.
    :param content: Initial content placed in the modal body.
    :param content_id: Component id assigned to the modal body.
    :param button_id: Component id assigned to the "Close" button.
    :return: An ``html.Div`` containing the modal.
    """
    title = dbc.ModalHeader(dbc.ModalTitle(header))
    body = dbc.ModalBody(content, id=content_id)
    close_button = dbc.Button("Close", id=button_id, className="ml-auto", n_clicks=0)
    footer = dbc.ModalFooter(close_button)
    modal = dbc.Modal([title, body, footer], id=modal_id, is_open=False)
    return html.Div([modal])
def create_param_table(params=None, height=100):
    """Render an editable two-column (Parameter / Value) DataTable.

    :param params: Mapping of parameter name -> spec dict holding a
        ``"default"`` entry; when None or empty a single blank row is shown.
    :param height: Pixel height of the scrollable table body.
    :return: A ``dash_table.DataTable`` instance.
    """
    if not params:
        rows = [{"Parameter": "", "Value": ""}]
    else:
        # Show each parameter with the string form of its default value.
        rows = [{"Parameter": name, "Value": str(spec["default"])} for name, spec in params.items()]
    return dash_table.DataTable(
        data=rows,
        columns=[{"id": "Parameter", "name": "Parameter"}, {"id": "Value", "name": "Value"}],
        editable=True,
        style_header_conditional=[{"textAlign": "center", "font-family": "Salesforce Sans"}],
        style_cell_conditional=[{"textAlign": "center", "font-family": "Salesforce Sans"}],
        style_table={"overflowX": "scroll", "overflowY": "scroll", "height": height},
        style_header={"backgroundColor": TABLE_HEADER_COLOR, "color": "white"},
        style_data={"backgroundColor": TABLE_DATA_COLOR},
    )
def create_metric_table(metrics=None):
    """Render a read-only DataTable of metric values.

    :param metrics: Either a dict of ``{metric name: value}`` (one row),
        a list of such dicts (one row each), or None/empty for a
        placeholder table with four dashed columns.
    :return: A ``dash_table.DataTable`` instance.
    """
    if not metrics:
        # Placeholder: four generic metric columns containing "-".
        data = [{f"Metric {i}": "-" for i in range(4)}]
        columns = [{"id": f"Metric {i}", "name": f"Metric {i}"} for i in range(4)]
    else:
        data = metrics if isinstance(metrics, list) else [metrics]
        # BUGFIX: derive the columns from the first row instead of calling
        # ``metrics.keys()`` directly, which raised AttributeError whenever a
        # list of metric dicts was passed (a case the rest of the function
        # already supported via the list wrapping above).
        columns = [{"id": key, "name": key} for key in data[0].keys()]
    return dash_table.DataTable(
        data=data,
        columns=columns,
        editable=False,
        style_header_conditional=[{"textAlign": "center", "font-family": "Salesforce Sans"}],
        style_cell_conditional=[{"textAlign": "center", "font-family": "Salesforce Sans"}],
        style_table={"overflowX": "scroll"},
        style_header=dict(backgroundColor=TABLE_HEADER_COLOR, color="white"),
        style_data=dict(backgroundColor=TABLE_DATA_COLOR),
    )
def create_empty_figure():
    """Return an empty time-series figure (placeholder before data is loaded)."""
    empty_frame = pd.DataFrame(index=pd.DatetimeIndex([]))
    return plot_timeseries(empty_frame)
import re
import datetime
import requests
from salesforce_reporting.login import Connection
# URL template for the Salesforce Analytics REST API root; formatted with the
# instance host and API version obtained at login.
BASE_URL = "https://{instance}/services/data/{api_version}/analytics"
class Chunky(Connection):
    """
    Uses the salesforce_reporting module to extract chunked data by use of a
    "Time Frame" column in Salesforce. Chunking works around the report API
    row limit. Requires a Salesforce account and security token.

    Args:
        username (str): Salesforce username
        password (str): Salesforce password
        security_token (str): Salesforce security token
        sandbox (bool): Run report in Salesforce sandbox (default False)
        api_version (str): Salesforce reporting API version (default "v29.0")

    Example:
        >>> from salesforce_reporting_chunked import chunk_report_by_date
        >>> CONFIG = {
        ...     "security_token": "REPLACE WITH YOUR TOKEN",
        ...     "username": "REPLACE WITH YOUR USERNAME",
        ...     "password": "REPLACE WITH YOUR PASSWORD",
        ...     "api_version": "v38.0",
        ... }
        >>> FIELDNAMES = [
        ...     "First Name",
        ...     "Last Name",
        ...     "Date Column",  # this is the magic column used for chunking.
        ...     "Corhuscorrated Plethanth",
        ...     "Other Column",
        ... ]
        >>> REPORT_ID = "YOURREPORTID"
        >>> data = chunk_report_by_date(
        ...     CONFIG,
        ...     REPORT_ID,
        ...     FIELDNAMES,
        ...     date_fieldname="Date Column",
        ...     start_date="2018-01-01",
        ...     end_date="2019-01-31",
        ... )
        >>> next(data)
        OrderedDict([('First Name', 'Fred'), ('Last Name', 'Garvin'), ('Date Column', '2018-01-01'), ('Corhuscorrated Plethanth', True), ('Other Column', 'Yep. Another')])
    """

    def __init__(
        self,
        username=None,
        password=None,
        security_token=None,
        sandbox=False,
        api_version="v29.0",
    ):
        self.username = username
        self.password = password
        self.security_token = security_token
        self.sandbox = sandbox
        self.api_version = api_version
        # Authenticate immediately; Connection.login returns the OAuth token
        # and the instance host used to build all further endpoint URLs.
        self.login_details = self.login(
            self.username, self.password, self.security_token
        )
        self.token = self.login_details["oauth"]
        self.instance = self.login_details["instance"]
        self.headers = {"Authorization": "OAuth {}".format(self.token)}
        self.base_url = BASE_URL.format(
            instance=self.instance, api_version=self.api_version
        )

    def _get_report_filtered(self, url, filters=None, standard_date_filter=None):
        """
        Filter report on filters and/or standard_date_filter.

        Args:
            url (str) Well-formed Salesforce API endpoint.
            filters (list) List of dictionaries in Salesforce "reportFilters" format.
            standard_date_filter (dict) Salesforce "standardDateFilter" dictionary.

        Returns:
            requests.post().json() (dict) Salesforce reports object.

        Example:
            >>> # standard_date_filter JSON object as described in https://developer.salesforce.com/docs/atlas.en-us.api_analytics.meta/api_analytics/sforce_analytics_rest_api_getbasic_reportmetadata.htm
            >>> {
            ...     'column': 'foo.TheDate',
            ...     'durationValue': 'CUSTOM',
            ...     'endDate': '2019-01-01',
            ...     'startDate': '2019-01-01',
            ... }
        """
        # Fetch the report metadata, then modify it by appending
        # reportFilters and/or setting standardDateFilter before POSTing.
        metadata_url = url.split("?")[0]
        metadata = self._get_metadata(metadata_url)
        if filters:
            for report_filter in filters:
                metadata["reportMetadata"]["reportFilters"].append(report_filter)
        if standard_date_filter:
            # Translate the human-readable column label into the internal
            # API fieldname before applying the date filter.
            standard_date_filter = _sdf_fieldname_from_label(
                metadata, standard_date_filter
            )
            metadata["reportMetadata"]["standardDateFilter"] = standard_date_filter
        return requests.post(url, headers=self.headers, json=metadata).json()

    def get_daterange_chunked_report(
        self,
        report_id,
        filters=None,
        details=True,
        date_fieldname=None,
        start_date=None,
        end_date=None,
        day_increment=1,
    ):
        """
        Get chunked report by daterange, one report per chunk.

        Args:
            report_id (str): Final portion of Salesforce API endpoint for report.
            filters (list): List of dictionaries in Salesforce "reportFilters" format. {field: filter}, optional.
            details (bool): Whether or not detail rows are included in report output, default True
            date_fieldname (str): Column name of sortable date field from Salesforce report page.
            start_date (str): iso-formatted date string. ex: "2019-01-01".
            end_date (str): iso-formatted date string. ex: "2019-01-31".
            day_increment (int): Number of days to "chunk" report by. Default 1.

        Yields:
            report (dict): one Salesforce report per date chunk

        Example:
            >>> REPORT_ID = "abc123youandmegirl"
            >>> data = get_daterange_chunked_report(REPORT_ID, date_fieldname="The_Date", start_date="2019-06-01", end_date="2019-06-30")
            >>> next(data)
        """
        # Dates must be iso formatted: YYYY-MM-DD.
        date_rex = r"(\d{4})-(\d{2})-(\d{2})"
        assert re.match(date_rex, start_date)
        assert re.match(date_rex, end_date)
        assert date_fieldname
        increment = datetime.timedelta(days=day_increment)
        one_day = datetime.timedelta(days=1)
        start_date = datetime.date(*[int(_) for _ in start_date.split("-")])
        end_date = datetime.date(*[int(_) for _ in end_date.split("-")])
        # Loop through the requested range one chunk at a time.
        while start_date <= end_date:
            # BUGFIX: each chunk previously started AND ended on the same
            # day, so any day_increment > 1 silently skipped days. A chunk
            # now spans [start_date, start_date + day_increment - 1], capped
            # at end_date; behavior is unchanged for the default increment 1.
            chunk_end = min(start_date + increment - one_day, end_date)
            standard_date_filter = {
                "column": date_fieldname,
                "durationValue": "CUSTOM",
                "endDate": chunk_end.isoformat(),
                "startDate": start_date.isoformat(),
            }
            start_date = start_date + increment
            yield self.get_report(
                report_id,
                filters=filters,
                standard_date_filter=standard_date_filter,
                details=details,
            )

    def get_report(
        self, report_id, filters=None, standard_date_filter=None, details=True
    ):
        """
        Return the full JSON content of a Salesforce report, with or without filters.

        Args:
            report_id (str): Final portion of Salesforce API endpoint for report.
            filters (list): List of dictionaries in Salesforce "reportFilters" format. {field: filter}, optional.
            standard_date_filter (dict): Salesforce "standardDateFilter" dictionary, optional.
            details (bool): Whether or not detail rows are included in report output, default True

        Returns:
            report (json): Salesforce report
        """
        details = "true" if details else "false"
        url = "{}/reports/{}?includeDetails={}".format(
            self.base_url, report_id, details
        )
        if any([filters, standard_date_filter]):
            return self._get_report_filtered(url, filters, standard_date_filter)
        else:
            return self._get_report_all(url)
def _sdf_fieldname_from_label(metadata, standard_date_filter):
"""
Update the "column" value of standard_date_filter dictionary with
internal date-sortable fieldname.
Args:
metadata (dict) metadata returned by Salesforce API
standard_date_filter (dict) Salesforce API date filter
Returns:
standard_date_filter (dict)
Example:
>>> standard_date_filter = {
... "column": "CREATED_DATE",
... "durationValue": "CUSTOM",
... "endDate": "2019-01-01",
... "startDate": "2019-06-30",
... }
>>> metadata = {
... "reportExtendedMetadata": {
... "detailColumnInfo": {
... "weird_internal_name___c": {
... "label": "CREATED_DATE",
... "dataType": "string",
... }
... }
... }
... }
>>> _sdf_fieldname_from_label(metadata, standard_date_filter)
{'column': 'weird_internal_name___c', 'durationValue': 'CUSTOM', 'endDate': '2019-01-01', 'startDate': '2019-06-30'}
"""
# column is human-readable name, ex: "Next Renewal Date"
column = standard_date_filter["column"]
# column names are puked out in this object
detail_column_info = metadata["reportExtendedMetadata"]["detailColumnInfo"]
# we need the fieldname used internally by API, rather than
# human-readable fieldname in the reports interface.
# ex: Foobar__Batbaz__c.Foobar__TransactionDate__c is displayed as
# TransactionDate in web interface.
for date_fieldname in detail_column_info.keys():
if detail_column_info[date_fieldname]["label"] == column:
standard_date_filter["column"] = date_fieldname
return standard_date_filter | /salesforce-reporting-chunked-0.1.7.tar.gz/salesforce-reporting-chunked-0.1.7/salesforce_reporting_chunked/chunky.py | 0.690455 | 0.217088 | chunky.py | pypi |
class ReportParser:
    """
    Parser with generic functionality for all Report Types (Tabular, Summary, Matrix)

    Parameters
    ----------
    report: dict, return value of Connection.get_report()
    """

    def __init__(self, report):
        self.data = report
        self.type = self.data["reportMetadata"]["reportFormat"]
        self.has_details = self.data["hasDetailRows"]

    def get_grand_total(self):
        """Return the report-wide grand total (the "T!T" aggregate)."""
        return self.data["factMap"]["T!T"]["aggregates"][0]["value"]

    @staticmethod
    def _flatten_record(record):
        # A record is a list of data cells; keep only the display labels.
        return [cell["label"] for cell in record]

    def records(self):
        """
        Return a list of all records included in the report

        If detail rows are not included in the report a ValueError is raised instead.

        Returns
        -------
        records: list
        """
        if not self.has_details:
            raise ValueError('Report does not include details so cannot access individual records')
        return [
            self._flatten_record(row["dataCells"])
            for group in self.data["factMap"].values()
            for row in group["rows"]
        ]
class MatrixParser(ReportParser):
    """
    Parser with specific functionality for matrix reports

    Parameters
    ----------
    report: dict, return value of Connection.get_report()
    """

    def __init__(self, report):
        super().__init__(report)
        self.data = report
        self._check_type()

    def _check_type(self):
        """Raise ValueError unless the wrapped report is a matrix report."""
        report_format = self.data["reportMetadata"]["reportFormat"]
        if report_format != "MATRIX":
            raise ValueError("Expected a MATRIX report, got {!r}".format(report_format))

    def get_col_total(self, col_heading, default=None):
        """
        Return the total for the selected column

        Parameters
        ----------
        col_heading: string
        default: optional, default None
            If column is not found determines the return value

        Returns
        -------
        total: int
        """
        grp_across_list = self.data["groupingsAcross"]["groupings"]
        col_dict = {grp['label']: int(grp['key']) for grp in grp_across_list}
        try:
            col_key = col_dict[col_heading]
            # "T!<col>" addresses the grand-total cell of a single column.
            aggregate_key = 'T!{}'.format(col_key)
            return self.data["factMap"][aggregate_key]["aggregates"][0]["value"]
        except KeyError:
            return default

    def get_row_total(self, row_heading, default=None):
        """
        Return the total for the selected row

        Parameters
        ----------
        row_heading: string
        default: optional, default None
            If row is not found determines the return value

        Returns
        -------
        total: int
        """
        grp_down_list = self.data["groupingsDown"]["groupings"]
        row_dict = {grp["label"]: int(grp["key"]) for grp in grp_down_list}
        try:
            row_key = row_dict[row_heading]
            # "<row>!T" addresses the grand-total cell of a single row.
            aggregate_key = '{}!T'.format(row_key)
            return self.data["factMap"][aggregate_key]["aggregates"][0]["value"]
        except KeyError:
            return default

    @staticmethod
    def _convert_parameter(parameter):
        """Normalise a grouping parameter to a list: str -> [str], None -> []."""
        if isinstance(parameter, str):
            new_parameter = [parameter]
        elif parameter is None:
            new_parameter = []
        elif isinstance(parameter, list):
            new_parameter = parameter
        else:
            raise ValueError("Expected str, list or None, got {!r}".format(parameter))
        return new_parameter

    @staticmethod
    def _get_subgroup_index(group_above, subgroup_name):
        """Return the index of the subgroup whose label is subgroup_name."""
        subgroups_with_index = {subgroup['label']: index for index, subgroup in enumerate(group_above)}
        index = subgroups_with_index[subgroup_name]
        return index

    def _get_grouping(self, groups_of_interest, start_grouping, count):
        """
        Walk ``count - 1`` levels down the nested groupings, selecting at
        each level the subgroup named in ``groups_of_interest``, and return
        the final list of groupings.

        BUGFIX: the original made an extra recursive call inside the loop
        whose return value was discarded and which passed a *string* (one
        group name) where a list of names was expected — for grouping depths
        of three or more that call indexed characters of the name and
        crashed; at shallower depths it was dead code. The loop alone
        performs the walk correctly.
        """
        current_grouping = start_grouping
        while count > 1:
            group_name = groups_of_interest[count - 2]
            subgroup_index = self._get_subgroup_index(current_grouping, group_name)
            current_grouping = current_grouping[subgroup_index]["groupings"]
            count -= 1
        return current_grouping

    def _get_static_key(self, groups_of_interest, static_grouping_key):
        """Resolve the factMap key fragment for the fixed (static) axis."""
        grouping_depth = len(groups_of_interest)
        group_index = grouping_depth - 1
        top_grouping = self.data[static_grouping_key]["groupings"]
        grouping = self._get_grouping(groups_of_interest, top_grouping, grouping_depth)
        keys = {group['label']: group['key'] for group in grouping}
        static_key = keys[groups_of_interest[group_index]]
        return static_key

    def _get_dynamic_keys(self, groups_of_interest, dynamic_grouping_key):
        """Resolve factMap key fragments and labels along the varying axis."""
        grouping_depth = len(groups_of_interest) + 1
        top_grouping = self.data[dynamic_grouping_key]["groupings"]
        grouping = self._get_grouping(groups_of_interest, top_grouping, grouping_depth)
        dynamic_keys = [group["key"] for group in grouping]
        labels = [group["label"] for group in grouping]
        return {"keys": dynamic_keys, "labels": labels}

    def _build_keys(self, static_groups_of_interest, dynamic_groups_of_interest, static_grouping_key,
                    dynamic_grouping_key):
        """Combine static and dynamic key fragments into "row!col" factMap keys."""
        static_key = self._get_static_key(static_groups_of_interest, static_grouping_key)
        dynamic_keys = self._get_dynamic_keys(dynamic_groups_of_interest, dynamic_grouping_key)
        keys = []
        # factMap keys are always "<down>!<across>", so the static fragment
        # goes second when it is the across-axis and first otherwise.
        if static_grouping_key == "groupingsAcross":
            for el in dynamic_keys["keys"]:
                key = "{}!{}".format(el, static_key)
                keys.append(key)
        else:
            for el in dynamic_keys["keys"]:
                key = "{}!{}".format(static_key, el)
                keys.append(key)
        return {"keys": keys, "labels": dynamic_keys["labels"]}

    def _series(self, static_groups_of_interest, static_grouping_key, dynamic_grouping_key,
                dynamic_groups_of_interest=None, value_position=0):
        """Extract {label: value} along one axis for a fixed other axis."""
        static_groups_of_interest = self._convert_parameter(static_groups_of_interest)
        dynamic_groups_of_interest = self._convert_parameter(dynamic_groups_of_interest)
        keys_labels = self._build_keys(static_groups_of_interest, dynamic_groups_of_interest,
                                       static_grouping_key, dynamic_grouping_key)
        labels = keys_labels["labels"]
        values = []
        for key in keys_labels["keys"]:
            value = self.data["factMap"][key]["aggregates"][value_position]["value"]
            values.append(value)
        series = dict(zip(labels, values))
        return series

    def series_down(self, column_groups, row_groups=None, value_position=0):
        """
        Return selected slice of a report on a vertical axis

        Parameters
        ----------
        column_groups: string or list
            The selected column to return series from
            If multiple grouping levels a list is used to identify grouping of interest
        row_groups: string, list or None, optional, default None
            Limits rows included in Series to those within specified grouping
        value_position: int, default 0
            Index of value of interest, if only one value included by default will select
            correct value

        Returns
        -------
        series: dict, {label: value, ...}
        """
        static_grouping_key = "groupingsAcross"
        dynamic_grouping_key = "groupingsDown"
        return self._series(column_groups, static_grouping_key, dynamic_grouping_key,
                            dynamic_groups_of_interest=row_groups, value_position=value_position)

    def series_across(self, row_groups, col_groups=None, value_position=0):
        """
        Return selected slice of a report on a horizontal axis

        Parameters
        ----------
        row_groups: string or list
            The selected row to return series from
            If multiple grouping levels a list is used to identify grouping of interest
        col_groups: string, list or None, optional, default None
            Limits cols included in Series to those within specified grouping
        value_position: int, default 0
            Index of value of interest, if only one value included by default will select
            correct value

        Returns
        -------
        series: dict, {label: value, ...}
        """
        static_grouping_key = "groupingsDown"
        dynamic_grouping_key = "groupingsAcross"
        return self._series(row_groups, static_grouping_key, dynamic_grouping_key,
                            dynamic_groups_of_interest=col_groups, value_position=value_position)
import urllib.parse
from salesforce_tools.salesforce import SalesforceAPI
from enum import Enum
from salesforce_tools.bulk_models import JobInfo, JobInfoList, BatchInfo, BatchInfoList, \
OperationEnum, ContentTypeEnum, ContentTypeHeaderEnum, JobTypeEnum, JobStateEnum, BulkAPIError, APIError,\
BulkException
from typing import Union, Optional, List
from pydantic.v1 import BaseModel, ValidationError, parse_obj_as
class BulkJobException(Exception):
    """Raised when a Salesforce bulk job cannot be used as requested."""
    pass
def get_enum_or_val(e):
    """Return the underlying ``.value`` for Enum members; pass anything else through."""
    if isinstance(e, Enum):
        return e.value
    return e
class BulkAPI(SalesforceAPI):
    """
    Client for the Salesforce Bulk APIs.

    Wraps both the classic (async) Bulk API and the Bulk API 2.0
    ("ingest"/"query") endpoints; which URL family is used is tracked via
    ``self.job_type``, which mirrors the active job's type.
    """

    # The active job, or the API error returned while creating/loading it.
    job: Optional[Union[JobInfo, BulkAPIError]]
    # Batches uploaded for the active (classic) job.
    batches: List[BatchInfo]

    @property
    def job(self):
        return self.__job

    @job.setter
    def job(self, value: JobInfo):
        # Infer the job type from the content URL when it was not set
        # explicitly, and mirror it onto the client for URL building.
        if value and isinstance(value, JobInfo):
            value.job_type = value.job_type or (JobTypeEnum.V2Ingest
                                                if value.content_url and 'ingest' in value.content_url
                                                else JobTypeEnum.Classic)
            self.job_type = value.job_type
        else:
            self.job_type = JobTypeEnum.Classic
        self.__job = value

    def __init__(self, job_id: str = None, job: JobInfo = None, **kwargs):
        """
        Args:
            job_id: Id of an existing job to fetch and adopt.
            job: A pre-built JobInfo to adopt (ignored when job_id is given).
            **kwargs: Forwarded to SalesforceAPI.
        """
        super().__init__(**kwargs)
        # BUGFIX: ``batches`` was previously a mutable class attribute, so
        # every BulkAPI instance shared (and appended to) the same list.
        self.batches = []
        # Initialise the job slot first so that methods reached while
        # loading a job by id (e.g. _get_job_url) can safely read self.job.
        self.job = None
        # NOTE: ``job`` is a named parameter, so the original
        # ``kwargs.get('job', ...)`` could never find it in kwargs and always
        # evaluated the default expression below; this is the same behavior.
        self.job = self.set_job(job_id) if job_id else job

    def _get_results_url(self, job_type=None, operation=None):
        """Return the results endpoint for the current job (None if no job)."""
        if isinstance(self.job, JobInfo):
            job_type = JobTypeEnum.V2Ingest if job_type == JobTypeEnum.V2Ingest else JobTypeEnum.Classic
            operation = operation or self.job.operation
            if operation == "query":
                return f"/services/data/v{self.api_version}/jobs/query/{self.job.id}/results"
            if job_type == JobTypeEnum.V2Ingest:
                return f"/services/data/v{self.api_version}/jobs/ingest/{self.job.id}/results"
            else:
                return f'/services/async/{self.api_version}/job/{self.job.id}/batch'

    def _get_job_url(self, job_type=None, operation=None):
        """Return the job-collection endpoint for the given type/operation."""
        job_type = job_type or self.job_type
        operation = operation or self.job.operation if isinstance(self.job, JobInfo) else None
        url = f'/services/data/v{self.api_version}/jobs/ingest' if job_type == JobTypeEnum.V2Ingest else \
            f'/services/async/{self.api_version}/job'
        if operation == 'query':
            url = f'/services/data/v{self.api_version}/jobs/query'
        # BUGFIX: a stray ``else: return '/services/async/.../job'`` here
        # used to discard the V2 ingest URL computed above for every
        # non-query operation, forcing the classic endpoint.
        return url

    def create_job(self, job: JobInfo):
        """Adopt ``job`` and create it in Salesforce unless it already has an id."""
        self.job = job
        job_type = JobTypeEnum.V2Ingest if job.job_type == JobTypeEnum.V2Ingest else JobTypeEnum.Classic
        url = self._get_job_url()
        if not job.id:
            # job_type is a client-side notion, not part of the API payload.
            d = job.dict(by_alias=True, exclude_none=True, exclude={'job_type'})
            job, ok, *_ = self.request(url, method='POST', json=d)
            self.job = self._model_wrap(job, ok, JobInfo)
            if ok:
                self.job.job_type = job_type
        return self.job

    # TODO: Where to put "static helper methods"?
    def get_jobs(self):
        """List all Bulk API 2.0 ingest jobs."""
        jobs, ok, *_ = self.request(f'/services/data/v{self.api_version}/jobs/ingest')
        return self._model_wrap(jobs, ok, JobInfoList, False)

    def upload_data(self, data):
        """Upload one batch of data to the current (classic) job."""
        if isinstance(self.job, BulkAPIError):
            raise BulkException(self.job)
        job_id = self.job.id
        content_type = getattr(ContentTypeHeaderEnum, self.job.content_type).value
        url = f'/services/async/{self.api_version}/job/{job_id}/batch'
        data, ok, *_ = self.request(url, method='POST', data=data, headers={'Content-Type': content_type})
        batch = self._model_wrap(data, ok, BatchInfo, False)
        self.batches.append(batch)
        return batch

    def _model_wrap(self, data: any, ok: bool, model: BaseModel, raise_exception_on_error=False):
        """
        Parse ``data`` into ``model`` on success, or into the matching bulk
        or generic API error model on failure; optionally raise ValueError
        with the parsed error when ``raise_exception_on_error`` is set.
        """
        if ok:
            o = parse_obj_as(model, data)
        else:
            if isinstance(data, list):
                try:
                    o = parse_obj_as(List[BulkAPIError], data)
                except ValidationError:
                    o = parse_obj_as(List[APIError], data)
            else:
                # Some error payloads nest the details under an "error" key.
                if data.get('error'):
                    data = data.get('error')
                try:
                    o = parse_obj_as(BulkAPIError, data)
                except Exception:
                    o = parse_obj_as(APIError, data)
            if raise_exception_on_error:
                raise ValueError(o)
        return o

    def close_job(self, job_id: str = None, state: JobStateEnum = JobStateEnum.UploadComplete):
        """Transition a job (default: the current one) to the given state."""
        job_id = job_id or self.job.id
        url = f'/services/async/{self.api_version}/job/{job_id}'
        data, ok, *_ = self.request(url, method='POST', json={"state": state.value})
        job = self._model_wrap(data, ok, JobInfo, False)
        # Keep the cached job in sync if we just closed it.
        if ok and job.id == self.job.id:
            self.job = job
        return job

    def set_job(self, job_id, job_type=JobTypeEnum.Classic, operation=None):
        """Fetch the job with ``job_id`` from Salesforce and adopt it."""
        url = f"{self._get_job_url(job_type, operation)}/{job_id}"
        data, ok, *_ = self.request(url, method='GET')
        self.job = self._model_wrap(data, ok, JobInfo, False)
        return self.job

    def get_batch_info(self, batch: BatchInfo = None, batch_id: str = None):
        """List batch info for the current job.

        NOTE(review): the ``batch``/``batch_id`` parameters are currently
        unused — the classic API batch-list endpoint is always queried.
        """
        url = f'/services/async/{self.api_version}/job/{self.job.id}/batch'
        data, ok, *_ = self.request(url, method='GET')
        return self._model_wrap(data, ok, BatchInfoList, False)

    def get_batches(self, job_id: str = None):
        """Refresh and return ``self.batches`` for the current job.

        NOTE(review): the ``job_id`` parameter is currently unused — the
        current job's id is always queried.
        """
        url = f'/services/async/{self.api_version}/job/{self.job.id}/batch'
        data, ok, *_ = self.request(url, method='GET')
        self.batches = self._model_wrap(data, ok, BatchInfoList, True).records
        return self.batches

    # Response headers of interest: Sforce-Locator, Sforce-NumberOfRecords,
    # Sforce-Limit-Info.
    def get_results(self, locator=None, max_records=None):
        """Fetch results for the current job, optionally paged via locator."""
        params = {'locator': locator, 'maxRecords': max_records}
        params = {k: v for k, v in params.items() if v}
        url = urllib.parse.urljoin(self._get_results_url(), '?' + urllib.parse.urlencode(params))
        return self.request(url)
# salesforce
A python connector for Salesforce
## connect
```
from salesforce import Connector
conn = Connector(username, password, security_token)
```
### connect to sandbox
```
from salesforce import Connector
conn = Connector(username, password, security_token, subdomain='test')
```
### optional parameters
```
max_reties = set a maximum number of retries
version = set a version other than the default, 44.0
client_id = id of the app to tag the call with
```
## select()
Pass in the name of the object, a list of the field names you want and the criteria with %s where your variables will go and a list of your values to identify which records to select.
Note that you will need to add single quotes around your %s placeholders if the field type requires it, like strings do.
A list of dictionaries will be returned.
```
from salesforce import Connector
conn = Connector(username, password, security_token)
fields = ['firstname', 'lastname']
criteria = 'WHERE id = %s'
values = ['003D000000QV9n2IAD']
results = conn.select('contact', fields, criteria, values)
```
### IN Criteria
This connector is set up to dynamically handle IN criteria.
For example the below will query Salesforce for: SELECT Id FROM Contact WHERE FirstName IN ('Carey', 'Casey', 'Jamie')
Note that in the case of in criteria quotes will automatically be placed in the query for you if the variables in the list are type str.
```
from salesforce import Connector
conn = Connector(username, password, security_token)
fields = ['Id']
criteria = 'WHERE FirstName IN %s'
values = [['Carey', 'Casey', 'Jamie']]
results = conn.select('contact', fields, criteria, values)
```
## create()
Pass in the object name and a dictionary with the data to use in the create.
```
from salesforce import Connector
conn = Connector(username, password, security_token)
data = {"FirstName": "Jamie",
"LastName": "Doe",
        "Email": "jdoe@gmail.com"}
conn.create("Contact", data)
```
## update()
Pass in the id, object name and a dictionary with the data to use in the update.
```
from salesforce import Connector
conn = Connector(username, password, security_token)
record_id = "003advearera"
data = {"FirstName": "Carey"}
conn.update(record_id, "Contact", data)
```
## delete()
Pass in the id of the record to delete
```
from salesforce import Connector
conn = Connector(username, password, security_token)
conn.delete("003advearera", "Contact")
```
## Bulk Operations
These methods use the [Salesforce SObjects Collections endpoints](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_composite_sobjects_collections.htm).
### General options
all_or_none: [Specifies whether you want one error to roll back the batch, or not.](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/requests_composite.htm) Remember batches are 200, so if you pass in over 200 records only the 200 in that batch will be rolled back. Batches before and after will proceed unless they also have errors.
batch_size: This defaults to 200, the maximum that Salesforce allows, but you can specify a smaller batch size if you want.
### bulk_create()
This method enables you to create records in batches of up to 200.
[Salesforce SObject Collections Create Endpoint Reference](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_composite_sobjects_collections_create.htm).
If the records are all of the same type you can pass the object name directly in the bulk_change() call. If they are of different types you will need to use the add_attributes method to set the correct type information before using the bulk_change method.
For example this will create two contacts:
```
from salesforce import Connector
conn = Connector(username, password, security_token)
contacts = [{"FirstName": "Jamie",
"LastName": "Doe",
            "Email": "jdoe@gmail.com",
"AccountId": "001qervaaer"},
{"FirstName": "Carey",
"LastName": "Doe",
            "Email": "cdoe@gmail.com",
"AccountId": "001qervaaer"}
]
conn.bulk_create(contacts, object_name = 'Contact')
```
This will create an Account and a contact:
```
from salesforce import Connector
conn = Connector(username, password, security_token)
contact = {"FirstName": "Jamie",
"LastName": "Doe",
           "Email": "jdoe@gmail.com",
"AccountId": "001qervaaer"}
account = {"Name": "Fake Corp"}
acc_attr = conn.add_attributes(account, "Account")
cont_attr = conn.add_attributes(contact, "Contact")
conn.bulk_change([acc_attr, cont_attr])
```
### bulk_update()
This works the same way as the bulk create above except you need to include the record id in the payload.
[Salesforce SObjects Collections Update Endpoint Reference](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/resources_composite_sobjects_collections_update.htm)
### add_attributes()
This method enables you to easily add the object name to an object to make using the bulk create and update methods easier.
It also gives you the ability to add a referenceId, which makes finding the response for specific records easier, and any other kwargs you might need to add to the [attributes dictionary](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/responses_composite_sobject_tree.htm?search_text=referenceid) within your payload.
```
from salesforce import Connector
conn = Connector(username, password, security_token)
contact = {"FirstName": "Jamie",
"LastName": "Doe",
           "Email": "jdoe@gmail.com",
"AccountId": "001qervaaer"}
attr_cont = conn.add_attributes(contact, "Contact", "jdoe@gmail.com")
```
### bulk_delete()
This method enables you to quickly delete multiple records. It is similar to the other bulk operations, but does not require a record type to be specified and accepts a list of Salesforce record ids instead of a list of dictionaries.
```
from salesforce import Connector
conn = Connector(username, password, security_token)
to_delete = ['0011qewavawer', '003averatea']
response = conn.bulk_delete(to_delete, False)
```
## Create Nested Records
Salesforce gives you the option to create parent and child records in one call. For example creating an Account with Contacts.
[Salesforce Composite Sobject Tree Create Endpoint Reference](https://developer.salesforce.com/docs/atlas.en-us.api_rest.meta/api_rest/dome_composite_sobject_tree_create.htm)
### nested_insert()
```
from salesforce import Connector
data = {
"attributes" : {"type" : "Account", "referenceId" : "ref1"},
"name" : "SampleAccount1",
"phone" : "1234567890",
"website" : "www.salesforce.com",
"Contacts" : {
"records" : [{
"attributes" : {"type" : "Contact", "referenceId" : "ref2"},
"lastname" : "Smith",
"email" : "smith@salesforce.com"
},{
"attributes" : {"type" : "Contact", "referenceId" : "ref3"},
"lastname" : "Evans",
"email" : "evans@salesforce.com"
}]
}
}
conn = Connector(username, password, security_token)
response = conn.nested_insert(data, 'Account')
```
### build_nested()
A helper to generate the data structure for the nested insert
```
from salesforce import Connector
account = {"name" : "SampleAccount1",
"phone" : "1234567890",
"website" : "www.salesforce.com"}
contacts = [{
"lastname" : "Smith",
"email" : "smith@salesforce.com"
},{
"lastname" : "Evans",
"email" : "evans@salesforce.com"
}]
conn = Connector(username, password, security_token)
attr_acc = conn.add_attributes(account, 'account', 'acc1')
attr_conts = [conn.add_attributes(c, 'contact', c['email']) for c in contacts]
nested = conn.build_nested('Account', attr_acc, {'Contacts': attr_conts})
```
## call()
This method enables you to specify the url, method and data to send to Salesforce. You will probably want to use the create, update, delete, select, bulk_create, bulk_update, bulk_delete methods most of the time, but it gives you the option if there is functionality that is not covered here.
| /salesforce-0.0.1.tar.gz/salesforce-0.0.1/README.md | 0.44553 | 0.866923 | README.md | pypi |
from __future__ import unicode_literals, with_statement
import csv
from datetime import datetime, timedelta
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from itertools import chain
import logging
import os
import time
from xml.etree import ElementTree
import requests
from simple_salesforce import Salesforce
logger = logging.getLogger('salesforce-bulk-api')
NAMESPACE = 'http://www.force.com/2009/06/asyncapi/dataload'
def salesforce_session():
"""Returns an authenticated simple_salesforce.Salesforce instance."""
return Salesforce(username=os.environ['SALESFORCE_USERNAME'],
password=os.environ['SALESFORCE_PASSWORD'],
security_token=os.environ['SALESFORCE_SECURITY_TOKEN'],
instance=os.environ['SALESFORCE_INSTANCE'],
sandbox=os.environ.get('SALESFORCE_SANDBOX') == 'True',
version='34.0')
class SalesforceBulkJob:
    """A Python interface to the Salesforce Bulk API.

    Typical usage is a single call to upload(), which creates a job, adds
    the data in batches, closes the job, and waits for every batch to
    finish.  The lower-level methods (create/add_batch/close/wait/results)
    are exposed for finer-grained control.
    """

    # Salesforce caps bulk batches at 10,000 records; stay just below it.
    PUBLISHING_BATCH_SIZE = 9999
    SUPPORTED_OPERATIONS = {'insert', 'update', 'delete', 'upsert'}

    def __init__(self, operation, object_name, external_id_field=None, salesforce=None):
        """Creates a new API interface to Salesforce's bulk API, from which any
        number of jobs may be created. The operation should be one of ('insert',
        'update', 'upsert', or 'delete'), and the object_name should be the
        proper-case name of a Salesforce object (like Lead or Contact)."""
        if not salesforce:
            salesforce = salesforce_session()
        self.session_id = salesforce.session_id
        # Derive the async (bulk) API root from the REST base URL, e.g.
        # .../services/data/v34.0/ -> .../services/async/34.0/
        self.async_url = (salesforce.base_url
                          .replace('/data/', '/async/')
                          .replace('v' + salesforce.sf_version,
                                   salesforce.sf_version))
        assert operation in self.SUPPORTED_OPERATIONS, '{} is not a valid bulk operation.'.format(operation)
        self.operation = operation
        supported_objects = {o['name'] for o in salesforce.describe()['sobjects']}
        assert object_name in supported_objects, '{} is not a known Salesforce object.'.format(object_name)
        self.object_name = object_name
        self.external_id_field = external_id_field
        self.reset()

    def upload(self, fields, data):
        """Given a list of fields and a (potentially very long) iterable of
        tuples matching those fields, perform a complete upload to Salesforce"""
        self.create()
        for chunk in chunked(data, self.PUBLISHING_BATCH_SIZE):
            if chunk:
                self.add_batch(fields, chunk)
        if not self.pending_batches:
            # Nothing to publish: abort so no empty job lingers at Salesforce.
            logger.info('No batches added to job.')
            self.abort()
            return
        self.close()
        self.wait()

    def create(self):
        """Creates a new Salesforce bulk Job and prepares for adding batches."""
        assert not self.job, 'The current job is still open.'
        logger.info('Creating new job to %s %s', self.operation, self.object_name)
        job_request = '''<?xml version="1.0" encoding="UTF-8"?>
            <jobInfo xmlns="{NAMESPACE}">
              <operation>{operation}</operation>
              <object>{object_name}</object>
        '''
        # Only upserts identify records by an external ID field.
        if self.operation == 'upsert':
            job_request += '<externalIdFieldName>{external_id_field}</externalIdFieldName>'
        job_request += '''
              <contentType>CSV</contentType>
            </jobInfo>
        '''
        job_request = job_request.format(
            NAMESPACE=NAMESPACE,
            object_name=self.object_name,
            operation=self.operation,
            external_id_field=self.external_id_field
        )
        response = self.request('post', self.async_url + 'job',
                                data=job_request)
        self.job = bulk_response_attribute(response, 'id')
        self.job_url = self.async_url + 'job/' + self.job
        self.pending_batches = []
        self.is_open = True

    def add_batch(self, fields, data):
        """Given a list of fields and an iterable of tuples matching those
        fields, adds a batch of data to the current job. The data must be
        shorter than PUBLISHING_BATCH_SIZE rows"""
        assert self.job, 'There is no current job.'
        assert self.is_open, 'The current job is not open.'
        logger.info('Adding batch to job %s', self.job_url)
        response = self.request('post', self.job_url + '/batch',
                                data=itercsv(fields, data),
                                content_type='text/csv; charset=UTF-8')
        batch = bulk_response_attribute(response, 'id')
        self.pending_batches.append(batch)

    def close(self):
        """Closes the current job, which signals to Salesforce that no further
        batches will be added to it."""
        logger.info('Closing job %s', self.job_url)
        self.set_job_state('Closed')
        self.is_open = False

    def abort(self):
        """Aborts the current job, and resets the instance"""
        logger.info('Aborting job %s', self.job_url)
        self.set_job_state('Aborted')
        self.reset()

    def set_job_state(self, state):
        """Sets the current job to the given state ("Closed" or "Aborted")"""
        assert self.job, 'There is no current job.'
        assert self.is_open, 'The current job is not open.'
        state_request = '''<?xml version="1.0" encoding="UTF-8"?>
            <jobInfo xmlns="{NAMESPACE}">
              <state>{state}</state>
            </jobInfo>
        '''.format(NAMESPACE=NAMESPACE, state=state)
        self.request('post', self.job_url, data=state_request, expected_response=200)

    def wait(self):
        """Waits for all batches of the current job to finish, polling
        Salesforce every 10 seconds and moving completed batches from
        pending_batches to finished_batches."""
        assert self.job, 'There is no current job.'
        assert not self.is_open, 'The current job must be closed before waiting.'
        self.finished_batches = []
        total = len(self.pending_batches)
        while self.pending_batches:
            finished = []
            for i, batch in enumerate(self.pending_batches):
                batch_url = self.job_url + '/batch/' + batch
                response = self.request('get', batch_url, expected_response=200)
                state = bulk_response_attribute(response, 'state')
                if state not in {'Queued', 'InProgress'}:
                    finished.append(i)
                    # logger.warn is deprecated; use logger.warning.
                    log_method = (logger.warning
                                  if state in {'Failed', 'Not Processed'}
                                  else logger.info)
                    log_method('Batch %s (%s/%s) finished with state %s',
                               batch_url, total - len(self.pending_batches) + len(finished), total, state)
            # Pop from the end first so earlier indices stay valid.
            for i in sorted(finished, reverse=True):
                self.finished_batches.append(self.pending_batches.pop(i))
            if self.pending_batches:
                logger.info('Waiting for %s more batches to complete...', len(self.pending_batches))
                time.sleep(10)

    def results(self):
        """Yields one (id, success, created, error) tuple per uploaded row by
        downloading each finished batch's result CSV."""
        assert self.job, 'There is no current job.'
        assert not self.is_open, 'The current job must be closed before getting results.'
        assert self.finished_batches is not None, 'SalesforceBulkJob.wait() should be called before getting results.'
        for batch in self.finished_batches:
            result_url = self.job_url + '/batch/' + batch + '/result'
            response = self.request('get', result_url, expected_response=200)
            reader = csv.reader(StringIO(response.decode('utf-8')))
            next(reader)  # consume the header row
            for id, success, created, error in reader:
                yield id, success == 'true', created == 'true', error

    def reset(self):
        """Resets the state of this job to that of a new instance. This *does
        not* change anything that has happened so far at Salesforce. See
        `.abort()` to cancel the currently open job."""
        self.is_open = False
        self.job = self.job_url = self.pending_batches = self.finished_batches = None

    def request(self, method, url,
                data=None,
                content_type='application/xml; charset=UTF-8',
                expected_response=201):
        """Performs an HTTP request against Salesforce's bulk API, and validates
        the expected response. Returns the content of the response.

        Retries up to three times (5s apart) on ConnectionError or on
        502/503 responses; any other status is checked against
        expected_response and raises on mismatch."""
        headers = {'X-SFDC-Session': self.session_id}
        kwargs = {'headers': headers}
        if data is not None:
            headers['Content-Type'] = content_type
            kwargs['data'] = data
        RETRIES, LAST, WAIT = 3, 2, timedelta(seconds=5)
        for retry in range(RETRIES):
            try:
                response = getattr(requests, method)(url, **kwargs)
            except requests.exceptions.ConnectionError:
                if retry == LAST:
                    raise
                logger.info('ConnectionError from %r %r.  Retrying in %r...',
                            method, url, WAIT, exc_info=True)
            else:
                if retry < LAST and response.status_code in (502, 503):
                    logger.info('%r response from %r %r.  Retrying in %r...',
                                response.status_code, method, url, WAIT)
                else:
                    break
            time.sleep(WAIT.total_seconds())
        if response.status_code != expected_response:
            raise Exception(('Unexpected status {} from '
                             'Salesforce async API.  Details: {}'
                             ).format(response.status_code, response.content))
        return response.content
def bulk_response_attribute(response, attribute):
    """Given a Salesforce bulk API response bytes, and the name of an attribute,
    find it in the given document, or raise if it isn't present"""
    document = ElementTree.fromstring(response)
    # findtext needs the Clark-notation ("{namespace}tag") qualified name.
    value = document.findtext('{{{}}}{}'.format(NAMESPACE, attribute))
    if value:
        return value
    raise Exception(('<{}> not found in Salesforce '
                     'async API response.  Response: {}'
                     ).format(attribute, response))
def chunked(iterable, size):
    """Yields lists of at most *size* items from *iterable*; the final chunk
    may be smaller than *size*.

    When *size* is falsy (0 or None), the items are yielded one at a time,
    unwrapped, preserving the historical "no chunking" behavior.
    """
    if not size:
        for item in iterable:
            yield item
        return
    chunk = []
    # The original looped with enumerate() but never used the index.
    for item in iterable:
        chunk.append(item)
        if len(chunk) == size:
            yield chunk
            chunk = []
    if chunk:
        yield chunk
def itercsv(headers, data):
    """Given a list of headers and a (potentially large) iterable of tuples,
    yield the UTF-8 encoded lines of a CSV file representing that data: one
    header row first, then one encoded line per data row."""
    buffer = StringIO()
    writer = csv.writer(buffer)

    def encode_row(row):
        # Write one row, grab its encoded bytes, then rewind and clear the
        # buffer so it can be reused for the next row.
        writer.writerow(row)
        buffer.seek(0)
        encoded = buffer.read().encode('utf-8')
        buffer.truncate(0)
        buffer.seek(0)
        return encoded

    yield encode_row(headers)
    for row in data:
        yield encode_row(row)
from cmd import Cmd
from functools import lru_cache
from typing import List
from simple_salesforce import Salesforce
from tabulate import tabulate
import pprint
from sfcli.util import bulk_change, bulk_delete, write_results, start_session, strtime, print_error
import sys
class Cli(Cmd):
    """Interactive Salesforce command shell built on cmd.Cmd.

    Each ``do_<name>`` method implements a shell command and each matching
    ``help_<name>`` method prints its usage text.  cmd.Cmd invokes every
    ``do_*`` handler with the command's argument line as a single (possibly
    empty) string.
    """
    intro = 'Welcome to the Salesforce Command Line Interface. Type "help", or "?" to list commands.\n'
    prompt = '(Sf|default)'

    def __init__(self):
        super(Cli, self).__init__()
        self.env = 'default'  # suffix selecting the credential set in the .env file
        self.vs = 44.0        # Salesforce REST API version
        self.threads = 4      # worker threads for bulk create/update/delete
        self.session = start_session(self.vs, self.env)
        self._set_base_url()

    def _set_base_url(self):
        """Recompute the REST endpoint root for the current session/version."""
        self.base_url = f'https://{self.session.sf_instance}/services/data/v{self.vs}'

    def _update_env(self):
        """Re-authenticate after the environment or API version changes."""
        self.session = start_session(self.vs, self.env)
        self._set_base_url()
        print('environment updated')

    def help_setthreads(self):
        print('args:\n'
              'num_threads: int\n'
              'set the number of threads to use in threaded processes like create and update')

    def do_setthreads(self, args):
        # cmd passes arguments as a raw string; coerce to int so downstream
        # code receives a usable thread count instead of a string.
        try:
            self.threads = int(args)
        except ValueError:
            print('num_threads must be an integer, e.g. "setthreads 8"')

    def help_getthreads(self):
        print('prints the number of threads currently set')

    def do_getthreads(self, args):
        print(self.threads)

    def help_getenv(self):
        print('show the current environment')

    def do_getenv(self, args):
        print(self.env)

    def help_getversion(self):
        print('prints the version of the API currently in use')

    def do_getversion(self, args):
        # The args parameter is required even though unused: cmd.Cmd always
        # passes the argument line, so omitting it raised a TypeError.
        print(self.vs)

    def help_setenv(self):
        print('args:\n'
              'environment_suffix: str (e.g. prod)\n'
              'Log into a new Salesforce Environment\n'
              'based on the environment suffix you input (this suffix will be\n'
              'appended to the default environment variable names to retrieve\n'
              'the desired variables from the .env file')

    def do_setenv(self, env_suffix: str = None):
        self.env = env_suffix if env_suffix else 'default'
        self.prompt = f"(Sf|{self.env})"
        self._update_env()

    def help_setversion(self):
        print('args:\n'
              'version: str (e.g. input 42.0)\n'
              'Input the version of the API being used')

    def do_setversion(self, args):
        # Keep the version a float, matching the type it is initialized with.
        try:
            self.vs = float(args)
        except ValueError:
            print('version must be numeric, e.g. "setversion 42.0"')
            return
        self._update_env()

    def _query_star(self, query: str) -> str:
        """Expand "select * from <object>" into an explicit field list."""
        split = query.split()
        # Need at least "select * from <object>" (4 tokens) before indexing
        # the object name; the old `> 2` guard allowed an IndexError.
        if len(split) > 3 and split[1] == '*':
            object_name = split[3]
            all_fields = self._get_object_fields(object_name)
            fields = ','.join(all_fields)
            star_query = query.replace('*', fields, 1)
            return star_query
        else:
            return query

    def _query(self, query: str) -> list:
        """Run a SOQL query and return its records without the noise
        'attributes' key; returns None when the API gives no result."""
        final_query = self._query_star(query)
        results = self.session.query_all(final_query)
        if results:
            records = results['records']
            for r in records:
                del r['attributes']
            return records

    def help_select(self):
        print('type in a SOQL query as you normally would. Results will be\n'
              'printed in the console. supports select * from!!')

    @print_error
    def do_select(self, args):
        query = f'select {args}'
        records = self._query(query)
        if records:
            print(tabulate(records, headers='keys'))

    @print_error
    def do_SELECT(self, args):
        # Convenience alias so uppercase SOQL works too.
        self.do_select(args)

    def help_download(self):
        print('type in a SOQL query after the "download" keyword. Results will be saved in a .csv')

    @print_error
    def do_download(self, args: str):
        records = self._query(args)
        if records:
            write_results(f'query_{strtime()}', records)

    def _get_object_names(self) -> List[list]:
        """Fetch every sObject name as a single-element row for tabulate;
        returns None when the describe call yields nothing."""
        url = f'{self.base_url}/sobjects'
        results = self.session._call_salesforce('GET', url)
        if results:
            objects = results.json()['sobjects']
            names = [[o['name']] for o in objects]
            return names
        else:
            return None

    def help_objects(self):
        print('args:\n'
              'filter: Optional[str]\n'
              'returns a list of all the objects in the org.\n'
              'Optional string parameter to filter the objects by.')

    def do_objects(self, args: str):
        names = self._get_object_names()
        names = [n for n in names if args.lower() in n[0].lower()] if args else names
        if names:
            print(tabulate(names, headers=['name']))
        else:
            print('no results')

    # NOTE: lru_cache on a method keeps the Cli instance alive for the
    # cache's lifetime; acceptable here because the shell is a singleton.
    @lru_cache(maxsize=32)
    def _get_object_fields(self, object_name: str) -> List[str]:
        """Fetch (and cache) the API field names of a Salesforce object."""
        url = f'{self.base_url}/sobjects/{object_name}/describe/'
        results = self.session._call_salesforce('GET', url).json()['fields']
        return [r['name'] for r in results]

    def help_fields(self):
        print('args:\n'
              'filter: Optional[str]\n'
              'Input the name of the object to return the fields for e.g. order\n'
              'Optional string parameter to filter the fields by e.g. name.')

    def do_fields(self, args: str):
        arg_lst = args.split()
        object_name = arg_lst[0]
        results = self._get_object_fields(object_name)
        if len(arg_lst) > 1:
            filter = arg_lst[1]
            tab_results = [[r] for r in results if filter.lower() in r.lower()]
        else:
            tab_results = [[r] for r in results]
        print(tabulate(tab_results, headers=['field_name']))

    def help_update(self):
        print('args:\n'
              'object: str\n'
              'file_name: str\n'
              'input the name of the object to update and the name of the file including the path.\n'
              'This file should contain columns for the fields you want set on create of these records.\n'
              'One of the columns must have the id header and contain the id of the record to be updated.\n'
              'Note the columns headers in the file must match the API names of the fields in Salesforce\n'
              'Optional parameter to specify the number of threads to use (default is 4)')

    @print_error
    def do_update(self, args: str):
        bulk_change(args, 'PATCH', self.session, self.base_url, self.threads)

    def help_create(self):
        print('args:\n'
              'object: str\n'
              'file_name: str\n''input the name of the object to update and the name of the file including the path.\n'
              'This file should contain columns for the fields you want set on create of these records.\n'
              'Note the columns headers in the file must match the API names of the fields in Salesforce\n'
              'Optional parameter to specify the number of threads to use (default is 4)')

    @print_error
    def do_create(self, args: str):
        bulk_change(args, 'POST', self.session, self.base_url, self.threads)

    def help_delete(self):
        print('args:\n'
              'file_name: str\n''input the name of the object to update and the name of the file including the path.\n'
              'The file should contain one column titled id with the ids of all the records to delete.\n'
              'These records do not need to be of the same type.\n'
              'Optional parameter to specify the number of threads to use (default is 4)')

    @print_error
    def do_delete(self, args: str):
        bulk_delete(args, self.session, self.base_url, self.threads)

    def help_end(self):
        print('Exits the shell')

    def do_end(self, args: str = None):
        sys.exit()
from dateutil.tz import tzlocal, tzutc
from datetime import datetime
import logging
import numbers
import six
log = logging.getLogger('salesmachine')
def is_naive(dt):
    """Determines if a given datetime.datetime is naive."""
    tz = dt.tzinfo
    if tz is None:
        return True
    # A tzinfo that reports no UTC offset still counts as naive.
    return tz.utcoffset(dt) is None
def total_seconds(delta):
    """Determines total seconds with python < 2.7 compat."""
    # Equivalent to timedelta.total_seconds() (unavailable on 2.6): fold
    # days and seconds into microseconds, then scale back to seconds.
    whole_seconds = delta.seconds + delta.days * 24 * 3600
    return (delta.microseconds + whole_seconds * 1e6) / 1e6
def guess_timezone(dt):
    """Attempts to convert a naive datetime to an aware datetime."""
    if not is_naive(dt):
        return dt
    # A naive value within a few seconds of "now" was almost certainly
    # produced by datetime.now(), so the local timezone is the right guess.
    freshness = datetime.now() - dt
    if total_seconds(freshness) < 5:
        return dt.replace(tzinfo=tzlocal())
    # Otherwise the best remaining assumption is UTC.
    return dt.replace(tzinfo=tzutc())
def clean(item):
    """Recursively convert *item* into JSON-serializable primitives."""
    if isinstance(item, (set, list, tuple)):
        return _clean_list(item)
    if isinstance(item, dict):
        return _clean_dict(item)
    if isinstance(item, (six.string_types, bool, numbers.Number, datetime,
                         type(None))):
        # Already serializable as-is.
        return item
    return _coerce_unicode(item)
def _clean_list(list_):
    """Return a new list with every element passed through clean()."""
    return list(map(clean, list_))
def _clean_dict(dict_):
    """Clean each value of *dict_*; entries whose values cannot be made
    JSON-serializable are logged and dropped."""
    data = {}
    for k, v in six.iteritems(dict_):
        try:
            data[k] = clean(v)
        except TypeError:
            # Use lazy %-style logging args so the message is only
            # formatted when the warning is actually emitted.
            log.warning('Dictionary values must be serializeable to '
                        'JSON "%s" value %s of type %s is unsupported.',
                        k, v, type(v))
    return data
def _coerce_unicode(cmplx):
    """Best-effort decode of *cmplx* to a unicode string.

    Returns the decoded text, or None when the object has no .decode()
    method; any decoding error (e.g. UnicodeDecodeError) propagates.
    """
    try:
        return cmplx.decode("utf-8", "strict")
    except AttributeError:
        # The object cannot be decoded (e.g. it is already text on
        # Python 3).  The previous handler crashed while trying to
        # report this (`":".join(exception)` is not valid); just log
        # the offending value and drop it, as originally intended.
        log.warning('Error decoding: %s', cmplx)
        return None
import re
import datetime
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class SalesManagoEventData:
    '''
    Class for interfacing with event instances of SalesManago platform.
    Structure is valid for following API actions: /api/v2/contact/addContactExtEvent

    Required: contactExtEventType, eventDate (an int epoch timestamp), owner,
    and at least one of email / contactId (enforced in __post_init__).

    Implementation note: the validated attributes (contactExtEventType,
    eventDate, owner, email) use the dataclass/property pattern -- the public
    annotation is shadowed by a property defined further down in the class
    body, so the generated __init__ routes every assignment through the
    validating setter and the value itself lives in the underscore-prefixed
    backing field.
    '''
    # Event type; must be one of VALID_EVENTS (enforced by the setter).
    contactExtEventType: str
    _contactExtEventType: str = field(init=False, repr=False, default=None)
    # Event time as an epoch timestamp (int, enforced by the setter).
    eventDate: int
    _eventDate: int = field(init=False, repr=False, default=None)
    # E-mail address of the SalesManago account owner (validated).
    owner: str
    _owner: str = field(init=False, repr=False, default=None)
    # Target contact: either a SalesManago contact id or a validated e-mail.
    contactId: Optional[str] = None
    email: Optional[str] = None
    _email: str = field(init=False, repr=False, default=None)
    # Optional event payload fields, forwarded inside the contactEvent
    # structure by requestDict().
    forceOptIn: Optional[bool] = None
    description: Optional[str] = None
    products: Optional[str] = None
    location: Optional[str] = None
    value: Optional[str] = None
    detail1: Optional[str] = None
    detail2: Optional[str] = None
    detail3: Optional[str] = None
    detail4: Optional[str] = None
    detail5: Optional[str] = None
    detail6: Optional[str] = None
    detail7: Optional[str] = None
    detail8: Optional[str] = None
    detail9: Optional[str] = None
    detail10: Optional[str] = None
    detail11: Optional[str] = None
    detail12: Optional[str] = None
    detail13: Optional[str] = None
    detail14: Optional[str] = None
    detail15: Optional[str] = None
    detail16: Optional[str] = None
    detail17: Optional[str] = None
    detail18: Optional[str] = None
    detail19: Optional[str] = None
    detail20: Optional[str] = None
    externalId: Optional[str] = None
    shopDomain: Optional[str] = None
    # Event types accepted by the SalesManago API (see contactExtEventType).
    VALID_EVENTS = [
        'PURCHASE', 'CART', 'VISIT', 'PHONE_CALL', 'OTHER', 'RESERVATION', 'CANCELLED',
        'ACTIVATION', 'MEETING', 'OFFER', 'DOWNLOAD', 'LOGIN', 'TRANSACTION', 'CANCELLATION',
        'RETURN', 'SURVEY', 'APP_STATUS', 'APP_TYPE_WEB', 'APP_TYPE_MANUAL', 'APP_TYPE_RETENTION',
        'APP_TYPE_UPSALE', 'LOAN_STATUS', 'LOAN_ORDER', 'FIRST_LOAN', 'REPEATED_LOAN'
    ]
    def __post_init__(self):
        """Validate that all required fields were supplied."""
        if not self.owner:
            raise ValueError('owner[str] is required')
        if not self.email and not self.contactId:
            raise ValueError('email[str] or contactId[str] is required')
        if not self.eventDate:
            raise ValueError('eventDate[int] timestamp is required')
        if not self.contactExtEventType:
            raise ValueError('contactExtEventType[str] is required')
    @property
    def email(self) -> str:
        return self._email
    @email.setter
    def email(self, email: str) -> None:
        # When the caller omits the field, the dataclass __init__ passes the
        # class attribute default -- which is this property object itself,
        # because the property definition shadows the field annotation -- so
        # fall back to the current backing value.
        if type(email) is property:
            email = self._email
        if email and not self._validate_email(email):
            raise ValueError(' %s is not valid CLIENT e-mail' % email)
        self._email = email
    @property
    def owner(self) -> str:
        return self._owner
    @owner.setter
    def owner(self, owner: str) -> None:
        # Same property-as-default sentinel handling as the email setter.
        if type(owner) is property:
            owner = self._owner
        if owner and not self._validate_email(owner):
            raise ValueError('%s is not valid OWNER e-mail' % owner)
        self._owner = owner
    def _validate_email(self, email):
        """Return a regex match object when *email* looks like an address, else None."""
        mailre = re.compile(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)")
        return mailre.match(email)
    @property
    def eventDate(self) -> int:
        return self._eventDate
    @eventDate.setter
    def eventDate(self, eventDate: int) -> None:
        if type(eventDate) is property:
            eventDate = self._eventDate
        if eventDate and not isinstance(eventDate, int):
            raise TypeError('eventDate should be int, not %s' % type(eventDate))
        self._eventDate = eventDate
    @property
    def contactExtEventType(self) -> str:
        return self._contactExtEventType
    @contactExtEventType.setter
    def contactExtEventType(self, contactExtEventType: str) -> None:
        if type(contactExtEventType) is property:
            contactExtEventType = self._contactExtEventType
        if contactExtEventType and contactExtEventType not in self.VALID_EVENTS:
            raise ValueError('contactExtEventType should be on of %s' % self.VALID_EVENTS)
        self._contactExtEventType = contactExtEventType
    def requestDict(self) -> dict:
        """Build the JSON-serializable payload for
        /api/v2/contact/addContactExtEvent: top-level identity fields plus a
        nested 'contactEvent' structure; None-valued fields are omitted."""
        ALL_ATTRS = [
            'owner', 'email', 'contactId', 'forceOptIn'
        ]
        CONTACT_EVENT_ATTRS = [
            'description', 'products', 'location', 'value', 'externalId', 'shopDomain'
        ]
        CONTACT_EVENT_ATTRS.extend(['detail%s' % did for did in range(1,21)])
        rdata = {
            k: getattr(self, k) for k in ALL_ATTRS if hasattr(self, k) and getattr(self, k) is not None
        }
        rdata['contactEvent'] = {
            k: getattr(self, k) for k in CONTACT_EVENT_ATTRS if hasattr(self, k) and getattr(self, k) is not None
        }
        rdata['contactEvent']["contactExtEventType"] = self.contactExtEventType
        rdata['contactEvent']["date"] = self.eventDate
        return rdata
import re
import datetime
from typing import Optional
from dataclasses import dataclass, field
@dataclass
class SalesManagoClientData:
    '''
    Class for interfacing with client instances of SalesManago platform.
    Structure was valid for following API actions: insert / upsert / update / batchupsert

    Implementation note: email, owner, state, newEmail and birthday are
    validated attributes using the dataclass/property pattern -- the field
    annotation is shadowed by the property defined further down, so the
    generated __init__ routes assignment through the validating setter and
    the value lives in the underscore-prefixed backing field.
    '''
    email: str
    _email: str = field(init=False, repr=False, default=None)
    owner: str
    _owner: str = field(init=False, repr=False, default=None)
    state: Optional[str] = None
    _state: str = field(init=False, repr=False, default=None)
    name: Optional[str] = None
    phone: Optional[str] = None
    fax: Optional[str] = None
    company: Optional[str] = None
    externalId: Optional[str] = None
    newEmail: Optional[str] = None
    _newEmail: str = field(init=False, repr=False, default=None)
    # No literal default: the birthday property defined below becomes the
    # class attribute, so the dataclass treats this field as defaulted and
    # the setter falls back to the backing value (None) when it is omitted.
    birthday: Optional[datetime.date]
    _birthday: datetime.date = field(init=False, repr=False, default=None)
    address_streetAddress: Optional[str] = None
    address_zipCode: Optional[str] = None
    address_city: Optional[str] = None
    address_country: Optional[str] = None
    lang: Optional[str] = None
    tags: list = field(default_factory=list)
    removeTags: list = field(default_factory=list)
    forceOptOut: Optional[bool] = None
    forceOptIn: Optional[bool] = None
    forcePhoneOptOut: Optional[bool] = None
    forcePhoneOptIn: Optional[bool] = None
    useApiDoubleOptIn: Optional[bool] = None
    properties: dict = field(default_factory=dict)
    province: Optional[str] = None

    # Contact states accepted by the SalesManago API.
    VALID_STATES = ['CUSTOMER', 'PROSPECT', 'PARTNER', 'OTHER', 'UNKNOWN']

    def __post_init__(self):
        """Validate required fields and container types after the generated __init__."""
        if not self.owner:
            raise ValueError('owner is required')
        if self.tags and not isinstance(self.tags, list):
            raise TypeError('tags should be a list')
        if self.removeTags and not isinstance(self.removeTags, list):
            raise TypeError('removeTags should be a list')
        if self.properties and not isinstance(self.properties, dict):
            # The check is (and always was) for a dict; the old error
            # message incorrectly said "list".
            raise TypeError('properties should be a dict')

    def _validate_email(self, email):
        """Return a regex match object when *email* looks like an address, else None."""
        mailre = re.compile(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)")
        return mailre.match(email)

    @property
    def email(self) -> str:
        return self._email

    @email.setter
    def email(self, email: str) -> None:
        # When the field is omitted, the dataclass passes the property
        # object itself (it shadows the annotation as the class default);
        # detect that sentinel and keep the backing value.
        if type(email) is property:
            email = self._email
        if email and not self._validate_email(email):
            raise ValueError(' %s is not valid CLIENT e-mail' % email)
        self._email = email

    @property
    def owner(self) -> str:
        return self._owner

    @owner.setter
    def owner(self, owner: str) -> None:
        if type(owner) is property:
            owner = self._owner
        if owner and not self._validate_email(owner):
            raise ValueError('%s is not valid OWNER e-mail' % owner)
        self._owner = owner

    @property
    def newEmail(self) -> str:
        return self._newEmail

    @newEmail.setter
    def newEmail(self, newEmail: str) -> None:
        if type(newEmail) is property:
            newEmail = self._newEmail
        if newEmail and not self._validate_email(newEmail):
            raise ValueError(' %s is not valid NEW e-mail' % self.newEmail)
        self._newEmail = newEmail

    @property
    def birthday(self) -> Optional[datetime.date]:
        return self._birthday

    @birthday.setter
    def birthday(self, birthday: datetime.date) -> None:
        if type(birthday) is property:
            birthday = self._birthday
        if birthday and not isinstance(birthday, datetime.date):
            raise TypeError('birthday should be datetime.date, not %s' % type(birthday))
        self._birthday = birthday

    @property
    def birthDateConverted(self) -> Optional[str]:
        """Birthday rendered in the YYYYMMDD form the API expects, or None."""
        if not self._birthday:
            return None
        return self._birthday.strftime('%Y%m%d')

    @property
    def state(self) -> Optional[str]:
        return self._state

    @state.setter
    def state(self, state: str) -> None:
        if type(state) is property:
            state = self._state
        if state and state not in self.VALID_STATES:
            raise ValueError('state should be on of %s' % self.VALID_STATES)
        self._state = state

    def add_tag(self, tag: str) -> None:
        """Queue *tag* to be added to the contact."""
        self.tags.append(tag)

    def remove_tag(self, tag: str) -> None:
        """Withdraw a previously queued tag addition."""
        self.tags.remove(tag)

    def add_removeTags(self, tag: str) -> None:
        """Queue *tag* to be removed from the contact."""
        self.removeTags.append(tag)

    def remove_removeTags(self, tag: str) -> None:
        """Withdraw a previously queued tag removal."""
        self.removeTags.remove(tag)

    def add_property(self, key: str, value: str) -> None:
        """Set a custom contact property."""
        self.properties[key] = value

    def remove_property(self, key: str) -> None:
        """Delete a custom contact property; raises KeyError when absent."""
        del self.properties[key]

    @property
    def address(self) -> Optional[dict]:
        """Collect the address_* fields into the API's nested address dict,
        or None when no address component is set."""
        ADDRESS_ATTRS = [
            'address_streetAddress',
            'address_zipCode',
            'address_city',
            'address_country',
        ]
        if not any([getattr(self, key) for key in ADDRESS_ATTRS]):
            return None
        return {
            k.replace('address_', ''): getattr(self, k) for k in ADDRESS_ATTRS if getattr(self, k)
        }

    def contact(self, request_format) -> dict:
        """Build the nested 'contact' structure; 'update' requests carry the
        e-mail at the top level instead, so it is excluded here."""
        CONTACT_ATTRS = [
            'email',
            'fax',
            'name',
            'phone',
            'company',
            'state',
            'address'
        ]
        if request_format == 'update':
            CONTACT_ATTRS.remove('email')
        return {
            k: getattr(self, k) for k in CONTACT_ATTRS if getattr(self, k) is not None
        }

    ALLOWED_FORMATS = ['insert', 'update', 'delete', 'upsert']

    def requestDict(self, request_format) -> dict:
        """Build the JSON-serializable payload for the given API action
        ('insert', 'update', 'delete' or 'upsert'); None-valued and empty
        fields are omitted."""
        if request_format not in self.ALLOWED_FORMATS:
            raise ValueError('Allowed formats are %s' % self.ALLOWED_FORMATS)

        # delete is super short
        if request_format == 'delete':
            return {
                'email': self.email,
                'owner': self.owner
            }

        ALL_ATTRS = [
            'owner', 'externalId', 'newEmail', 'lang', 'forceOptOut', 'forceOptIn',
            'forcePhoneOptOut', 'forcePhoneOptIn', 'useApiDoubleOptIn', 'province', 'birthday'
        ]
        ITERABLE_ATTRS = [
            'tags', 'removeTags', 'properties'
        ]

        rdata = {
            k: getattr(self, k) for k in ALL_ATTRS if hasattr(self, k) and getattr(self, k) is not None
        }
        rdata.update({
            k: getattr(self, k) for k in ITERABLE_ATTRS if any(getattr(self, k))
        })

        c_data = self.contact(request_format)
        if any(c_data):
            rdata['contact'] = c_data

        # The API wants the birthday serialized as YYYYMMDD.
        if self.birthday:
            rdata['birthday'] = self.birthDateConverted

        # those requests need to have email on them ...
        if request_format == 'update':
            rdata['email'] = self.email
        if request_format == 'upsert':
            rdata['email'] = self.email

        return rdata
# salespyforce
A Python toolset for performing Salesforce API calls
<table>
<tr>
<td>Latest Stable Release</td>
<td>
<a href='https://pypi.org/project/salespyforce/'>
<img alt="PyPI" src="https://img.shields.io/pypi/v/salespyforce">
</a>
</td>
</tr>
<tr>
<td>Latest Beta/RC Release</td>
<td>
<a href='https://pypi.org/project/salespyforce/#history'>
<img alt="PyPI" src="https://img.shields.io/badge/pypi-1.0.0b1-blue">
</a>
</td>
</tr>
<tr>
<td>Build Status</td>
<td>
<a href="https://github.com/jeffshurtliff/salespyforce/blob/master/.github/workflows/pythonpackage.yml">
<img alt="GitHub Workflow Status"
src="https://img.shields.io/github/actions/workflow/status/jeffshurtliff/salespyforce/pythonpackage.yml?branch=master">
</a>
</td>
</tr>
<tr>
<td>Supported Versions</td>
<td>
<a href='https://pypi.org/project/salespyforce/'>
<img alt="PyPI - Python Version" src="https://img.shields.io/pypi/pyversions/salespyforce">
</a>
</td>
</tr>
<tr>
<td>Code Coverage</td>
<td>
<a href="https://codecov.io/gh/jeffshurtliff/salespyforce">
<img src="https://codecov.io/gh/jeffshurtliff/salespyforce/branch/master/graph/badge.svg" />
</a>
</td>
</tr>
<tr>
<td>Documentation</td>
<td>
<a href='https://salespyforce.readthedocs.io/en/latest/?badge=latest'>
<img src='https://readthedocs.org/projects/salespyforce/badge/?version=latest' alt='Documentation Status' />
</a>
</td>
</tr>
<tr>
<td>Security Audits</td>
<td>
<a href="https://github.com/marketplace/actions/python-security-check-using-bandit">
<img alt="Bandit" src="https://img.shields.io/badge/security-bandit-yellow.svg">
</a>
</td>
</tr>
<tr>
<td>License</td>
<td>
<a href="https://github.com/jeffshurtliff/salespyforce/blob/master/LICENSE">
<img alt="License (GitHub)" src="https://img.shields.io/github/license/jeffshurtliff/salespyforce">
</a>
</td>
</tr>
<tr>
<td style="vertical-align: top;">Issues</td>
<td>
<a href="https://github.com/jeffshurtliff/salespyforce/issues">
<img style="margin-bottom:5px;" alt="GitHub open issues" src="https://img.shields.io/github/issues-raw/jeffshurtliff/salespyforce"><br />
</a>
<a href="https://github.com/jeffshurtliff/salespyforce/issues">
<img alt="GitHub closed issues" src="https://img.shields.io/github/issues-closed-raw/jeffshurtliff/salespyforce">
</a>
</td>
</tr>
<tr>
<td style="vertical-align: top;">Pull Requests</td>
<td>
<a href="https://github.com/jeffshurtliff/salespyforce/pulls">
<img style="margin-bottom:5px;" alt="GitHub pull open requests" src="https://img.shields.io/github/issues-pr-raw/jeffshurtliff/salespyforce"><br />
</a>
<a href="https://github.com/jeffshurtliff/salespyforce/pulls">
<img alt="GitHub closed pull requests" src="https://img.shields.io/github/issues-pr-closed-raw/jeffshurtliff/salespyforce">
</a>
</td>
</tr>
</table>
## Installation
The package can be installed via pip using the syntax below.
```sh
pip install salespyforce --upgrade
```
You may also clone the repository and install from source using below.
```sh
git clone git://github.com/jeffshurtliff/salespyforce.git
cd salespyforce/
python setup.py install
```
## Change Log
The change log can be found in the [documentation](https://salespyforce.readthedocs.io/en/latest/changelog.html).
## Usage
This section provides basic usage instructions for the package.
### Importing the package
Rather than importing the base package, it is recommended that you import the primary `Salesforce` class using the
syntax below.
```python
from salespyforce import Salesforce
```
### Initializing a Salesforce object instance
The primary `Salesforce` object serves many purposes, the most important being to establish a connection to the
Salesforce environment with which you intend to interact. As such, when initializing an instance of the `Salesforce`
object, you will need to pass it the following information:
* The username and password of the API user
* The Organization ID of the Salesforce environment
* The Base URL and Endpoint URL
* The client ID, client secret, and security token
The `Salesforce` object can be initiated in two different ways:
* Passing the information directly into the object
* Leveraging a "helper" configuration file
#### Passing the information directly into the object
The environment and connection information can be passed directly into the `Salesforce` object when initializing it,
as demonstrated in the example below.
```python
sfdc = Salesforce(
username='admin.user@example.com',
password='example123',
org_id='4DJ000000CeMFYA0',
base_url='https://example-dev-ed.lightning.force.com/',
endpoint_url='https://example-dev-ed.my.salesforce.com/services/oauth2/token',
client_id='3MVG9gTv.DiE8cKRIpEtSN_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX_TAoy1Zk_AKGukbqa4KbhM6nVYVUu6md',
client_secret='7536F4A7865559XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX53797BEA88174713CC3C',
security_token='2muXaXXXXXXXXXXXXXXXoVKxz'
)
```
#### Leveraging a "helper" configuration file
As an alternative to passing the connection information to the `Salesforce` class in the way demonstrated above, a
"helper" configuration file in `yaml` or `json` format can be leveraged instead and passed to the `Salesforce` class
when initializing the object.
This is an example of how the configuration file would be written in YAML format:
```yaml
# Helper configuration file for the SalesPyForce package
# Define how to obtain the connection information
connection:
# Define the credentials
username: admin.user@example.com
password: example123
# Define the org information
org_id: 4DJ000000CeMFYA0
base_url: https://example-dev-ed.lightning.force.com/
endpoint_url: https://example-dev-ed.my.salesforce.com/services/oauth2/token
# Define the API connection info
client_key: 3MVG9gTv.DiE8cKRIpEtSN_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX_TAoy1Zk_AKGukbqa4KbhM6nVYVUu6md
client_secret: 7536F4A7865559XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX53797BEA88174713CC3C
security_token: 2muXaXXXXXXXXXXXXXXXoVKxz
# Define if SSL certificates should be verified when making API calls
ssl_verify: yes
```
The file can then be referenced using the `helper` argument when initializing the object instance, as shown below.
```python
HELPER_FILE = '/path/to/helper.yml'
sfdc = Salesforce(helper=HELPER_FILE)
```
## Documentation
The documentation is located here: [https://salespyforce.readthedocs.io/en/latest/](https://salespyforce.readthedocs.io/en/latest/)
## License
[MIT License](https://github.com/jeffshurtliff/salespyforce/blob/master/LICENSE)
## Reporting Issues
Issues can be reported within the [GitHub repository](https://github.com/jeffshurtliff/salespyforce/issues).
## Donations
If you would like to donate to this project then you can do so using [this PayPal link](https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=XDZ8M6UV6EFK6&item_name=SalesPyForce+Python+API¤cy_code=USD).
## Disclaimer
This package is considered unofficial and is in no way endorsed or supported by [Salesforce Inc](https://www.salesforce.com).
| /salespyforce-1.2.1.tar.gz/salespyforce-1.2.1/README.md | 0.50952 | 0.944791 | README.md | pypi |
# Salesvision API
Salesvision API provides accurate, reliable and scalable fashion image analysis by endpoints.
It includes free public methods and a Python API client.
# Fashion analysis
Fashion analysis includes recognition of items:
- Category
- Tags/attributes
- Mask
- Color
## Category detection
The following solution will detect 27 categories:

## Tag/attribute recognition
The solution will specify tags from 7 main categories and styles such as described below:

On real-world images, the output for different fashion items will look like this:

## Color extraction
Here is an example of how color is extracted from the area covered by the fashion item mask:

# Public methods
### Curl HTTP Request
> Public endpoint for fashion analysis with time measurement
```shell
`IMG_URL=https%3A%2F%2Fis4.revolveassets.com%2Fimages%2Fp4%2Fn%2Fd%2FCAME-WD114_V1.jpg;
time curl -X GET "http://87.117.25.190:5015/fashion_analysis/?url=$IMG_URL"`
```
> From office local network
```shell
`IMG_URL=https%3A%2F%2Fis4.revolveassets.com%2Fimages%2Fp4%2Fn%2Fd%2FCAME-WD114_V1.jpg;
time curl -X GET "http://192.168.0.125:5015/fashion_analysis/?url=$IMG_URL"`
```
## Try it yourself at:
[office local network access link](http://192.168.0.125:5015/docs#/default/analysis_fashion_analysis__get)
[global network access link](http://87.117.25.190:5015/docs#/default/analysis_fashion_analysis__get)
Parameter | Default | Description
--------- | ------- | -----------
url | true | fashion image url.
*The `json` output will contain result list where each object is a recognized fashion item*
Each object will include:
- 1 of 27 fashion classes
- Tags description in 7 main categories
- Mask of the recognized item
- 5 main colors extracted from item mask covered area
- Color embedding which can be used later on with fashion item search by color
## Visual analysis method
> The above command returns JSON structured like this:
```json
{
"result": [
{
"category": "pants",
"description": {
"tags": {
"length": "maxi (length)",
"nickname": "jeans",
"opening type": "fly (opening)",
"silhouette": "regular (fit)",
"textile finishing, manufacturing techniques": "washed",
"textile pattern": "plain (pattern)",
"waistline": "low waist"
},
"colors": [
"172839",
...
]
},
"color_embedding": [
-0.45759817957878113,
...
]
},
{
"category": "top, t-shirt, sweatshirt",
"description": {
"tags": {
"length": "above-the-hip (length)",
"nickname": "classic (t-shirt)",
"opening type": "no opening",
"silhouette": "symmetrical",
"textile finishing, manufacturing techniques": "printed",
"textile pattern": "plain (pattern)",
"waistline": "no waistline"
},
"colors": [
"321d1a",
...
]
},
"color_embedding": [
-0.5404209494590759,
...
]
},
{
"category": "shoe",
"description": {
"colors": [
"161615",
...
]
},
"color_embedding": [
-0.5041476488113403,
...
]
},
{
"category": "headband, head covering, hair accessory",
"description": {
"colors": [
"35261b",
...
]
},
"color_embedding": [
-0.5759932398796082,
...
]
}
]
}
```
That method allows us to check accuracy of given masks and bounding boxes recognition by the given image:

### Provided attributes for the recognized `"category": "pants"`:
Attribute | Predicted tag
--------- | -------------
length | maxi
nickname | jeans
opening type | fly
silhouette | regular (fit)
textile finishing, manufacturing techniques | washed
textile pattern | plain
waistline | low waist
## Try it yourself at:
[office local network access link](http://192.168.0.125:5015/docs#/default/analysis_visual_fashion_analysis__get)
[global network access link](http://192.168.0.125:5015/docs#/default/analysis_visual_fashion_analysis__get)
# Client API
## Setup
You can easily set up our SDK with Python 3.x
> Install pip package (under development)
```shell
pip install salesvision
```
## Authentication
> Example of the authentication process:
> will be replaced with fastapi oauth2
```python
from salesvision import Salesvision
# connect to Salesvision module to work with its API
api = Salesvision(api_url='https://salesvision.com/api', api_key='Your_API_key', secret)
```
> Make sure to replace `Your_API_key` with your API key.
The Salesvision API will probably use OAuth2 for the authentication process
`Authorization: Your_API_key`
<aside class="notice">
You must replace <code>Your_API_key</code> with your personal API key.
</aside>
## Fashion analysis
```python
from salesvision import Salesvision
# connect to Salesvision module to work with its API
api = Salesvision(api_url='https://salesvision.com/api', api_key='Your_API_key', secret)
# image can be rather url or local stored file
results = api.fashion_analysis(image)
```
> The above command returns JSON structured like this:
```json
{
"result": [
{
"category": "pants",
"description": {
"tags": {
"length": "maxi (length)",
"nickname": "jeans",
"opening type": "fly (opening)",
"silhouette": "regular (fit)",
"textile finishing, manufacturing techniques": "washed",
"textile pattern": "plain (pattern)",
"waistline": "low waist"
},
"colors": [
"172839",
...
]
},
"color_embedding": [
-0.45759817957878113,
...
]
},
{
"category": "top, t-shirt, sweatshirt",
"description": {
"tags": {
"length": "above-the-hip (length)",
"nickname": "classic (t-shirt)",
"opening type": "no opening",
"silhouette": "symmetrical",
"textile finishing, manufacturing techniques": "printed",
"textile pattern": "plain (pattern)",
"waistline": "no waistline"
},
"colors": [
"321d1a",
...
]
},
"color_embedding": [
-0.5404209494590759,
...
]
},
{
"category": "shoe",
"description": {
"colors": [
"161615",
...
]
},
"color_embedding": [
-0.5041476488113403,
...
]
},
{
"category": "headband, head covering, hair accessory",
"description": {
"colors": [
"35261b",
...
]
},
"color_embedding": [
-0.5759932398796082,
...
]
}
]
}
```
Under the hood POST request is used
### Query Parameters
Parameter | Default | Description
--------- | ------- | -----------
file: | false | file in binary format.
*The output of this method will contain result list where each object is a recognized fashion item*
Each object will include:
- 1 of 27 fashion classes
- Tags description in 7 main categories
- Mask of the recognized item
- 5 main colors extracted from item mask covered area
- Color embedding which can be used later on with fashion item search by color
<aside class="success">
This method used with client API handles image file paths and urls
</aside>
| /salesvision-0.0.3.tar.gz/salesvision-0.0.3/README.md | 0.519765 | 0.925297 | README.md | pypi |
import torch
from collections import OrderedDict
from torch.nn import utils, functional as F
from torch.optim import Adam
from torch.autograd import Variable
from torch.backends import cudnn
from networks.poolnet import build_model, weights_init
import scipy.misc as sm
import numpy as np
import os
import torchvision.utils as vutils
import cv2
import math
import time
class Solver(object):
    """Training/inference driver for the PoolNet saliency model.
    Wraps network construction, the training loop (with DSS-style gradient
    accumulation), and batch inference over a test loader.
    NOTE(review): attribute layout and statement order are preserved as-is;
    checkpoint loading and the optimizer depend on them.
    """
    def __init__(self, train_loader, test_loader, config):
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.config = config
        # Number of mini-batches over which gradients are accumulated
        # before each optimizer step (see train()).
        self.iter_size = config.iter_size
        self.show_every = config.show_every
        # Epoch indices after which the learning rate is divided by 10.
        self.lr_decay_epoch = [15,]
        self.build_model()
        if config.mode == 'test':
            print('Loading pre-trained model from %s...' % self.config.model)
            if self.config.cuda:
                self.net.load_state_dict(torch.load(self.config.model))
            else:
                # Map CUDA-trained weights onto CPU tensors.
                self.net.load_state_dict(torch.load(self.config.model, map_location='cpu'))
            self.net.eval()
    # print the network information and parameter numbers
    def print_network(self, model, name):
        """Print the model structure and its total parameter count."""
        num_params = 0
        for p in model.parameters():
            num_params += p.numel()
        print(name)
        print(model)
        print("The number of parameters: {}".format(num_params))
    # build the network
    def build_model(self):
        """Instantiate the network, load initial weights and set up Adam."""
        self.net = build_model(self.config.arch)
        if self.config.cuda:
            self.net = self.net.cuda()
        # self.net.train()
        self.net.eval() # use_global_stats = True
        self.net.apply(weights_init)
        if self.config.load == '':
            # Fresh run: initialise only the backbone from pre-trained weights.
            self.net.base.load_pretrained_model(torch.load(self.config.pretrained_model))
        else:
            # Resume: restore the full model state.
            self.net.load_state_dict(torch.load(self.config.load))
        self.lr = self.config.lr
        self.wd = self.config.wd
        # Optimise only parameters that require gradients (frozen BN excluded).
        self.optimizer = Adam(filter(lambda p: p.requires_grad, self.net.parameters()), lr=self.lr, weight_decay=self.wd)
        self.print_network(self.net, 'PoolNet Structure')
    def test(self):
        """Run inference on every test image and write sigmoid maps as PNGs."""
        mode_name = 'sal_fuse'
        time_s = time.time()
        img_num = len(self.test_loader)
        for i, data_batch in enumerate(self.test_loader):
            # NOTE(review): im_size is computed but never used below.
            images, name, im_size = data_batch['image'], data_batch['name'][0], np.asarray(data_batch['size'])
            with torch.no_grad():
                images = Variable(images)
                if self.config.cuda:
                    images = images.cuda()
                preds = self.net(images)
                # Sigmoid logits -> [0, 1] saliency map, scaled to the 8-bit range.
                pred = np.squeeze(torch.sigmoid(preds).cpu().data.numpy())
                multi_fuse = 255 * pred
                cv2.imwrite(os.path.join(self.config.test_fold, name[:-4] + '_' + mode_name + '.png'), multi_fuse)
        time_e = time.time()
        print('Speed: %f FPS' % (img_num/(time_e-time_s)))
        print('Test Done!')
    # training phase
    def train(self):
        """Train for `config.epoch` epochs with gradient accumulation."""
        iter_num = len(self.train_loader.dataset) // self.config.batch_size
        aveGrad = 0  # mini-batches accumulated since the last optimizer step
        for epoch in range(self.config.epoch):
            r_sal_loss= 0
            self.net.zero_grad()
            for i, data_batch in enumerate(self.train_loader):
                sal_image, sal_label = data_batch['sal_image'], data_batch['sal_label']
                # Skip malformed pairs whose image/label spatial sizes differ.
                if (sal_image.size(2) != sal_label.size(2)) or (sal_image.size(3) != sal_label.size(3)):
                    print('IMAGE ERROR, PASSING```')
                    continue
                sal_image, sal_label= Variable(sal_image), Variable(sal_label)
                if self.config.cuda:
                    # cudnn.benchmark = True
                    sal_image, sal_label = sal_image.cuda(), sal_label.cuda()
                sal_pred = self.net(sal_image)
                # Sum-reduced BCE, normalised by the effective batch size.
                sal_loss_fuse = F.binary_cross_entropy_with_logits(sal_pred, sal_label, reduction='sum')
                sal_loss = sal_loss_fuse / (self.iter_size * self.config.batch_size)
                r_sal_loss += sal_loss.data
                sal_loss.backward()
                aveGrad += 1
                # accumulate gradients as done in DSS
                if aveGrad % self.iter_size == 0:
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                    aveGrad = 0
                if i % (self.show_every // self.config.batch_size) == 0:
                    if i == 0:
                        x_showEvery = 1
                    # NOTE(review): x_showEvery is only ever set to 1 (at i == 0),
                    # so the printed value is the accumulated, not averaged, loss.
                    print('epoch: [%2d/%2d], iter: [%5d/%5d] || Sal : %10.4f' % (
                        epoch, self.config.epoch, i, iter_num, r_sal_loss/x_showEvery))
                    print('Learning rate: ' + str(self.lr))
                    r_sal_loss= 0
            if (epoch + 1) % self.config.epoch_save == 0:
                torch.save(self.net.state_dict(), '%s/models/epoch_%d.pth' % (self.config.save_folder, epoch + 1))
            if epoch in self.lr_decay_epoch:
                # Step decay: lr <- lr * 0.1; the optimizer is rebuilt so the
                # new rate applies to all parameter groups.
                self.lr = self.lr * 0.1
                self.optimizer = Adam(filter(lambda p: p.requires_grad, self.net.parameters()), lr=self.lr, weight_decay=self.wd)
        torch.save(self.net.state_dict(), '%s/models/final.pth' % self.config.save_folder)
def bce2d(input, target, reduction='mean'):
    """Class-balanced binary cross-entropy with logits for edge maps.
    Rare positive (edge) pixels are up-weighted by the negative-pixel
    fraction, while the abundant negatives are down-weighted by 1.1x the
    positive-pixel fraction.
    Args:
        input: raw logits, same shape as ``target``.
        target: binary ground truth (0./1.) tensor.
        reduction: 'none' | 'mean' | 'sum', forwarded to
            ``F.binary_cross_entropy_with_logits``. The previous default of
            ``None`` is not a valid reduction mode and raised a ValueError
            whenever the argument was omitted.
    Returns:
        The weighted BCE loss tensor.
    """
    assert input.size() == target.size(), 'input/target shapes must match'
    pos = torch.eq(target, 1).float()
    neg = torch.eq(target, 0).float()
    num_pos = torch.sum(pos)
    num_neg = torch.sum(neg)
    num_total = num_pos + num_neg
    alpha = num_neg / num_total
    beta = 1.1 * num_pos / num_total
    # target pixel = 1 -> weight alpha (fraction of negatives)
    # target pixel = 0 -> weight beta  (1.1 * fraction of positives)
    # (the original comment had alpha and beta swapped relative to the code)
    weights = alpha * pos + beta * neg
    return F.binary_cross_entropy_with_logits(input, target, weights, reduction=reduction)
import os
from PIL import Image
import cv2
import torch
from torch.utils import data
from torchvision import transforms
from torchvision.transforms import functional as F
import numbers
import numpy as np
import random
class ImageDataTrain(data.Dataset):
    """Training dataset yielding {'sal_image', 'sal_label'} tensor pairs.
    The list file contains one "<image-path> <label-path>" pair per line,
    both relative to ``data_root``.
    """
    def __init__(self, data_root, data_list):
        self.sal_root = data_root
        self.sal_source = data_list
        with open(self.sal_source, 'r') as handle:
            self.sal_list = [line.strip() for line in handle.readlines()]
        self.sal_num = len(self.sal_list)
    def __getitem__(self, item):
        # Wrap the index so over-length sampling cycles through the list.
        im_name, gt_name = self.sal_list[item % self.sal_num].split()[:2]
        sal_image = load_image(os.path.join(self.sal_root, im_name))
        sal_label = load_sal_label(os.path.join(self.sal_root, gt_name))
        # Random horizontal flip, applied consistently to image and label.
        sal_image, sal_label = cv_random_flip(sal_image, sal_label)
        return {
            'sal_image': torch.Tensor(sal_image),
            'sal_label': torch.Tensor(sal_label),
        }
    def __len__(self):
        return self.sal_num
class ImageDataTest(data.Dataset):
    """Test dataset yielding {'image', 'name', 'size'} per listed image."""
    def __init__(self, data_root, data_list):
        self.data_root = data_root
        self.data_list = data_list
        with open(self.data_list, 'r') as handle:
            self.image_list = [line.strip() for line in handle.readlines()]
        self.image_num = len(self.image_list)
    def __getitem__(self, item):
        rel_path = self.image_list[item]
        image, im_size = load_image_test(os.path.join(self.data_root, rel_path))
        return {
            'image': torch.Tensor(image),
            'name': self.image_list[item % self.image_num],
            'size': im_size,
        }
    def __len__(self):
        return self.image_num
def get_loader(config, mode='train', pin=False):
    """Build a DataLoader for the training or test split.
    Shuffling is enabled only in training mode.
    """
    if mode == 'train':
        shuffle = True
        dataset = ImageDataTrain(config.train_root, config.train_list)
    else:
        shuffle = False
        dataset = ImageDataTest(config.test_root, config.test_list)
    return data.DataLoader(dataset=dataset, batch_size=config.batch_size, shuffle=shuffle,
                           num_workers=config.num_thread, pin_memory=pin)
def load_image(path):
    """Load an image and convert it to a CHW float32 array.
    The channel means (104.00699, 116.66877, 122.67892) are subtracted,
    matching the backbone's pre-training normalisation (cv2 loads BGR).
    Args:
        path: path to the image file.
    Returns:
        np.ndarray of shape (3, H, W), dtype float32, mean-subtracted.
    Raises:
        FileNotFoundError: if ``path`` does not exist. (Previously only a
            warning was printed and the code crashed on the None image.)
    """
    if not os.path.exists(path):
        raise FileNotFoundError('File {} not exists'.format(path))
    im = cv2.imread(path)
    in_ = np.array(im, dtype=np.float32)
    in_ -= np.array((104.00699, 116.66877, 122.67892))
    in_ = in_.transpose((2, 0, 1))  # HWC -> CHW
    return in_
def load_image_test(path):
    """Load an image for inference as a CHW float32 array plus its size.
    Returns:
        (array, (H, W)): mean-subtracted CHW image and the original spatial
        size, used later to resize predictions back to input resolution.
    Raises:
        FileNotFoundError: if ``path`` does not exist. (Previously only a
            warning was printed and the code crashed on the None image.)
    """
    if not os.path.exists(path):
        raise FileNotFoundError('File {} not exists'.format(path))
    im = cv2.imread(path)
    in_ = np.array(im, dtype=np.float32)
    im_size = tuple(in_.shape[:2])
    # Channel means of the backbone's pre-training data (BGR order).
    in_ -= np.array((104.00699, 116.66877, 122.67892))
    in_ = in_.transpose((2, 0, 1))  # HWC -> CHW
    return in_, im_size
def load_sal_label(path):
    """Load a saliency ground-truth mask as a (1, H, W) float32 array in [0, 1].
    Raises:
        FileNotFoundError: if ``path`` does not exist. (Previously only a
            warning was printed and the code crashed afterwards.)
    """
    if not os.path.exists(path):
        raise FileNotFoundError('File {} not exists'.format(path))
    im = Image.open(path)
    label = np.array(im, dtype=np.float32)
    if len(label.shape) == 3:
        # Keep a single channel of multi-channel masks.
        label = label[:, :, 0]
    label = label / 255.
    label = label[np.newaxis, ...]  # add channel axis -> (1, H, W)
    return label
def cv_random_flip(img, label):
    """With probability 0.5, horizontally flip ``img`` and ``label`` together.
    Both arrays are laid out (..., W), so flipping reverses the last axis.
    """
    if random.randint(0, 1):
        img = img[:, :, ::-1].copy()
        label = label[:, :, ::-1].copy()
    return img, label
import os
from PIL import Image
import cv2
import torch
from torch.utils import data
from torchvision import transforms
from torchvision.transforms import functional as F
import numbers
import numpy as np
import random
class ImageDataTrain(data.Dataset):
    """Joint training dataset yielding one edge sample and one saliency
    sample per item.
    Each list file holds "<image-path> <label-path>" lines. Indices wrap
    around the shorter of the two lists so both can be iterated together.
    """
    def __init__(self, sal_data_root, sal_data_list, edge_data_root, edge_data_list):
        self.sal_root = sal_data_root
        self.sal_source = sal_data_list
        self.edge_root = edge_data_root
        self.edge_source = edge_data_list
        with open(self.sal_source, 'r') as handle:
            self.sal_list = [line.strip() for line in handle.readlines()]
        with open(self.edge_source, 'r') as handle:
            self.edge_list = [line.strip() for line in handle.readlines()]
        self.sal_num = len(self.sal_list)
        self.edge_num = len(self.edge_list)
    def __getitem__(self, item):
        # edge data loading (no flip augmentation for edge maps)
        edge_im_name, edge_gt_name = self.edge_list[item % self.edge_num].split()[:2]
        edge_image = torch.Tensor(load_image(os.path.join(self.edge_root, edge_im_name)))
        edge_label = torch.Tensor(load_edge_label(os.path.join(self.edge_root, edge_gt_name)))
        # saliency data loading, with a joint random horizontal flip
        sal_im_name, sal_gt_name = self.sal_list[item % self.sal_num].split()[:2]
        sal_image = load_image(os.path.join(self.sal_root, sal_im_name))
        sal_label = load_sal_label(os.path.join(self.sal_root, sal_gt_name))
        sal_image, sal_label = cv_random_flip(sal_image, sal_label)
        return {
            'edge_image': edge_image,
            'edge_label': edge_label,
            'sal_image': torch.Tensor(sal_image),
            'sal_label': torch.Tensor(sal_label),
        }
    def __len__(self):
        # Cycle the shorter list so every sample of the longer one is seen.
        return max(self.sal_num, self.edge_num)
class ImageDataTest(data.Dataset):
    """Test dataset over explicit image paths (e.g. pathlib.Path objects)."""
    def __init__(self, input_image_paths):
        self.input_image_paths = input_image_paths
        self.image_num = len(self.input_image_paths)
    def __getitem__(self, item):
        source_path = self.input_image_paths[item]
        image, im_size = load_image_test(source_path)
        # `.name` assumes path objects with a basename attribute (pathlib).
        filename = self.input_image_paths[item % self.image_num].name
        return {'image': torch.Tensor(image), 'name': filename, 'size': im_size}
    def __len__(self):
        return self.image_num
def get_loader(config, mode='train', pin=False):
    """Build a DataLoader for the joint edge+saliency training set or for
    the path-based test set. Shuffling is enabled only in training mode.
    """
    if mode == 'train':
        shuffle = True
        dataset = ImageDataTrain(config.train_root, config.train_list, config.train_edge_root, config.train_edge_list)
    else:
        shuffle = False
        dataset = ImageDataTest(config.image_paths)
    return data.DataLoader(dataset=dataset, batch_size=config.batch_size, shuffle=shuffle,
                           num_workers=config.num_thread, pin_memory=pin)
def load_image(path):
    """Load an image and convert it to a CHW float32 array.
    The channel means (104.00699, 116.66877, 122.67892) are subtracted,
    matching the backbone's pre-training normalisation (cv2 loads BGR).
    Args:
        path: path to the image file.
    Returns:
        np.ndarray of shape (3, H, W), dtype float32, mean-subtracted.
    Raises:
        FileNotFoundError: if ``path`` does not exist. (Previously only a
            warning was printed and the code crashed on the None image.)
    """
    if not os.path.exists(path):
        raise FileNotFoundError('File {} not exists'.format(path))
    im = cv2.imread(path)
    in_ = np.array(im, dtype=np.float32)
    in_ -= np.array((104.00699, 116.66877, 122.67892))
    in_ = in_.transpose((2, 0, 1))  # HWC -> CHW
    return in_
def load_image_test(path):
    """Load an image for inference as a CHW float32 array plus its size.
    ``path`` may be a string or a path-like object (it is stringified for
    cv2.imread).
    Returns:
        (array, (H, W)): mean-subtracted CHW image and the original spatial
        size, used later to resize predictions back to input resolution.
    Raises:
        FileNotFoundError: if ``path`` does not exist. (Previously only a
            warning was printed and the code crashed on the None image.)
    """
    if not os.path.exists(path):
        raise FileNotFoundError('File {} not exists'.format(path))
    im = cv2.imread(str(path))
    in_ = np.array(im, dtype=np.float32)
    im_size = tuple(in_.shape[:2])
    # Channel means of the backbone's pre-training data (BGR order).
    in_ -= np.array((104.00699, 116.66877, 122.67892))
    in_ = in_.transpose((2, 0, 1))  # HWC -> CHW
    return in_, im_size
def load_sal_label(path):
    """Load a saliency ground-truth mask as a (1, H, W) float32 array in [0, 1].
    Raises:
        FileNotFoundError: if ``path`` does not exist. (Previously only a
            warning was printed and the code crashed afterwards.)
    """
    if not os.path.exists(path):
        raise FileNotFoundError('File {} not exists'.format(path))
    im = Image.open(path)
    label = np.array(im, dtype=np.float32)
    if len(label.shape) == 3:
        # Keep a single channel of multi-channel masks.
        label = label[:, :, 0]
    label = label / 255.
    label = label[np.newaxis, ...]  # add channel axis -> (1, H, W)
    return label
def load_edge_label(path):
    """Load a binary edge map as a (1, H, W) float32 array.
    After /255 scaling, pixels > 0.5 are snapped to exactly 1.
    Raises:
        FileNotFoundError: if ``path`` does not exist. (Previously only a
            warning was printed and the code crashed afterwards.)
    """
    if not os.path.exists(path):
        raise FileNotFoundError('File {} not exists'.format(path))
    im = Image.open(path)
    label = np.array(im, dtype=np.float32)
    if len(label.shape) == 3:
        # Keep a single channel of multi-channel masks.
        label = label[:, :, 0]
    label = label / 255.
    label[np.where(label > 0.5)] = 1.
    label = label[np.newaxis, ...]  # add channel axis -> (1, H, W)
    return label
def cv_random_flip(img, label):
    """Horizontally flip ``img`` and ``label`` together with probability 0.5.
    Arrays are laid out (..., W); flipping reverses the last axis.
    """
    if random.randint(0, 1):
        img = img[:, :, ::-1].copy()
        label = label[:, :, ::-1].copy()
    return img, label
import torch.nn as nn
import math
import torch
import numpy as np
import torch.nn.functional as F
# When True, BatchNorm layers are created with learnable affine parameters
# (several of them are nevertheless frozen below via requires_grad = False).
affine_par = True
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1 (preserves the
    spatial size at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class BasicBlock(nn.Module):
    """Two-conv residual block (ResNet-18/34 style).
    Output channels equal ``planes`` (expansion = 1). ``downsample`` is an
    optional module applied to the identity path when shapes differ.
    NOTE(review): unlike Bottleneck below, its BatchNorm parameters are not
    frozen; this block is not referenced by resnet50_locate in this file.
    """
    expansion = 1
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes, affine = affine_par)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes, affine = affine_par)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        # Identity (or downsampled) shortcut added before the final ReLU.
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """1x1 -> 3x3 -> 1x1 bottleneck residual block with optional dilation.
    Output channels are ``planes * 4`` (expansion = 4). All BatchNorm
    parameters are frozen (requires_grad = False) to keep the pre-trained
    statistics during fine-tuning. Attribute names (conv1/bn1/...) are
    checkpoint keys and must not change.
    """
    expansion = 4
    def __init__(self, inplanes, planes, stride=1, dilation_ = 1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change
        self.bn1 = nn.BatchNorm2d(planes,affine = affine_par)
        for i in self.bn1.parameters():
            i.requires_grad = False
        # Padding grows with dilation so the 3x3 conv keeps the spatial size.
        padding = 1
        if dilation_ == 2:
            padding = 2
        elif dilation_ == 4:
            padding = 4
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, # change
                               padding=padding, bias=False, dilation = dilation_)
        self.bn2 = nn.BatchNorm2d(planes,affine = affine_par)
        for i in self.bn2.parameters():
            i.requires_grad = False
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4, affine = affine_par)
        for i in self.bn3.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride
    def forward(self, x):
        # Identity (or downsampled) shortcut added before the final ReLU.
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """Dilated ResNet backbone that returns all intermediate stage features.
    ``layers`` gives the block count per stage (e.g. [3, 4, 6, 3] for
    ResNet-50). layer4 uses stride 1 with dilation 2, so its output keeps
    the spatial resolution of layer3. Attribute names are checkpoint keys.
    """
    def __init__(self, block, layers):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64,affine = affine_par)
        # Freeze the stem BatchNorm parameters (pre-trained statistics).
        for i in self.bn1.parameters():
            i.requires_grad = False
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        # Dilated final stage: stride 1 preserves resolution.
        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation__ = 2)
        # Default initialisation; usually overwritten by pre-trained weights.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def _make_layer(self, block, planes, blocks, stride=1,dilation__ = 1):
        """Stack ``blocks`` residual blocks; the first may downsample."""
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion or dilation__ == 2 or dilation__ == 4:
            # Projection shortcut to match shape/stride of the main path.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion,affine = affine_par),
            )
            # Freeze the shortcut BatchNorm parameters as well.
            for i in downsample._modules['1'].parameters():
                i.requires_grad = False
        layers = []
        layers.append(block(self.inplanes, planes, stride,dilation_=dilation__, downsample = downsample ))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes,dilation_=dilation__))
        return nn.Sequential(*layers)
    def forward(self, x):
        # Collect five feature maps: post-stem and after each stage.
        tmp_x = []
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        tmp_x.append(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        tmp_x.append(x)
        x = self.layer2(x)
        tmp_x.append(x)
        x = self.layer3(x)
        tmp_x.append(x)
        x = self.layer4(x)
        tmp_x.append(x)
        return tmp_x
class ResNet_locate(nn.Module):
    """ResNet backbone wrapped with a pyramid-pooling (PPM) guidance head.
    forward() returns the backbone's stage features plus a list of
    top-down "info" maps: the PPM-aggregated global context resized and
    projected to each decoder stage's resolution/channel count.
    """
    def __init__(self, block, layers):
        super(ResNet_locate,self).__init__()
        self.resnet = ResNet(block, layers)
        self.in_planes = 512
        # Channel counts expected by the decoder stages (coarse to fine).
        self.out_planes = [512, 256, 256, 128]
        # Reduce the 2048-channel layer4 output before pyramid pooling.
        self.ppms_pre = nn.Conv2d(2048, self.in_planes, 1, 1, bias=False)
        ppms, infos = [], []
        # Pyramid pooling at 1x1, 3x3 and 5x5 output resolutions.
        for ii in [1, 3, 5]:
            ppms.append(nn.Sequential(nn.AdaptiveAvgPool2d(ii), nn.Conv2d(self.in_planes, self.in_planes, 1, 1, bias=False), nn.ReLU(inplace=True)))
        self.ppms = nn.ModuleList(ppms)
        # Fuse the original map + 3 pooled maps (hence in_planes * 4).
        self.ppm_cat = nn.Sequential(nn.Conv2d(self.in_planes * 4, self.in_planes, 3, 1, 1, bias=False), nn.ReLU(inplace=True))
        # One projection per decoder stage, to its channel count.
        for ii in self.out_planes:
            infos.append(nn.Sequential(nn.Conv2d(self.in_planes, ii, 3, 1, 1, bias=False), nn.ReLU(inplace=True)))
        self.infos = nn.ModuleList(infos)
        # Default initialisation; the backbone is usually loaded pre-trained.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, 0.01)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
    def load_pretrained_model(self, model):
        """Load a backbone state dict (non-strict: head weights are absent)."""
        self.resnet.load_state_dict(model, strict=False)
    def forward(self, x):
        x_size = x.size()[2:]
        xs = self.resnet(x)
        xs_1 = self.ppms_pre(xs[-1])
        # Upsample each pooled context map back to the layer4 resolution.
        xls = [xs_1]
        for k in range(len(self.ppms)):
            xls.append(F.interpolate(self.ppms[k](xs_1), xs_1.size()[2:], mode='bilinear', align_corners=True))
        xls = self.ppm_cat(torch.cat(xls, dim=1))
        # Project the fused context to each stage's resolution and channels.
        infos = []
        for k in range(len(self.infos)):
            infos.append(self.infos[k](F.interpolate(xls, xs[len(self.infos) - 1 - k].size()[2:], mode='bilinear', align_corners=True)))
        return xs, infos
def resnet50_locate():
    """Build a ResNet-50 backbone wrapped with the PPM localisation head."""
    # [3, 4, 6, 3] is the standard ResNet-50 stage configuration.
    return ResNet_locate(Bottleneck, [3, 4, 6, 3])
import torch
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import math
from torch.autograd import Variable
import numpy as np
from .deeplab_resnet import resnet50_locate
from .vgg import vgg16_locate
# Per-backbone decoder configuration:
#   'convert':   [input channels per stage, output channels per stage] for ConvertLayer
#   'deep_pool': [in channels, out channels, need_x2 flags, need_fuse flags] per DeepPoolLayer
#   'score':     input channel count of the final ScoreLayer
config_vgg = {'convert': [[128,256,512,512,512],[64,128,256,512,512]], 'deep_pool': [[512, 512, 256, 128], [512, 256, 128, 128], [True, True, True, False], [True, True, True, False]], 'score': 128} # no convert layer, no conv6
config_resnet = {'convert': [[64,256,512,1024,2048],[128,256,256,512,512]], 'deep_pool': [[512, 512, 256, 256, 128], [512, 256, 256, 128, 128], [False, True, True, True, False], [True, True, True, True, False]], 'score': 128}
class ConvertLayer(nn.Module):
    """1x1 conv + ReLU adapters mapping each backbone stage's channel count
    onto the channel count expected by the decoder.
    ``list_k`` is [input channels per stage, output channels per stage].
    """
    def __init__(self, list_k):
        super(ConvertLayer, self).__init__()
        # Name kept as `convert0` for checkpoint compatibility.
        self.convert0 = nn.ModuleList(
            nn.Sequential(
                nn.Conv2d(list_k[0][idx], list_k[1][idx], 1, 1, bias=False),
                nn.ReLU(inplace=True),
            )
            for idx in range(len(list_k[0]))
        )
    def forward(self, list_x):
        # One adapter per incoming stage feature, in order.
        return [self.convert0[idx](feat) for idx, feat in enumerate(list_x)]
class DeepPoolLayer(nn.Module):
    """Pooling-based feature aggregation block.
    The input is average-pooled at strides 2/4/8, convolved, upsampled back
    and summed onto the identity path, then projected to ``k_out`` channels.
    Optionally (``need_x2``) the result is first upsampled to the next
    stage's resolution, and (``need_fuse``) fused with two extra inputs.
    """
    def __init__(self, k, k_out, need_x2, need_fuse):
        super(DeepPoolLayer, self).__init__()
        self.pools_sizes = [2, 4, 8]
        self.need_x2 = need_x2
        self.need_fuse = need_fuse
        pool_branches = []
        conv_branches = []
        for size in self.pools_sizes:
            pool_branches.append(nn.AvgPool2d(kernel_size=size, stride=size))
            conv_branches.append(nn.Conv2d(k, k, 3, 1, 1, bias=False))
        self.pools = nn.ModuleList(pool_branches)
        self.convs = nn.ModuleList(conv_branches)
        self.relu = nn.ReLU()
        self.conv_sum = nn.Conv2d(k, k_out, 3, 1, 1, bias=False)
        if self.need_fuse:
            self.conv_sum_c = nn.Conv2d(k_out, k_out, 3, 1, 1, bias=False)
    def forward(self, x, x2=None, x3=None):
        spatial = x.size()[2:]
        merged = x
        for pool, conv in zip(self.pools, self.convs):
            branch = conv(pool(x))
            merged = torch.add(merged, F.interpolate(branch, spatial, mode='bilinear', align_corners=True))
        merged = self.relu(merged)
        if self.need_x2:
            # Upsample to the next (finer) stage before projecting.
            merged = F.interpolate(merged, x2.size()[2:], mode='bilinear', align_corners=True)
        merged = self.conv_sum(merged)
        if self.need_fuse:
            merged = self.conv_sum_c(torch.add(torch.add(merged, x2), x3))
        return merged
class ScoreLayer(nn.Module):
    """Final 1x1 projection to a single-channel saliency logit map, with an
    optional bilinear resize back to the input image resolution."""
    def __init__(self, k):
        super(ScoreLayer, self).__init__()
        self.score = nn.Conv2d(k ,1, 1, 1)
    def forward(self, x, x_size=None):
        logits = self.score(x)
        if x_size is None:
            return logits
        # x_size is a full tensor size; only the spatial dims are used.
        return F.interpolate(logits, x_size[2:], mode='bilinear', align_corners=True)
def extra_layer(base_model_cfg, vgg):
    """Assemble the decoder pieces for the given backbone.
    Args:
        base_model_cfg: 'vgg' or 'resnet'.
        vgg: the (already constructed) backbone module, passed through.
    Returns:
        (backbone, convert_layers, deep_pool_layers, score_layers)
    Raises:
        ValueError: for an unknown ``base_model_cfg`` (previously this fell
            through and raised a NameError on the undefined ``config``).
    """
    if base_model_cfg == 'vgg':
        config = config_vgg
    elif base_model_cfg == 'resnet':
        config = config_resnet
    else:
        raise ValueError('unknown base_model_cfg: {!r}'.format(base_model_cfg))
    convert_layers = ConvertLayer(config['convert'])
    # One DeepPoolLayer per decoder stage, configured column-wise.
    deep_pool_layers = [
        DeepPoolLayer(config['deep_pool'][0][i], config['deep_pool'][1][i],
                      config['deep_pool'][2][i], config['deep_pool'][3][i])
        for i in range(len(config['deep_pool'][0]))
    ]
    score_layers = ScoreLayer(config['score'])
    return vgg, convert_layers, deep_pool_layers, score_layers
class PoolNet(nn.Module):
    """PoolNet saliency network: backbone + top-down DeepPool decoder.
    The backbone returns per-stage features plus PPM guidance maps
    (``infos``); the decoder merges them coarse-to-fine, and the score
    layer projects the result to a single-channel logit map at input
    resolution. (A dead ``edge_merge = []`` local was removed from
    forward(); it was never used.)
    """
    def __init__(self, base_model_cfg, base, convert_layers, deep_pool_layers, score_layers):
        super(PoolNet, self).__init__()
        self.base_model_cfg = base_model_cfg
        self.base = base
        self.deep_pool = nn.ModuleList(deep_pool_layers)
        self.score = score_layers
        # Only the ResNet backbone needs the 1x1 channel adapters.
        if self.base_model_cfg == 'resnet':
            self.convert = convert_layers
    def forward(self, x):
        x_size = x.size()
        conv2merge, infos = self.base(x)
        if self.base_model_cfg == 'resnet':
            conv2merge = self.convert(conv2merge)
        # Decode from the coarsest stage to the finest.
        conv2merge = conv2merge[::-1]
        merge = self.deep_pool[0](conv2merge[0], conv2merge[1], infos[0])
        for k in range(1, len(conv2merge)-1):
            merge = self.deep_pool[k](merge, conv2merge[k+1], infos[k])
        merge = self.deep_pool[-1](merge)
        # Project to 1 channel and resize back to the input resolution.
        merge = self.score(merge, x_size)
        return merge
def build_model(base_model_cfg='vgg'):
    """Construct a PoolNet with the requested backbone ('vgg' or 'resnet').
    Raises:
        ValueError: for an unknown backbone name (previously the function
            silently returned None).
    """
    if base_model_cfg == 'vgg':
        return PoolNet(base_model_cfg, *extra_layer(base_model_cfg, vgg16_locate()))
    if base_model_cfg == 'resnet':
        return PoolNet(base_model_cfg, *extra_layer(base_model_cfg, resnet50_locate()))
    raise ValueError('unknown base_model_cfg: {!r}'.format(base_model_cfg))
def weights_init(m):
    """Init hook for ``module.apply``: N(0, 0.01) conv weights, zero biases.
    Non-convolution modules are left untouched.
    """
    if not isinstance(m, nn.Conv2d):
        return
    m.weight.data.normal_(0, 0.01)
    if m.bias is not None:
        m.bias.data.zero_()
import torch
from torch import nn
from torch.nn import init
import torch.nn.functional as F
import math
from torch.autograd import Variable
import numpy as np
from .deeplab_resnet import resnet50_locate
from .vgg import vgg16_locate
config_vgg = {'convert': [[128,256,512,512,512],[64,128,256,512,512]], 'deep_pool': [[512, 512, 256, 128], [512, 256, 128, 128], [True, True, True, False], [True, True, True, False]], 'score': 256, 'edgeinfoc':[48,128], 'block': [[512, [16]], [256, [16]], [128, [16]]], 'fuse': [[16, 16, 16], True]} # no convert layer, no conv6
config_resnet = {'convert': [[64,256,512,1024,2048],[128,256,256,512,512]], 'deep_pool': [[512, 512, 256, 256, 128], [512, 256, 256, 128, 128], [False, True, True, True, False], [True, True, True, True, False]], 'score': 256, 'edgeinfoc':[64,128], 'block': [[512, [16]], [256, [16]], [256, [16]], [128, [16]]], 'fuse': [[16, 16, 16, 16], True]}
class ConvertLayer(nn.Module):
def __init__(self, list_k):
super(ConvertLayer, self).__init__()
up = []
for i in range(len(list_k[0])):
up.append(nn.Sequential(nn.Conv2d(list_k[0][i], list_k[1][i], 1, 1, bias=False), nn.ReLU(inplace=True)))
self.convert0 = nn.ModuleList(up)
def forward(self, list_x):
resl = []
for i in range(len(list_x)):
resl.append(self.convert0[i](list_x[i]))
return resl
class DeepPoolLayer(nn.Module):
def __init__(self, k, k_out, need_x2, need_fuse):
super(DeepPoolLayer, self).__init__()
self.pools_sizes = [2,4,8]
self.need_x2 = need_x2
self.need_fuse = need_fuse
pools, convs = [],[]
for i in self.pools_sizes:
pools.append(nn.AvgPool2d(kernel_size=i, stride=i))
convs.append(nn.Conv2d(k, k, 3, 1, 1, bias=False))
self.pools = nn.ModuleList(pools)
self.convs = nn.ModuleList(convs)
self.relu = nn.ReLU()
self.conv_sum = nn.Conv2d(k, k_out, 3, 1, 1, bias=False)
if self.need_fuse:
self.conv_sum_c = nn.Conv2d(k_out, k_out, 3, 1, 1, bias=False)
def forward(self, x, x2=None, x3=None):
x_size = x.size()
resl = x
for i in range(len(self.pools_sizes)):
y = self.convs[i](self.pools[i](x))
resl = torch.add(resl, F.interpolate(y, x_size[2:], mode='bilinear', align_corners=True))
resl = self.relu(resl)
if self.need_x2:
resl = F.interpolate(resl, x2.size()[2:], mode='bilinear', align_corners=True)
resl = self.conv_sum(resl)
if self.need_fuse:
resl = self.conv_sum_c(torch.add(torch.add(resl, x2), x3))
return resl
class BlockLayer(nn.Module):
def __init__(self, k_in, k_out_list):
super(BlockLayer, self).__init__()
up_in1, up_mid1, up_in2, up_mid2, up_out = [], [], [], [], []
for k in k_out_list:
up_in1.append(nn.Conv2d(k_in, k_in//4, 1, 1, bias=False))
up_mid1.append(nn.Sequential(nn.Conv2d(k_in//4, k_in//4, 3, 1, 1, bias=False), nn.Conv2d(k_in//4, k_in, 1, 1, bias=False)))
up_in2.append(nn.Conv2d(k_in, k_in//4, 1, 1, bias=False))
up_mid2.append(nn.Sequential(nn.Conv2d(k_in//4, k_in//4, 3, 1, 1, bias=False), nn.Conv2d(k_in//4, k_in, 1, 1, bias=False)))
up_out.append(nn.Conv2d(k_in, k, 1, 1, bias=False))
self.block_in1 = nn.ModuleList(up_in1)
self.block_in2 = nn.ModuleList(up_in2)
self.block_mid1 = nn.ModuleList(up_mid1)
self.block_mid2 = nn.ModuleList(up_mid2)
self.block_out = nn.ModuleList(up_out)
self.relu = nn.ReLU()
def forward(self, x, mode=0):
x_tmp = self.relu(x + self.block_mid1[mode](self.block_in1[mode](x)))
# x_tmp = self.block_mid2[mode](self.block_in2[mode](self.relu(x + x_tmp)))
x_tmp = self.relu(x_tmp + self.block_mid2[mode](self.block_in2[mode](x_tmp)))
x_tmp = self.block_out[mode](x_tmp)
return x_tmp
class EdgeInfoLayerC(nn.Module):
def __init__(self, k_in, k_out):
super(EdgeInfoLayerC, self).__init__()
self.trans = nn.Sequential(nn.Conv2d(k_in, k_in, 3, 1, 1, bias=False), nn.ReLU(inplace=True),
nn.Conv2d(k_in, k_out, 3, 1, 1, bias=False), nn.ReLU(inplace=True),
nn.Conv2d(k_out, k_out, 3, 1, 1, bias=False), nn.ReLU(inplace=True),
nn.Conv2d(k_out, k_out, 3, 1, 1, bias=False), nn.ReLU(inplace=True))
def forward(self, x, x_size):
tmp_x = []
for i_x in x:
tmp_x.append(F.interpolate(i_x, x_size[2:], mode='bilinear', align_corners=True))
x = self.trans(torch.cat(tmp_x, dim=1))
return x
class FuseLayer1(nn.Module):
def __init__(self, list_k, deep_sup):
super(FuseLayer1, self).__init__()
up = []
for i in range(len(list_k)):
up.append(nn.Conv2d(list_k[i], 1, 1, 1))
self.trans = nn.ModuleList(up)
self.fuse = nn.Conv2d(len(list_k), 1, 1, 1)
self.deep_sup = deep_sup
def forward(self, list_x, x_size):
up_x = []
for i, i_x in enumerate(list_x):
up_x.append(F.interpolate(self.trans[i](i_x), x_size[2:], mode='bilinear', align_corners=True))
out_fuse = self.fuse(torch.cat(up_x, dim = 1))
if self.deep_sup:
out_all = []
for up_i in up_x:
out_all.append(up_i)
return [out_fuse, out_all]
else:
return [out_fuse]
class ScoreLayer(nn.Module):
def __init__(self, k):
super(ScoreLayer, self).__init__()
self.score = nn.Conv2d(k ,1, 3, 1, 1)
def forward(self, x, x_size=None):
x = self.score(x)
if x_size is not None:
x = F.interpolate(x, x_size[2:], mode='bilinear', align_corners=True)
return x
def extra_layer(base_model_cfg, base):
if base_model_cfg == 'vgg':
config = config_vgg
elif base_model_cfg == 'resnet':
config = config_resnet
convert_layers, deep_pool_layers, block_layers, fuse_layers, edgeinfo_layers, score_layers = [], [], [], [], [], []
convert_layers = ConvertLayer(config['convert'])
for k in config['block']:
block_layers += [BlockLayer(k[0], k[1])]
for i in range(len(config['deep_pool'][0])):
deep_pool_layers += [DeepPoolLayer(config['deep_pool'][0][i], config['deep_pool'][1][i], config['deep_pool'][2][i], config['deep_pool'][3][i])]
fuse_layers = FuseLayer1(config['fuse'][0], config['fuse'][1])
edgeinfo_layers = EdgeInfoLayerC(config['edgeinfoc'][0], config['edgeinfoc'][1])
score_layers = ScoreLayer(config['score'])
return base, convert_layers, deep_pool_layers, block_layers, fuse_layers, edgeinfo_layers, score_layers
class PoolNet(nn.Module):
def __init__(self, base_model_cfg, base, convert_layers, deep_pool_layers, block_layers, fuse_layers, edgeinfo_layers, score_layers):
super(PoolNet, self).__init__()
self.base_model_cfg = base_model_cfg
self.base = base
self.block = nn.ModuleList(block_layers)
self.deep_pool = nn.ModuleList(deep_pool_layers)
self.fuse = fuse_layers
self.edgeinfo = edgeinfo_layers
self.score = score_layers
if self.base_model_cfg == 'resnet':
self.convert = convert_layers
def forward(self, x, mode):
x_size = x.size()
conv2merge, infos = self.base(x) #increases memory usage for new image shapes
if self.base_model_cfg == 'resnet':
conv2merge = self.convert(conv2merge)
conv2merge = conv2merge[::-1]
edge_merge = []
merge = self.deep_pool[0](conv2merge[0], conv2merge[1], infos[0])
edge_merge.append(merge)
for k in range(1, len(conv2merge)-1):
merge = self.deep_pool[k](merge, conv2merge[k+1], infos[k])
edge_merge.append(merge)
if mode == 0:
edge_merge = [self.block[i](kk) for i, kk in enumerate(edge_merge)]
merge = self.fuse(edge_merge, x_size)
elif mode == 1:
merge = self.deep_pool[-1](merge)
edge_merge = [self.block[i](kk).detach() for i, kk in enumerate(edge_merge)]
edge_merge = self.edgeinfo(edge_merge, merge.size())
merge = self.score(torch.cat([merge, edge_merge], dim=1), x_size)
return merge
def build_model(base_model_cfg='vgg'):
if base_model_cfg == 'vgg':
return PoolNet(base_model_cfg, *extra_layer(base_model_cfg, vgg16_locate()))
elif base_model_cfg == 'resnet':
return PoolNet(base_model_cfg, *extra_layer(base_model_cfg, resnet50_locate()))
def weights_init(m):
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0, 0.01)
if m.bias is not None:
m.bias.data.zero_() | /saliency_detector-0.2.5.tar.gz/saliency_detector-0.2.5/saliency_detector/networks/joint_poolnet.py | 0.824674 | 0.388618 | joint_poolnet.py | pypi |
import torch.nn as nn
import math
import torch
import numpy as np
import torch.nn.functional as F
# vgg16
def vgg(cfg, i, batch_norm=False):
layers = []
in_channels = i
stage = 1
for v in cfg:
if v == 'M':
stage += 1
if stage == 6:
layers += [nn.MaxPool2d(kernel_size=3, stride=1, padding=1)]
else:
layers += [nn.MaxPool2d(kernel_size=3, stride=2, padding=1)]
else:
if stage == 6:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return layers
class vgg16(nn.Module):
def __init__(self):
super(vgg16, self).__init__()
self.cfg = {'tun': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], 'tun_ex': [512, 512, 512]}
self.extract = [8, 15, 22, 29] # [3, 8, 15, 22, 29]
self.base = nn.ModuleList(vgg(self.cfg['tun'], 3))
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def load_pretrained_model(self, model):
self.base.load_state_dict(model, strict=False)
def forward(self, x):
tmp_x = []
for k in range(len(self.base)):
x = self.base[k](x)
if k in self.extract:
tmp_x.append(x)
return tmp_x
class vgg16_locate(nn.Module):
def __init__(self):
super(vgg16_locate,self).__init__()
self.vgg16 = vgg16()
self.in_planes = 512
self.out_planes = [512, 256, 128]
ppms, infos = [], []
for ii in [1, 3, 5]:
ppms.append(nn.Sequential(nn.AdaptiveAvgPool2d(ii), nn.Conv2d(self.in_planes, self.in_planes, 1, 1, bias=False), nn.ReLU(inplace=True)))
self.ppms = nn.ModuleList(ppms)
self.ppm_cat = nn.Sequential(nn.Conv2d(self.in_planes * 4, self.in_planes, 3, 1, 1, bias=False), nn.ReLU(inplace=True))
for ii in self.out_planes:
infos.append(nn.Sequential(nn.Conv2d(self.in_planes, ii, 3, 1, 1, bias=False), nn.ReLU(inplace=True)))
self.infos = nn.ModuleList(infos)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, 0.01)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def load_pretrained_model(self, model):
self.vgg16.load_pretrained_model(model)
def forward(self, x):
x_size = x.size()[2:]
xs = self.vgg16(x)
xls = [xs[-1]]
for k in range(len(self.ppms)):
xls.append(F.interpolate(self.ppms[k](xs[-1]), xs[-1].size()[2:], mode='bilinear', align_corners=True))
xls = self.ppm_cat(torch.cat(xls, dim=1))
infos = []
for k in range(len(self.infos)):
infos.append(self.infos[k](F.interpolate(xls, xs[len(self.infos) - 1 - k].size()[2:], mode='bilinear', align_corners=True)))
return xs, infos | /saliency_detector-0.2.5.tar.gz/saliency_detector-0.2.5/saliency_detector/networks/vgg.py | 0.850903 | 0.40028 | vgg.py | pypi |
# Saliency Library
## Updates
🔴 Now framework-agnostic! [(Example core notebook)](Examples_core.ipynb) 🔴
🔗 For further explanation of the methods and more examples of the resulting maps, see our [Github Pages website](https://pair-code.github.io/saliency) 🔗
If upgrading from an older version, update old imports to `import saliency.tf1 as saliency`. We provide wrappers to make the framework-agnostic version compatible with TF1 models. [(Example TF1 notebook)](Examples_tf1.ipynb)
🔴 Added Performance Information Curve (PIC) - a human
independent metric for evaluating the quality of saliency methods.
([Example notebook](https://github.com/PAIR-code/saliency/blob/master/pic_metrics.ipynb)) 🔴
## Saliency Methods
This repository contains code for the following saliency techniques:
* Guided Integrated Gradients* ([paper](https://arxiv.org/abs/2106.09788), [poster](https://github.com/PAIR-code/saliency/blob/master/docs/CVPR_Guided_IG_Poster.pdf))
* XRAI* ([paper](https://arxiv.org/abs/1906.02825), [poster](https://github.com/PAIR-code/saliency/blob/master/docs/ICCV_XRAI_Poster.pdf))
* SmoothGrad* ([paper](https://arxiv.org/abs/1706.03825))
* Vanilla Gradients
([paper](https://scholar.google.com/scholar?q=Visualizing+higher-layer+features+of+a+deep+network&btnG=&hl=en&as_sdt=0%2C22),
[paper](https://arxiv.org/abs/1312.6034))
* Guided Backpropogation ([paper](https://arxiv.org/abs/1412.6806))
* Integrated Gradients ([paper](https://arxiv.org/abs/1703.01365))
* Occlusion
* Grad-CAM ([paper](https://arxiv.org/abs/1610.02391))
* Blur IG ([paper](https://arxiv.org/abs/2004.03383))
\*Developed by PAIR.
This list is by no means comprehensive. We are accepting pull requests to add
new methods!
## Evaluation of Saliency Methods
The repository provides an implementation of Performance Information Curve (PIC) -
a human independent metric for evaluating the quality of saliency methods
([paper](https://arxiv.org/abs/1906.02825),
[poster](https://github.com/PAIR-code/saliency/blob/master/docs/ICCV_XRAI_Poster.pdf),
[code](https://github.com/PAIR-code/saliency/blob/master/saliency/metrics/pic.py),
[notebook](https://github.com/PAIR-code/saliency/blob/master/pic_metrics.ipynb)).
## Download
```
# To install the core subpackage:
pip install saliency
# To install core and tf1 subpackages:
pip install saliency[tf1]
```
or for the development version:
```
git clone https://github.com/pair-code/saliency
cd saliency
```
## Usage
The saliency library has two subpackages:
* `core` uses a generic `call_model_function` which can be used with any ML
framework.
* `tf1` accepts input/output tensors directly, and sets up the necessary
graph operations for each method.
### Core
Each saliency mask class extends from the `CoreSaliency` base class. This class
contains the following methods:
* `GetMask(x_value, call_model_function, call_model_args=None)`: Returns a mask
of
the shape of non-batched `x_value` given by the saliency technique.
* `GetSmoothedMask(x_value, call_model_function, call_model_args=None, stdev_spread=.15, nsamples=25, magnitude=True)`:
Returns a mask smoothed of the shape of non-batched `x_value` with the
SmoothGrad technique.
The visualization module contains two methods for saliency visualization:
* ```VisualizeImageGrayscale(image_3d, percentile)```: Marginalizes across the
absolute value of each channel to create a 2D single channel image, and clips
the image at the given percentile of the distribution. This method returns a
2D tensor normalized between 0 to 1.
* ```VisualizeImageDiverging(image_3d, percentile)```: Marginalizes across the
value of each channel to create a 2D single channel image, and clips the
image at the given percentile of the distribution. This method returns a
2D tensor normalized between -1 to 1 where zero remains unchanged.
If the sign of the value given by the saliency mask is not important, then use
```VisualizeImageGrayscale```, otherwise use ```VisualizeImageDiverging```. See
the SmoothGrad paper for more details on which visualization method to use.
##### call_model_function
`call_model_function` is how we pass inputs to a given model and receive the outputs
necessary to compute saliency masks. The description of this method and expected
output format is in the `CoreSaliency` description, as well as separately for each method.
##### Examples
[This example iPython notebook](http://github.com/pair-code/saliency/blob/master/Examples_core.ipynb)
showing these techniques is a good starting place.
Here is a condensed example of using IG+SmoothGrad with TensorFlow 2:
```
import saliency.core as saliency
import tensorflow as tf
...
# call_model_function construction here.
def call_model_function(x_value_batched, call_model_args, expected_keys):
tape = tf.GradientTape()
grads = np.array(tape.gradient(output_layer, images))
return {saliency.INPUT_OUTPUT_GRADIENTS: grads}
...
# Load data.
image = GetImagePNG(...)
# Compute IG+SmoothGrad.
ig_saliency = saliency.IntegratedGradients()
smoothgrad_ig = ig_saliency.GetSmoothedMask(image,
call_model_function,
call_model_args=None)
# Compute a 2D tensor for visualization.
grayscale_visualization = saliency.VisualizeImageGrayscale(
smoothgrad_ig)
```
### TF1
Each saliency mask class extends from the `TF1Saliency` base class. This class
contains the following methods:
* `__init__(graph, session, y, x)`: Constructor of the SaliencyMask. This can
modify the graph, or sometimes create a new graph. Often this will add nodes
to the graph, so this shouldn't be called continuously. `y` is the output
tensor to compute saliency masks with respect to, `x` is the input tensor
with the outer most dimension being batch size.
* `GetMask(x_value, feed_dict)`: Returns a mask of the shape of non-batched
`x_value` given by the saliency technique.
* `GetSmoothedMask(x_value, feed_dict)`: Returns a mask smoothed of the shape
of non-batched `x_value` with the SmoothGrad technique.
The visualization module contains two visualization methods:
* ```VisualizeImageGrayscale(image_3d, percentile)```: Marginalizes across the
absolute value of each channel to create a 2D single channel image, and clips
the image at the given percentile of the distribution. This method returns a
2D tensor normalized between 0 to 1.
* ```VisualizeImageDiverging(image_3d, percentile)```: Marginalizes across the
value of each channel to create a 2D single channel image, and clips the
image at the given percentile of the distribution. This method returns a
2D tensor normalized between -1 to 1 where zero remains unchanged.
If the sign of the value given by the saliency mask is not important, then use
```VisualizeImageGrayscale```, otherwise use ```VisualizeImageDiverging```. See
the SmoothGrad paper for more details on which visualization method to use.
##### Examples
[This example iPython notebook](http://github.com/pair-code/saliency/blob/master/Examples_tf1.ipynb) shows
these techniques is a good starting place.
Another example of using GuidedBackprop with SmoothGrad from TensorFlow:
```
from saliency.tf1 import GuidedBackprop
from saliency.tf1 import VisualizeImageGrayscale
import tensorflow.compat.v1 as tf
...
# Tensorflow graph construction here.
y = logits[5]
x = tf.placeholder(...)
...
# Compute guided backprop.
# NOTE: This creates another graph that gets cached, try to avoid creating many
# of these.
guided_backprop_saliency = GuidedBackprop(graph, session, y, x)
...
# Load data.
image = GetImagePNG(...)
...
smoothgrad_guided_backprop =
guided_backprop_saliency.GetMask(image, feed_dict={...})
# Compute a 2D tensor for visualization.
grayscale_visualization = visualization.VisualizeImageGrayscale(
smoothgrad_guided_backprop)
```
## Conclusion/Disclaimer
If you have any questions or suggestions for improvements to this library,
please contact the owners of the `PAIR-code/saliency` repository.
This is not an official Google product. | /saliency-0.2.0.tar.gz/saliency-0.2.0/README.md | 0.815416 | 0.968856 | README.md | pypi |
class CastBase:
    """
    Identical to regular dict/list except on instantiation it performs a
    one-off recursive recasting of all contained items that have a type listed
    in cast_map to the mapped target type.
    Also automatically recasts any list or dict instances to CastList and
    CastDict respectively and recurses them.
    Will only recurse lists and dicts, so if you want to recurse into tuples
    or sets, make sure to include a conversion for these into lists.
    Useful if you have stubborn objects that won't pickle or export to json for
    example.
    cast_map is a dict of conversion types and their recast targets, e.g.:
    {int: str, float: Decimal}
    Conversion types can be a tuple of types:
    {(int, float): str}
    """
    def __init__(self, *args, cast_map, **kwargs):
        # Normalise cast_map keys so every key is a tuple of types; this lets
        # _recast_item treat single-type and multi-type keys uniformly.
        cast_map = {(tuple([key]) if not isinstance(key, tuple) else key): value for key, value in cast_map.items()}
        self.cast_map = cast_map
        super().__init__(*args, **kwargs)
        self._recast()
    def _recast(self):
        # Subclass hook: walk own contents and recast each item in place.
        # Fix: this previously did ``raise NotImplemented()`` -- NotImplemented
        # is a non-callable singleton, so that line raised TypeError instead of
        # the intended "abstract method" exception.
        raise NotImplementedError()
    def _recast_item(self, item):
        """Recast a single item per cast_map, then recurse into lists/dicts."""
        # Try to recast it first, then recurse
        for src_types, dst_type in self.cast_map.items():
            for src_type in src_types:
                if isinstance(item, src_type):
                    item = dst_type(item)
                    break
        if isinstance(item, list):
            return CastList(item, cast_map=self.cast_map)
        elif isinstance(item, dict):
            return CastDict(item, cast_map=self.cast_map)
        else:
            return item
class CastDictMixin(CastBase):
    """Dict flavour of CastBase: recasts every value in place."""
    def _recast(self):
        # Only values are replaced; the key set never changes mid-loop,
        # so iterating the mapping directly is safe.
        for key in self.keys():
            self[key] = self._recast_item(self[key])
class CastListMixin(CastBase):
    """List flavour of CastBase: recasts every element in place."""
    def _recast(self):
        for idx in range(len(self)):
            self[idx] = self._recast_item(self[idx])
class CastDict(CastDictMixin, dict):
    """A dict that recursively recasts its contents on construction (see CastBase)."""
class CastList(CastListMixin, list):
    """A list that recursively recasts its contents on construction (see CastBase)."""
import re
from jmespath import functions, search
class CustomFunctions(functions.Functions):
    """Extra JMESPath functions mirroring some jq conveniences."""

    @functions.signature({'types': ['string']}, {'types': ['string']})
    def _func_re_search(self, s, pattern):
        """Return True if ``pattern`` matches anywhere in ``s``."""
        return bool(re.search(pattern, s))

    @functions.signature({'types': ['object']})
    def _func_to_entries(self, o):
        """
        As per jq to_entries():
        {'a': 'A', 'b': 'B'} -> [{'key': 'a', 'value': 'A'}, {'key': 'b', 'value': 'B'}]
        """
        entries = []
        for key, value in o.items():
            entries.append({'key': key, 'value': value})
        return entries

    @functions.signature({'types': ['array']})
    def _func_from_entries(self, a):
        """
        As per jq from_entries():
        [{'key': 'a', 'value': 'A'}, {'key': 'b', 'value': 'B'}] -> {'a': 'A', 'b': 'B'}
        """
        result = {}
        for entry in a:
            result[entry['key']] = entry['value']
        return result

    @functions.signature({'types': ['object']}, {'types': ['string']})
    def _func_with_entries(self, o, expr):
        """
        Similar to jq with_entries(); applies expr as a filter combined with
        to_entries and from_entries:
            to_entries(@) | <filter> | from_entries(@)
        The filter expression must evaluate to either true or false.
        Example:
            > o = {"a": "A", "b": "B"}
            > with_entries(o, "key!=`b`")
            {"a": "A"}
        """
        kept = []
        for entry in self._func_to_entries(o):
            # Keep only entries for which the filter is literally True.
            if search(expr, entry) is True:
                kept.append(entry)
        return self._func_from_entries(kept)

    @functions.signature({'types': ['object']}, {'types': ['string', 'array']})
    def _func_remove_keys(self, o, keys):
        """
        Remove the keys listed in ``keys`` from the object.
        Example:
            > o = {"a": "A", "b": "B", "c": "C"}
            > remove_keys(o, ['a', 'b'])
            {"c": "C"}
        ``keys`` can be an array of key names or a single key name string,
        e.g. remove_keys(@, 'a').
        """
        if isinstance(keys, str):
            keys = [keys]
        # Build a conjunction that rejects every listed key.
        expr = ' && '.join(f'key!=`{k}`' for k in keys)
        return self._func_with_entries(o, expr)
import numpy as np
class TestRunData:
    """Aggregates the results of one full test run across all test cases."""

    def __init__(self):
        # Cached counters; refreshed by the get_* aggregation methods below.
        self.test_passed = 0
        self.test_failed = 0
        self.test_not_run = 0
        self.total_steps = 0
        self.passed_steps = 0
        self.failed_steps = 0
        self.data_test = []

    def get_test_passed(self):
        return self.test_passed

    def get_test_failed(self):
        return self.test_failed

    def get_test_not_run(self):
        return self.test_not_run

    def get_number_of_test_case(self):
        return len(self.data_test)

    def add_test_case(self, name, order):
        """Create a new TestCaseData, register it and return it."""
        new_case = TestCaseData(name, order)
        self.data_test.append(new_case)
        return new_case

    def get_test_case_by_order(self, order):
        """Return the TestCaseData with the given order, or None if absent."""
        return next((case for case in self.data_test if case.get_order() == order), None)

    def get_test_data(self):
        return self.data_test

    def get_passed_test_case(self):
        """Recount and return the number of passed test cases."""
        self.test_passed = sum(1 for case in self.data_test if case.get_status() == "pass")
        return self.test_passed

    def get_failed_test_case(self):
        """Recount and return the number of failed test cases."""
        self.test_failed = sum(1 for case in self.data_test if case.get_status() == "fail")
        return self.test_failed

    def get_total_steps(self):
        """Recount and return the total number of steps across all cases."""
        self.total_steps = sum(case.get_number_of_steps() for case in self.data_test)
        return self.total_steps

    def get_total_passed_steps(self):
        """Recount and return the total number of passed steps."""
        self.passed_steps = sum(case.get_passed_steps() for case in self.data_test)
        return self.passed_steps

    def get_total_failed_steps(self):
        """Recount and return the total number of failed steps."""
        self.failed_steps = sum(case.get_failed_steps() for case in self.data_test)
        return self.failed_steps
class TestCaseData:
    """Result data for a single test case and its executed steps."""

    def __init__(self, name, order):
        self._name = name
        self._order = order
        self._passed_steps = 0
        self._failed_steps = 0
        self._total_steps = 0
        self._status = None  # "pass"/"fail"; None until check_status() runs
        self._steps = []

    def get_steps(self):
        return self._steps

    def get_number_of_steps(self):
        return len(self._steps)

    def get_name(self):
        return self._name

    def get_order(self):
        return self._order

    def get_total_steps(self):
        return self._total_steps

    def get_passed_steps(self):
        return self._passed_steps

    def get_failed_steps(self):
        return self._failed_steps

    def get_status(self):
        return self._status

    def get_passed_percentage(self, decimals):
        """Percentage of passed steps, rounded to ``decimals`` places."""
        ratio = self._passed_steps / self._total_steps
        return np.round(ratio * 100, decimals)

    def set_status(self, status):
        self._status = status

    def check_status(self):
        """Mark the case failed if any step failed, passed otherwise."""
        self.set_status("fail" if self._failed_steps >= 1 else "pass")

    def add_step(self, order,
                 description=None,
                 status=None,
                 time=None,
                 method=None,
                 error_message=None,
                 error_line=None,
                 error_line_module=None):
        """Record one executed step and refresh the per-case counters."""
        new_step = StepData(order,
                            description=description,
                            status=status,
                            time=time,
                            func=method,
                            error_message=error_message,
                            error_line=error_line,
                            error_line_module=error_line_module)
        self._total_steps += 1
        if status == "pass":
            self._passed_steps += 1
        elif status == "fail":
            self._failed_steps += 1
        self.check_status()
        self._steps.append(new_step)

    def get_step_by_order(self, order):
        """Return the step with the given order, or None if absent."""
        return next((step for step in self._steps if step.get_order() == order), None)
class StepData:
    """Plain record describing one executed test step."""

    def __init__(self,
                 order,
                 description=None,
                 status=None,
                 time=None,
                 func=None,
                 error_message=None,
                 error_line=None,
                 error_line_module=None):
        self._order = order
        # Missing text fields are replaced by human-readable placeholders.
        self._description = "There is no description" if description is None else description
        self._status = status
        self._time = time
        self._func = func
        self._error_message = "There is no error message" if error_message is None else error_message
        self._error_line = error_line
        self._error_line_module = error_line_module

    # --- accessors ----------------------------------------------------
    def get_order(self):
        return self._order

    def get_description(self):
        return self._description

    def get_status(self):
        return self._status

    def get_time(self):
        return self._time

    def get_method(self):
        return self._func

    def get_error_message(self):
        return self._error_message

    def get_error_line(self):
        return self._error_line

    def get_error_line_module(self):
        return self._error_line_module

    # --- mutators -----------------------------------------------------
    def set_order(self, order):
        self._order = order

    def set_description(self, description):
        self._description = description

    def set_status(self, status):
        self._status = status

    def set_time(self, time):
        self._time = time

    def set_method(self, method):
        self._func = method

    def set_error_message(self, error_message):
        self._error_message = error_message

    def set_error_line(self, error_line):
        self._error_line = error_line

    def set_error_line_module(self, error_line_module):
        self._error_line_module = error_line_module
from fpdf import FPDF
class ResultsPDF(FPDF):
    """PDF report of a test run, built on pyfpdf's FPDF.

    Call set_data() before rendering; header() and footer() are invoked
    automatically by FPDF on every add_page(). Coordinates below are in the
    document's default unit -- presumably mm on an A4 page (the hard-coded
    210 matches A4's width in mm; TODO confirm constructor unit/format).
    """

    def set_data(self, data):
        # Test-run data (TestRunData-like object) consumed by the drawing
        # methods below.
        self.data = data

    def header(self):
        """Page header: framed title bar plus logo (auto-called by FPDF)."""
        title = "Results of Test Run"
        # Arial 15
        self.set_font('Arial', size=15)
        # Fixed title-bar width, centred horizontally on the page
        w = 170
        self.set_x((210 - w) / 2)
        # Colors of frame, background and text
        self.set_draw_color(48, 62, 88)
        self.set_fill_color(92, 116, 144)
        self.set_text_color(255, 255, 255)
        # Thickness of frame (0.7 mm)
        self.set_line_width(0.7)
        # Title
        self.cell(w, 16, title, 1, 1, 'C', 1)
        # Logo drawn on top of the title bar. NOTE(review): path is relative
        # to the current working directory -- breaks if the app is launched
        # from elsewhere; confirm against the launcher.
        self.image('sallust/GUI/img/Apyno_logo_big.png', 25, 12, 13, 13)
        # Line break
        self.ln(10)
        self.draw_border_lines()

    def footer(self):
        """Page footer with centred page number (auto-called by FPDF)."""
        # Position at 1.5 cm from bottom
        self.set_y(-15)
        # Arial italic 8
        self.set_font('Arial', 'I', 8)
        # Text color in gray
        self.set_text_color(128)
        # Page number
        self.cell(0, 10, 'Page ' + str(self.page_no()), 0, 0, 'C')

    def draw_border_lines(self):
        """Draw the left, right and bottom page border lines."""
        self.line(20, 25, 20, self.h - 30)
        self.line(190, 25, 190, self.h - 30)
        self.line(20, self.h - 30, 190, self.h-30)

    def draw_resume_data_row(self, tag, value):
        """Render one summary row: colored label cell + white value cell."""
        self.set_draw_color(0, 0, 0)
        self.set_fill_color(92, 116, 144)
        self.set_text_color(255, 255, 255)
        spacing = 3
        col_width = self.w / 7
        row_height = self.font_size - 1
        self.cell(col_width, (row_height * spacing), str(tag), border=1, fill=1, align="C")
        # Value cell: white background, black text
        self.set_fill_color(255, 255, 255)
        self.set_text_color(0, 0, 0)
        self.cell(col_width, (row_height * spacing), str(value), border=1, fill=1, align="C")
        self.ln(7)

    def draw_resume_data(self, data):
        """Render the run summary (counter rows) next to the pie chart image."""
        self.set_draw_color(212, 212, 212)
        self.set_fill_color(92, 116, 144)
        self.set_text_color(255, 255, 255)
        self.set_left_margin(30)
        self.ln(5)
        self.set_font("Arial", size=9)
        # Pie chart is expected to have been rendered to temp/pie.png
        # beforehand by the caller -- TODO confirm which component writes it.
        self.image("temp/pie.png", 98, 30, 90, 80)
        self.draw_resume_data_row("Total test case", data.get_number_of_test_case())
        self.draw_resume_data_row("Passed test case", data.get_passed_test_case())
        self.draw_resume_data_row("Failed test case", data.get_failed_test_case())
        self.draw_resume_data_row("Total steps", data.get_total_steps())
        self.draw_resume_data_row("Passed steps", data.get_total_passed_steps())
        self.draw_resume_data_row("Failed steps", data.get_total_failed_steps())
        self.ln(30)

    def data_table(self, data):
        """Render the per-test-case table (one colored row per case)."""
        self.set_font("Arial", size=13)
        self.set_left_margin(80)
        self.cell(0, 0, "TABLE OF TEST CASE")
        self.set_left_margin(30)
        self.ln(8)
        self.set_font("Arial", size=6)
        self.set_draw_color(212, 212, 212)
        self.set_fill_color(92, 116, 144)
        self.set_text_color(255, 255, 255)
        self.set_left_margin(30)
        spacing = 3
        col_width = self.w / 7
        row_height = self.font_size
        # Header cells
        self.cell(col_width, (row_height * spacing),
                  txt="Test Case", border=1, fill=1, align='C')
        self.cell(col_width, (row_height * spacing),
                  txt="Passed Steps", border=1, fill=1, align='C')
        self.cell(col_width, (row_height * spacing),
                  txt="Failed Steps", border=1, fill=1, align='C')
        self.cell(col_width, (row_height * spacing),
                  txt="Total Steps", border=1, fill=1, align='C')
        self.cell(col_width, (row_height * spacing),
                  txt="% Steps", border=1, fill=1, align='C')
        self.ln(6)
        self.set_left_margin(30)
        # One row per test case; green when passed, red when failed.
        for i in range(data.get_number_of_test_case()):
            test_d = data.get_test_case_by_order(i)
            data_list = [test_d.get_name(),
                         test_d.get_passed_steps(),
                         test_d.get_failed_steps(),
                         test_d.get_total_steps(),
                         str(test_d.get_passed_percentage(1)) + "%"]
            for element in data_list:
                if test_d.get_status() == "pass":
                    self.set_fill_color(34, 134, 58)
                else:
                    self.set_fill_color(146, 46, 46)
                self.cell(col_width, (row_height * spacing),
                          txt=str(element), border=1, fill=1, align='C')
            self.ln(6)
        self.ln(10)

    def check_end(self, distance=250):
        """Start a new page once the vertical cursor is past ``distance``."""
        y = self.get_y()
        if y > distance:
            self.add_page()

    def case_by_case(self, data):
        """Render the FAILED STEPS section: for each failed case, list its
        failed steps with status, execution time, method source and error."""
        self.set_text_color(0, 0, 0)
        self.set_draw_color(48, 62, 88)
        self.set_font("Arial", size=13)
        self.set_left_margin(87)
        self.check_end(190)
        self.cell(0, 0, "FAILED STEPS")
        self.set_left_margin(00)
        self.ln(10)
        self.set_font("Arial", size=11)
        for i in range(data.get_number_of_test_case()):
            self.set_left_margin(0)
            test_d = data.get_test_case_by_order(i)
            # Fully passed cases are skipped entirely.
            if test_d.get_status() == "pass":
                continue
            self.set_font("Arial", size=11)
            self.set_text_color(255, 255, 255)
            self.set_draw_color(212, 212, 212)
            # NOTE(review): the "pass" branch here is unreachable because of
            # the continue above; the title bar is always red.
            if test_d.get_status() == "pass":
                self.set_fill_color(34, 134, 58)
            else:
                self.set_fill_color(146, 46, 46)
            self.check_end(200)
            self.set_left_margin(30)
            self.cell(150, self.font_size + 2, txt=test_d.get_name(), border=1, fill=1, align="C")
            self.set_left_margin(0)
            self.ln(11)
            self.set_font("Arial", size=8)
            # Steps are stored 1-based (see get_step_by_order usage: j+1).
            for j in range(test_d.get_number_of_steps()):
                self.set_left_margin(0)
                self.set_font("Arial", size=8)
                step = test_d.get_step_by_order(j+1)
                if step.get_status() == "fail":
                    x = self.get_x()
                    y = self.get_y()
                    title_string = "(Step " + str(step.get_order()) + "). " + step.get_description()
                    # Horizontal separator above the step title
                    self.line(x+50, y + 2, 150, y + 2)
                    self.check_end()
                    self.set_text_color(0, 0, 0)
                    self.cell(0, 0, txt=title_string, align="C")
                    self.ln(5)
                    self.set_font("Arial", size=6)
                    self.check_end()
                    self.cell(0, 3, txt="status: " + str(step.get_status()), align="C")
                    self.ln(3)
                    self.check_end()
                    self.cell(0, 3, txt="execution time: " + str(step.get_time()), align="C")
                    self.ln(3)
                    # Light-blue box: the step's method/source text
                    self.set_fill_color(170, 211, 231)
                    self.set_left_margin(50)
                    self.multi_cell(100, 3, txt=str(step.get_method()), border=1, fill=1)
                    self.set_left_margin(0)
                    self.ln(3)
                    # Red box: the error message
                    self.set_fill_color(212, 109, 109)
                    self.set_left_margin(50)
                    self.check_end()
                    # NOTE(review): StepData defaults error_message to a
                    # placeholder string, so this check is presumably always
                    # true -- confirm against StepData.
                    if step.get_error_message() is not None:
                        self.multi_cell(100, 3, txt=step.get_error_message(), border=1, fill=1)
                    self.set_left_margin(0)
                    self.ln(5)
            self.set_left_margin(0)
            self.ln(10)
import tkinter as tk
from sallust import Constants
class DataTable(tk.Frame):
    """Grid summarising step statistics for every test case.

    Each row shows a test case's name (coloured by pass/fail status)
    followed by its passed, failed and total step counts and the passed
    percentage.
    """

    def __init__(self, parent, controller, data=None):
        """
        :type parent: (tk.Frame)
        :type controller: (Window)
        :param data: results object; may also be supplied later via set_data()
        """
        tk.Frame.__init__(self, parent)
        self.controller = controller
        self.data = data
        self.configure(bg=Constants.dark_color)
        # Grid position where the table starts.
        self.index_row = 1
        self.index_column = 2
        self.rowconfigure(0, minsize=40)
        for i in range(7):
            self.columnconfigure(i, weight=1)

    def set_data(self, data):
        """Replace the results object used to populate the table."""
        self.data = data

    def update_all(self):
        """Rebuild the header row and all data rows."""
        self.create_title_row()
        self.create_row()

    def _make_label(self, text, bg):
        """Create one table cell with the shared font/colour settings."""
        return tk.Label(self,
                        text=text,
                        font=Constants.text_font(),
                        fg=Constants.text_color,
                        bg=bg,
                        bd=10)

    def create_title_row(self):
        """Create the four column header cells."""
        headers = ("Passed Steps", "Failed steps", "Total steps", "% steps")
        for offset, text in enumerate(headers):
            label = self._make_label(text, Constants.medium_color)
            label.grid(row=self.index_row,
                       column=self.index_column + offset,
                       sticky="nwes",
                       padx=1, pady=1)

    def create_row(self):
        """Create one row of cells per test case.

        (The original implementation first iterated over every step of every
        test case without using the results, then repeated the outer loop
        with a shadowed index variable; that dead pass has been removed.)
        """
        for i in range(self.data.get_number_of_test_case()):
            test_d = self.data.get_test_case_by_order(i)
            row = self.index_row + i + 1
            if test_d.get_status() == "pass":
                color = Constants.green_color
            else:
                color = Constants.red_color
            name_label = self._make_label(str(test_d.get_name()), color)
            name_label.grid(row=row,
                            column=self.index_column - 1,
                            sticky="nwes",
                            padx=1, pady=1)
            values = (
                str(test_d.get_passed_steps()),
                str(test_d.get_failed_steps()),
                str(test_d.get_total_steps()),
                str(test_d.get_passed_percentage(1)) + "%",
            )
            for offset, value in enumerate(values):
                cell = self._make_label(value, Constants.ultra_light_color)
                cell.grid(row=row,
                          column=self.index_column + offset,
                          sticky="nwes",
                          padx=1, pady=1)
import tkinter as tk
import numpy as np
import os
import matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from matplotlib.widgets import Slider
from sallust.GUI.GraphicsWidgets import TestCaseTable, StepsTable
from sallust import Constants
class Graphics(tk.Frame):
    """Dashboard frame: a pass/fail pie chart, two summary tables and a
    per-test-case bar chart (with a panning slider when there are more
    than seven test cases)."""

    def __init__(self, parent, controller):
        """
        :type parent: (tk.Frame)
        :type controller: (Window)
        """
        tk.Frame.__init__(self, parent)
        # Assign the arguments
        self.controller = controller
        # Configure the frame
        self.configure(bg=Constants.dark_color)
        self.rowconfigure(0, weight=50)
        self.rowconfigure(1, weight=8000)
        self.columnconfigure(0, weight=1)
        # Create Variables
        self.canvas = None
        self.canvas_bars_frame = None
        self.scroll_value = 0.0
        self.higher_test = 0   # max number of steps in any single test case
        self.one = None        # Slider used to pan the bar chart
        self.sizes = [0, 0]
        self.test_length = 0   # number of test cases
        self.passed = None     # per-test-case passed-step counts
        self.failed = None     # per-test-case failed-step counts
        self.labels = "Passed", "Failed"
        self.colors = Constants.green_color, Constants.red_color
        font = {'family': 'Verdana',
                'weight': 'bold',
                'size': 8}
        # Configure matplotlib
        matplotlib.rc('font', **font)
        matplotlib.rcParams['text.color'] = Constants.text_color
        # Create the main frames
        self.top_frame = tk.Frame(self, bg=Constants.dark_color)
        self.top_frame.grid(row=0, column=0, sticky="nwes")
        self.top_frame.columnconfigure(0, weight=40)
        self.top_frame.columnconfigure(1, weight=500)
        self.top_frame.rowconfigure(0, weight=1)
        self.top_right_frame = tk.Frame(self.top_frame, bg=Constants.dark_color, bd=10)
        self.top_right_frame.grid(row=0, column=0, sticky="nwes")
        self.top_right_frame.columnconfigure(0, weight=1)
        self.table_frame = TestCaseTable.TestCaseTable(self.top_right_frame, self.controller, self)
        self.table_frame.grid(row=0, column=0, sticky="nwes", pady=3)
        self.steps_frame = StepsTable.StepsTable(self.top_right_frame, self.controller, self)
        self.steps_frame.grid(row=1, column=0, sticky="nwes", pady=3)
        self.pie_frame = tk.Frame(self.top_frame, bg=Constants.dark_color)
        self.pie_frame.grid(row=0, column=1, sticky="nwes", pady=3)
        self.bars_frame = tk.Canvas(self, bg=Constants.light_color, bd=10)
        self.bars_frame.grid(row=1, column=0, sticky="nwes")
        # Create the figures of the plot
        self.figure_pie = Figure(figsize=(1, 1), dpi=110)
        self.figure_bars = Figure(figsize=(2, 1), dpi=110)
        self.figure_pie.set_facecolor(Constants.light_color)
        self.figure_bars.set_facecolor(Constants.light_color)
        self.plot = self.figure_pie.add_subplot(111)
        self.plot_bars = self.figure_bars.add_subplot(111)
        # Create the canvas
        self.canvas = FigureCanvasTkAgg(self.figure_pie, self.pie_frame)
        self.canvas_bars = FigureCanvasTkAgg(self.figure_bars, self.bars_frame)
        # Configure the canvas widget
        self.canvas.get_tk_widget().configure(bg=Constants.dark_color, bd=10)
        self.canvas.get_tk_widget().pack(side=tk.TOP, fill="both", expand=1)
        self.canvas_bars.get_tk_widget().configure(bg=Constants.dark_color, bd=10)
        self.canvas_bars.get_tk_widget().pack()
        # Draw an empty plot
        self.draw()

    def update_graphics(self):
        """Redraw charts and tables, then save the pie chart to temp/pie.png."""
        self.draw()
        self.table_frame.update_table()
        self.steps_frame.update_table()
        if not os.path.exists("temp/"):
            os.makedirs("temp/")
        self.figure_pie.savefig("temp/pie.png")

    def draw(self):
        """Rebuild the pie and bar charts from the controller's current data."""
        # Clear the plots
        self.figure_pie.clear()
        self.figure_bars.clear()
        # Get the test data (controller.data_tests holds (passed, failed) pairs
        # per test case — presumably; confirm against the controller).
        self.passed = [x[0] for x in self.controller.data_tests]
        self.failed = [y[1] for y in self.controller.data_tests]
        steps = []
        # Get the number of steps of every test case
        for i in range(len(self.passed)):
            steps.append(self.passed[i] + self.failed[i])
        # Set the higher number of steps
        self.higher_test = 0
        # get the highest number of steps of a single test case
        for element in steps:
            if element > self.higher_test:
                self.higher_test = element
        # get the number of test case
        self.test_length = len(steps)
        # get evenly spaced values between the number of test
        ind = np.arange(self.test_length)
        # Create the Pie Plot
        self.plot = self.figure_pie.add_subplot(111)
        self.plot.set_title("Results of the test run")
        sizes = [self.controller.test_passed, self.controller.test_failed]
        self.plot.pie(sizes,
                      labels=self.labels,
                      shadow=True,
                      colors=self.colors,
                      startangle=90,
                      autopct='%1.1f%%',
                      counterclock=False)
        # Draw Pie plot
        self.canvas.draw()
        # Create the Bar plot
        self.plot_bars = self.figure_bars.add_subplot(111, ylabel="Steps", xlabel="")
        # Set Title and Colors
        self.plot_bars.set_title("Case by Case")
        self.plot_bars.spines['bottom'].set_color(Constants.white)
        self.plot_bars.spines['top'].set_color(Constants.white)
        self.plot_bars.spines['right'].set_color(Constants.white)
        self.plot_bars.spines['left'].set_color(Constants.white)
        self.plot_bars.yaxis.label.set_color(Constants.white)
        self.plot_bars.xaxis.label.set_color(Constants.white)
        self.plot_bars.set_facecolor(Constants.light_color)
        # Only if there is any data adjust the axis
        if self.test_length > 0:
            self.plot_bars.axis([-0.5, self.test_length - 0.5, 0, self.higher_test * 1.3])
        # Full-height bars are drawn in red first, then passed counts in green
        # on top, so the visible red remainder is the failed-step count.
        self.plot_bars.bar(ind, steps, 0.2, color=Constants.red_color)
        self.plot_bars.bar(ind, self.passed, 0.2, color=Constants.green_color)
        # Set Ticks
        self.plot_bars.set_xticks(ind)
        self.plot_bars.set_xticklabels(range(1, self.test_length + 1))
        self.plot_bars.tick_params(direction='out',
                                   length=6,
                                   width=2,
                                   grid_alpha=0.5,
                                   color=Constants.white,
                                   labelcolor=Constants.white)
        # If there are more than seven test case add a slider
        if self.test_length > 7:
            ax = self.figure_bars.add_axes([0.125, 0.855, 0.775, 0.03])
            self.one = Slider(ax, "", 3, self.test_length - 4)
            del self.one.label  # hide the slider's text label
            self.one.on_changed(self.update_slider)
            self.update_slider(0.01)
        self.canvas_bars.get_tk_widget().configure(bg=Constants.dark_color, bd=10)
        self.canvas_bars.get_tk_widget().pack(expand=1, fill="both")
        self.canvas_bars_frame = self.canvas_bars.get_tk_widget()
        self.canvas_bars.draw()

    def update_slider(self, val):
        """Pan the bar chart to a 7-case window centred on the slider value."""
        pos = self.one.val
        self.one.valtext.set_text("")
        self.plot_bars.axis([pos - 3.5, pos + 3.5, 0, self.higher_test * 1.3])
        self.figure_bars.canvas.draw_idle()
import tkinter as tk
from sallust import Constants as Constants
import numpy as np
class TestCaseTable(tk.Frame):
    """2x2 summary table of passed/failed test-case counts and percentages."""

    def __init__(self, parent, controller, graphics, **kw):
        """
        This class creates a table that represents the passed and failed test cases.

        :type parent: (tk.Frame)
        :type controller: (Window)
        :type graphics: (Graphics)
        """
        super().__init__(parent, **kw)
        # Assign the arguments
        self.parent = parent
        self.controller = controller
        self.graphics = graphics
        # Configure the Frame
        self.configure(bg=Constants.light_color)
        # Grid position where the table starts.
        self.index_row = 2
        self.index_column = 2
        # Create the table cells.
        self.data_table_column = self._make_label(" Nº Test ", Constants.medium_color)
        self.data_table_column_percentage = self._make_label("% Test ", Constants.medium_color)
        self.data_table_passed_header = self._make_label("Passed", Constants.green_color)
        self.data_table_failed_header = self._make_label("Failed ", Constants.red_color)
        self.data_table_passed_result = self._make_label("0", Constants.ultra_light_color)
        self.data_table_failed_result = self._make_label("0", Constants.ultra_light_color)
        self.data_table_passed_percentage_result = self._make_label("0", Constants.ultra_light_color)
        self.data_table_failed_percentage_result = self._make_label("0", Constants.ultra_light_color)
        # Configure table rows and columns
        self.columnconfigure(0, weight=1)
        self.columnconfigure(5, weight=1)
        self.rowconfigure(2, weight=1)
        self.rowconfigure(0, minsize=40)
        self.rowconfigure(5, minsize=40)
        # Grid table elements
        self.data_table_column.grid(row=self.index_row,
                                    column=self.index_column + 1,
                                    sticky="nwes", pady=1, padx=1)
        self.data_table_column_percentage.grid(row=self.index_row,
                                               column=self.index_column + 2,
                                               sticky="nwes", pady=1, padx=1)
        self.data_table_passed_header.grid(row=self.index_row + 1,
                                           column=self.index_column,
                                           sticky="nwes", pady=1, padx=1)
        self.data_table_failed_header.grid(row=self.index_row + 2,
                                           column=self.index_column,
                                           sticky="nwes", pady=1, padx=1)
        self.data_table_passed_result.grid(row=self.index_row + 1,
                                           column=self.index_column + 1,
                                           sticky="nwes", pady=1, padx=1)
        self.data_table_failed_result.grid(row=self.index_row + 2,
                                           column=self.index_column + 1,
                                           sticky="nwes", pady=1, padx=1)
        self.data_table_passed_percentage_result.grid(row=self.index_row + 1,
                                                      column=self.index_column + 2,
                                                      sticky="nwes", pady=1, padx=1)
        self.data_table_failed_percentage_result.grid(row=self.index_row + 2,
                                                      column=self.index_column + 2,
                                                      sticky="nwes", pady=1, padx=1)

    def _make_label(self, text, bg):
        """Create one table cell with the shared font/colour settings."""
        return tk.Label(self,
                        text=text,
                        font=Constants.text_font(),
                        fg=Constants.text_color,
                        bg=bg,
                        bd=10)

    def update_table(self):
        """Refresh counts and percentages from the controller's totals.

        Guards against division by zero when no test has been recorded yet
        (the original raised ZeroDivisionError in that case).
        """
        passed = self.controller.test_passed
        failed = self.controller.test_failed
        total = passed + failed
        if total == 0:
            passed_per = failed_per = 0.0
        else:
            passed_per = np.round((passed / total) * 100, 1)
            failed_per = np.round((failed / total) * 100, 1)
        # Update the table values
        self.data_table_passed_result.configure(text=passed)
        self.data_table_failed_result.configure(text=failed)
        self.data_table_passed_percentage_result.configure(text=passed_per)
        self.data_table_failed_percentage_result.configure(text=failed_per)
import tkinter as tk
import numpy as np
from sallust import Constants as Constants
class StepsTable(tk.Frame):
    """2x2 summary table of passed/failed step counts and percentages."""

    def __init__(self, parent, controller, graphics, **kw):
        """
        This class creates a table that represents the passed and failed steps.

        :type parent: (tk.Frame)
        :type controller: (Window)
        :type graphics: (Graphics)
        """
        super().__init__(parent, **kw)
        # Assign arguments
        self.controller = controller
        self.steps_frame = parent
        self.graphics = graphics
        # Grid position where the table starts.
        self.index_row = 2
        self.index_column = 2
        # configure frame
        self.configure(bg=Constants.light_color)
        # Create table cells.
        self.steps_table_column_steps = self._make_label("Nº Steps", Constants.medium_color)
        self.steps_table_column_percentage = self._make_label("% Steps", Constants.medium_color)
        self.steps_table_passed_header = self._make_label("Passed", Constants.green_color)
        self.steps_table_failed_header = self._make_label("Failed ", Constants.red_color)
        self.steps_table_passed_result = self._make_label("0", Constants.ultra_light_color)
        self.steps_table_failed_result = self._make_label("0", Constants.ultra_light_color)
        self.steps_table_passed_percentage_result = self._make_label("0", Constants.ultra_light_color)
        self.steps_table_failed_percentage_result = self._make_label("0", Constants.ultra_light_color)
        # Grid elements
        self.steps_table_column_steps.grid(row=self.index_row,
                                           column=self.index_column + 1,
                                           sticky="nwes", pady=1, padx=1)
        self.steps_table_column_percentage.grid(row=self.index_row,
                                                column=self.index_column + 2,
                                                sticky="nwes", pady=1, padx=1)
        self.steps_table_passed_header.grid(row=self.index_row + 1,
                                            column=self.index_column,
                                            sticky="nwes", pady=1, padx=1)
        self.steps_table_failed_header.grid(row=self.index_row + 2,
                                            column=self.index_column,
                                            sticky="nwes", pady=1, padx=1)
        self.steps_table_passed_result.grid(row=self.index_row + 1,
                                            column=self.index_column + 1,
                                            sticky="nwes", pady=1, padx=1)
        self.steps_table_failed_result.grid(row=self.index_row + 2,
                                            column=self.index_column + 1,
                                            sticky="nwes", pady=1, padx=1)
        self.steps_table_passed_percentage_result.grid(row=self.index_row + 1,
                                                       column=self.index_column + 2,
                                                       sticky="nwes", pady=1, padx=1)
        self.steps_table_failed_percentage_result.grid(row=self.index_row + 2,
                                                       column=self.index_column + 2,
                                                       sticky="nwes", pady=1, padx=1)
        self.configure_cells()

    def _make_label(self, text, bg):
        """Create one table cell with the shared font/colour settings."""
        return tk.Label(self,
                        text=text,
                        font=Constants.text_font(),
                        fg=Constants.text_color,
                        bg=bg,
                        bd=10)

    def configure_cells(self):
        """Configure row/column weights so the table stays centred."""
        self.columnconfigure(0, weight=1)
        self.columnconfigure(5, weight=1)
        self.rowconfigure(2, weight=1)
        self.rowconfigure(0, minsize=40)
        self.rowconfigure(5, minsize=40)

    def update_table(self):
        """Recompute step totals from the Graphics data and refresh the cells.

        Guards against division by zero when no step has been recorded yet
        (the original raised ZeroDivisionError in that case).
        """
        # graphics.passed / graphics.failed hold per-test-case step counts.
        passed = sum(self.graphics.passed)
        failed = sum(self.graphics.failed)
        total = passed + failed
        if total == 0:
            passed_per = failed_per = 0.0
        else:
            passed_per = np.round((passed / total) * 100, 1)
            failed_per = np.round((failed / total) * 100, 1)
        # Update the table representation values
        self.steps_table_passed_result.configure(text=passed)
        self.steps_table_failed_result.configure(text=failed)
        self.steps_table_passed_percentage_result.configure(text=passed_per)
        self.steps_table_failed_percentage_result.configure(text=failed_per)
import argparse
import struct

# Patches the hard-coded latitude/longitude map bounds inside a CRiSP
# Windows executable by overwriting the IEEE-754 doubles embedded in the
# binary.  Each double is stored as two 4-byte halves at non-adjacent
# addresses, so halves are read/written separately.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Patch CRiSP map coordinate bounds.",
        epilog="Note: due to silly compiler optimizations, the longitude might be off by +/- 0.00005, "
        "but that's probably fine :)",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument(
        "-i",
        default="crisphv3.exe",
        metavar="INFILE",
        help="input CRiSP executable filename",
    )
    parser.add_argument(
        "-o",
        default="crisphv3_new.exe",
        metavar="OUTFILE",
        help="output CRiSP executable filename",
    )
    parser.add_argument(
        "--lat",
        type=float,
        nargs=2,
        metavar=("MIN", "MAX"),
        default=argparse.SUPPRESS,
        help="latitude bounds",
        required=True,
    )
    parser.add_argument(
        "--lon",
        type=float,
        nargs=2,
        metavar=("MIN", "MAX"),
        default=argparse.SUPPRESS,
        help="longitude bounds. Note that these are negated (degrees W)",
        required=True,
    )
    args = parser.parse_args()
    lat = args.lat
    lon = args.lon
    # Normalise each pair so index 0 holds the minimum.
    if lat[0] > lat[1]:
        lat[0], lat[1] = lat[1], lat[0]
    if lon[0] > lon[1]:
        lon[0], lon[1] = lon[1], lon[0]
    # Difference between the in-memory virtual addresses used below and
    # their position in the file on disk — presumably image base plus a
    # section offset; TODO confirm against the PE headers.
    file_offset = 0x400C00
    with open(args.i, "rb") as f:
        s = bytearray(f.read())

    def ud(b):
        # Decode 8 bytes as a little-endian IEEE-754 double.
        return struct.unpack("<d", b)[0]

    def pd(d):
        # Encode a float as 8 little-endian double bytes.
        return struct.pack("<d", d)

    def get4(addr):
        # Read 4 bytes at virtual address *addr*.
        return s[addr - file_offset : addr - file_offset + 4]

    def put4(addr, b):
        # Overwrite 4 bytes at virtual address *addr* with *b* (4 bytes).
        s[addr - file_offset : addr - file_offset + 4] = b

    # Reassemble and report the current bounds (low half + high half).
    old_lat_lo = ud(get4(0x41FA13) + get4(0x41FA2B))
    old_lat_hi = ud(get4(0x41FA1A) + get4(0x41FA32))
    old_lon_lo = ud(get4(0x41F9FB) + get4(0x41FA00))
    old_lon_hi = ud(get4(0x41F9FB) + get4(0x41FA24))
    print(
        f"old bounds (copy 1): lat=[{old_lat_lo}, {old_lat_hi}], lon=[{old_lon_lo}, {old_lon_hi}]"
    )
    old_lat_lo = ud(get4(0x41FA78) + get4(0x41FA90))
    old_lat_hi = ud(get4(0x41FA7F) + get4(0x41FA97))
    old_lon_lo = ud(get4(0x41F9FB) + get4(0x41FA00))
    old_lon_hi = ud(get4(0x41F9FB) + get4(0x41FA89))
    print(
        f"old bounds (copy 2): lat=[{old_lat_lo}, {old_lat_hi}], lon=[{old_lon_lo}, {old_lon_hi}]"
    )
    # don't touch 0x41f9fb: it's shared between lon_lo and lon_hi
    # fortunately, it's the least significant part of lon_lo and lon_hi
    # (hence the +/- 0.00005 longitude error mentioned in the epilog).
    lon_lo = pd(lon[0])
    put4(0x41FA00, lon_lo[4:])
    lon_hi = pd(lon[1])
    put4(0x41FA24, lon_hi[4:])
    put4(0x41FA89, lon_hi[4:])
    lat_lo = pd(lat[0])
    put4(0x41FA13, lat_lo[:4])
    put4(0x41FA2B, lat_lo[4:])
    put4(0x41FA78, lat_lo[:4])
    put4(0x41FA90, lat_lo[4:])
    lat_hi = pd(lat[1])
    put4(0x41FA1A, lat_hi[:4])
    put4(0x41FA32, lat_hi[4:])
    put4(0x41FA7F, lat_hi[:4])
    put4(0x41FA97, lat_hi[4:])
    # Read back the patched bytes so the user can verify the result.
    new_lat_lo = ud(get4(0x41FA13) + get4(0x41FA2B))
    new_lat_hi = ud(get4(0x41FA1A) + get4(0x41FA32))
    new_lon_lo = ud(get4(0x41F9FB) + get4(0x41FA00))
    new_lon_hi = ud(get4(0x41F9FB) + get4(0x41FA24))
    print(
        f"new bounds (copy 1): lat=[{new_lat_lo}, {new_lat_hi}], lon=[{new_lon_lo}, {new_lon_hi}]"
    )
    new_lat_lo = ud(get4(0x41FA78) + get4(0x41FA90))
    new_lat_hi = ud(get4(0x41FA7F) + get4(0x41FA97))
    new_lon_lo = ud(get4(0x41F9FB) + get4(0x41FA00))
    new_lon_hi = ud(get4(0x41F9FB) + get4(0x41FA89))
    print(
        f"new bounds (copy 2): lat=[{new_lat_lo}, {new_lat_hi}], lon=[{new_lon_lo}, {new_lon_hi}]"
    )
    with open(args.o, "wb") as f:
        f.write(s)
from abc import ABC, abstractmethod
from dataclasses import dataclass, replace, field
from typing import List, Tuple
import struct
def u32(b):
    """Decode *b* (exactly 4 bytes) as an unsigned little-endian 32-bit int."""
    (value,) = struct.unpack("<I", b)
    return value


def p32(x):
    """Encode *x* as 4 little-endian bytes (unsigned 32-bit)."""
    return struct.pack("<I", x)


def prefix_length(b):
    """Return *b* prefixed with its length encoded as a little-endian u32."""
    return p32(len(b)) + b
class Buffer:
    """Cursor-based reader over an in-memory bytes object."""

    def __init__(self, b):
        self.raw = b
        self.fp = 0  # current read position

    def seek(self, n):
        """Move the read cursor to absolute offset *n*."""
        self.fp = n

    def read(self, n):
        """Return the next *n* bytes and advance the cursor."""
        start = self.fp
        self.fp = start + n
        return self.raw[start:self.fp]

    def read_int(self):
        """Read a little-endian u32 from the current position."""
        return u32(self.read(4))

    def read_str(self):
        """Read a u32 length prefix followed by that many bytes."""
        return self.read(self.read_int())
class Serializable(ABC):
    """Interface for ZHP records that round-trip between bytes and objects."""

    @classmethod
    @abstractmethod
    def parse(cls, b: Buffer):  # parse from the current b.fp and advance b.fp
        """Parse one record starting at ``b.fp``; advances the cursor."""
        pass

    @abstractmethod
    def serialize(self):
        """Return this record encoded as bytes."""
        pass
@dataclass
class Style(Serializable):
    """A styled span of a line's text, stored as four u32 fields."""

    start: int
    end: int
    type: int
    extra_info: int = 0

    # Known style type codes.
    STYLE_LINK = 0x01
    STYLE_BOLD = 0x03
    STYLE_ITALICS = 0x04
    STYLE_MONO = 0x06
    STYLE_H1 = 0x08
    STYLE_H2 = 0x09
    STYLE_H3 = 0x0A
    STYLE_H4 = 0x0B
    STYLE_H5 = 0x0C
    STYLE_H6 = 0x0D
    STYLE_IMAGE = 0x0F
    STYLE_OL = 0x11
    STYLE_UL = 0x15
    STYLE_CODEBLOCK = 0x20

    @classmethod
    def parse(cls, b):
        """Read the four u32 fields (start, end, type, extra_info)."""
        fields = [b.read_int() for _ in range(4)]
        return cls(*fields)

    def serialize(self):
        """Pack the four fields as consecutive little-endian u32s."""
        return struct.pack("<4I", self.start, self.end, self.type, self.extra_info)
@dataclass
class Line(Serializable):
    """One line of page text together with its style spans."""

    text: bytes = b""
    unknown_fields: Tuple[int, ...] = (0, 0, 0, 0, 0, 0, 0, 0)
    styles: List[Style] = field(default_factory=list)

    # Observed ``unknown_fields`` values for common layouts.
    FIELDS_HRULE = (1, 0, 0, 0, 0, 1, 0, 0)
    FIELDS_OL_FIRST = (0, 1, 1, 0, 1, 0, 0, 1)
    FIELDS_OL_CONT = (0, 1, 1, 0, 1, 0, 0, 0)
    FIELDS_UL_FIRST = (0, 1, 1, 1, 0, 0, 0, 1)
    FIELDS_UL_CONT = (0, 1, 1, 1, 0, 0, 0, 0)

    @classmethod
    def parse(cls, b):
        """Read one line: text length, 8 u32 flags, the text, then styles
        terminated by a sentinel style whose ``start`` is 0xFFFF."""
        text_length = b.read_int()
        flags = tuple(b.read_int() for _ in range(8))
        body = b.read(text_length)
        spans = []
        while True:
            span = Style.parse(b)
            if span.start == 0xFFFF:
                break
            spans.append(span)
        return cls(text=body, unknown_fields=flags, styles=spans)

    def serialize(self):
        parts = [p32(len(self.text))]
        parts.extend(p32(flag) for flag in self.unknown_fields)
        parts.append(self.text)
        parts.extend(span.serialize() for span in self.styles)
        parts.append(b"\xff\xff\x00\x00" * 4)  # sentinel terminator
        return b"".join(parts)

    # takes the unknown_fields of the left argument
    def __add__(self, other):
        """Concatenate two lines, shifting the right line's style offsets
        by the length of the left line's text."""
        offset = len(self.text)
        shifted = [replace(span) for span in self.styles]  # copies
        shifted.extend(
            replace(span, start=span.start + offset, end=span.end + offset)
            for span in other.styles
        )
        return Line(text=self.text + other.text,
                    unknown_fields=self.unknown_fields,
                    styles=shifted)
@dataclass
class Page(Serializable):
    """A help page: title/name metadata plus its lines."""

    title: bytes
    title_id: int  # ???
    page_name: bytes
    page_id: int  # ???
    lines: List[Line]

    @classmethod
    def parse(cls, b):
        b.read(4)  # record length (ignored)
        num_lines = b.read_int()
        title = b.read_str()
        title_id = b.read_int()
        page_name = b.read_str()
        page_id = b.read_int()
        page_lines = [Line.parse(b) for _ in range(num_lines)]
        return cls(
            title=title,
            title_id=title_id,
            page_name=page_name,
            page_id=page_id,
            lines=page_lines,
        )

    def serialize(self):
        header = [
            p32(len(self.lines)),
            prefix_length(self.title),
            p32(self.title_id),
            prefix_length(self.page_name),
            p32(self.page_id),
        ]
        body = b"".join(header + [line.serialize() for line in self.lines])
        return prefix_length(body)
@dataclass
class Image(Serializable):
    """Raw image blob, stored length-prefixed with a leading zero u32."""

    data: bytes

    @classmethod
    def parse(cls, b):
        length = b.read_int()
        assert b.read_int() == 0  # ??? — always zero in observed files
        return cls(data=b.read(length - 4))

    def serialize(self):
        return prefix_length(p32(0) + self.data)
@dataclass
class TOCEntry(Serializable):
    """One table-of-contents entry: a title and its numeric id."""

    title: bytes
    id: int

    @classmethod
    def parse(cls, b):
        name = b.read_str()
        entry_id = b.read_int()
        return cls(title=name, id=entry_id)

    def serialize(self):
        return prefix_length(self.title) + p32(self.id)
@dataclass
class TOC(Serializable):
    """The file's table of contents."""

    toc: List[TOCEntry]

    @classmethod
    def parse(cls, b):
        b.read(4)  # record length (ignored)
        count = b.read_int()
        entries = [TOCEntry.parse(b) for _ in range(count)]
        return cls(toc=entries)

    def serialize(self):
        body = p32(len(self.toc)) + b"".join(entry.serialize() for entry in self.toc)
        return prefix_length(body)
@dataclass
class Link(Serializable):
    """One hyperlink target string with its numeric id."""

    link: bytes
    id: int

    @classmethod
    def parse(cls, b):
        target = b.read_str()
        target_id = b.read_int()
        return cls(link=target, id=target_id)

    def serialize(self):
        return prefix_length(self.link) + p32(self.id)
@dataclass
class Links(Serializable):
    """The file's link table."""

    links: List[Link]

    @classmethod
    def parse(cls, b):
        b.read(4)  # record length (ignored)
        count = b.read_int()
        entries = [Link.parse(b) for _ in range(count)]
        return cls(links=entries)

    def serialize(self):
        body = p32(len(self.links)) + b"".join(entry.serialize() for entry in self.links)
        return prefix_length(body)
@dataclass
class DirectoryEntry(Serializable):
    """One directory record: id, absolute file offset, and record type."""

    id: int
    offset: int
    type: int

    @classmethod
    def parse(cls, b):
        entry_id = b.read_int()
        entry_offset = b.read_int()
        entry_type = b.read_int()
        return cls(id=entry_id, offset=entry_offset, type=entry_type)

    def serialize(self):
        return p32(self.id) + p32(self.offset) + p32(self.type)
@dataclass
class Directory(Serializable):
    """The record directory stored at the end of the file."""

    directory: List[DirectoryEntry]

    @classmethod
    def parse(cls, b):
        count = b.read_int()
        entries = [DirectoryEntry.parse(b) for _ in range(count)]
        return cls(directory=entries)

    def serialize(self):
        body = b"".join(entry.serialize() for entry in self.directory)
        # A 16-byte 0xFFFF sentinel closes the directory.
        return p32(len(self.directory)) + body + b"\xff\xff\x00\x00" * 4
@dataclass
class ZHP(Serializable):
    """Top-level ZHELP file: pages, images, table of contents and links."""

    pages: dict
    images: dict
    toc: TOC
    links: Links

    @classmethod
    def parse(cls, b):
        """Parse a whole .zhp file from raw bytes or an existing Buffer."""
        if isinstance(b, bytes):  # was `type(b) == bytes`; isinstance is idiomatic
            b = Buffer(b)
        assert b.read(12) == b"ZHELP10000\x01\x00"
        directory_offset = b.read_int()
        b.seek(directory_offset)
        directory = Directory.parse(b)
        pages = {}
        images = {}
        toc = None
        links = None
        # Record types are mutually exclusive, so dispatch with elif.
        for entry in directory.directory:
            b.seek(entry.offset)
            if entry.type == 1:
                pages[entry.id] = Page.parse(b)
            elif entry.type == 2:
                images[entry.id] = Image.parse(b)
            elif entry.type == 3:
                toc = TOC.parse(b)
            elif entry.type == 4:
                links = Links.parse(b)
        return cls(pages=pages, images=images, toc=toc, links=links)

    def serialize(self):
        """Serialize back to .zhp bytes: header, records, then the directory."""
        header = b"ZHELP10000\x01\x00"
        entries = b""
        directory = []
        header_offset = len(header) + 4  # header plus 4-byte directory offset
        # pages
        for d_id, page in self.pages.items():
            directory.append(DirectoryEntry(id=d_id, offset=header_offset + len(entries), type=1))
            entries += page.serialize()
        # images
        for d_id, image in self.images.items():
            directory.append(DirectoryEntry(id=d_id, offset=header_offset + len(entries), type=2))
            entries += image.serialize()
        # toc
        directory.append(DirectoryEntry(id=0, offset=header_offset + len(entries), type=3))
        entries += self.toc.serialize()
        # links
        directory.append(DirectoryEntry(id=0, offset=header_offset + len(entries), type=4))
        entries += self.links.serialize()
        directory_bytes = Directory(directory=directory).serialize()
        header += p32(len(header) + 4 + len(entries))  # directory offset
        return header + entries + directory_bytes
import numpy as np
import math
from abc import ABC, abstractmethod
from .model import LinearModel
from .comparison import _extract_dfs
from .expression import Constant
class Score(ABC):
    """Base class for model-selection metrics.

    Wraps a fitted model and a scalar metric value (``_score``).
    ``higher_is_better`` records the metric's orientation; constructing
    with ``model=None`` yields a sentinel holding the worst possible
    value, so any real model's score beats it.
    """

    def __init__(self, model, higher_is_better):
        self.higher_is_better = higher_is_better
        self.model = model
        if model is None:
            # Sentinel: -inf when higher is better, +inf when lower is better.
            self._score = np.inf * (-1 if higher_is_better else 1)
        else:
            self._score = self.compute()

    @abstractmethod
    def compute(self):
        """Compute and return the metric value for ``self.model``."""
        pass

    def __str__(self):
        return "{} | {}".format(type(self).__name__, self._score)

    def compare(self, other):
        '''Return True if *other* scores strictly better than *self*.

        Note: despite the name, this compares in favour of *other* —
        callers use ``best.compare(candidate)`` to decide whether to adopt
        the candidate (see ``stepwise``).  The previous docstring claimed
        the opposite orientation.
        '''
        assert(type(self) is type(other))  # never compare different metric types
        if self.higher_is_better:
            return self._score < other._score
        else:
            return self._score > other._score
class RSquared(Score):
    """(Adjusted) coefficient of determination; higher is better."""

    def __init__(self, model, adjusted=False):
        self.adjusted = adjusted
        super(RSquared, self).__init__(
            model=model,
            higher_is_better=True
        )

    def __str__(self):
        return "R^2 ({}adjusted) | {}".format("" if self.adjusted else "un", self._score)

    def compute(self):
        ''' Calculate the (adjusted) R^2 value of the model on its training data.

        Returns:
            A real value of the computed R^2 value.
        '''
        X = self.model.training_data
        y = self.model.training_y
        pred = self.model.predict(X)
        sse = ((y.iloc[:, 0] - pred.iloc[:, 0]) ** 2).sum()
        ssto = ((y.iloc[:, 0] - y.iloc[:, 0].mean()) ** 2).sum()
        if self.adjusted:
            # Adjusted R^2 penalizes model size by dividing each sum of
            # squares by its degrees of freedom.  (The original code had
            # these two branches swapped, so `adjusted=True` returned the
            # plain R^2 and vice versa.)
            numerator = sse / (len(y) - len(self.model.training_x.columns) - 2)
            denominator = ssto / (len(y) - 1)
        else:
            numerator = sse
            denominator = ssto
        return 1 - numerator / denominator
class MSE(Score):
    """Mean squared error of the fitted model; lower is better."""

    def __init__(self, model):
        super(MSE, self).__init__(model=model, higher_is_better=False)

    def compute(self):
        """SSE divided by the error degrees of freedom."""
        error_df = _extract_dfs(self.model, dict_out=True)["error_df"]
        return self.model.get_sse() / error_df
class MallowsCp(Score):
    """Mallows' Cp statistic; lower is better."""

    def __init__(self, model):
        super(MallowsCp, self).__init__(
            model=model,
            higher_is_better=False,
        )

    def compute(self):
        """Cp = SSE / sigma^2 - n + 2p.

        (An unused ``_extract_dfs`` call in the original has been removed;
        its result was never read.)
        """
        sse = self.model.get_sse()
        sigma_sq = self.model.std_err_est ** 2
        n, p = self.model.n, self.model.p
        return sse / sigma_sq - n + (2 * p)
class AIC(Score):
    """Akaike information criterion; lower is better."""

    def __init__(self, model):
        super(AIC, self).__init__(model=model, higher_is_better=False)

    def compute(self):
        """AIC = 2 * (p - log-likelihood)."""
        return 2 * (self.model.p - self.model.log_likelihood())
class BIC(Score):
    """Bayesian information criterion; lower is better."""

    def __init__(self, model):
        super(BIC, self).__init__(model=model, higher_is_better=False)

    def compute(self):
        """BIC = ln(n) * p - 2 * log-likelihood."""
        penalty = math.log(self.model.n) * self.model.p
        return penalty - 2 * self.model.log_likelihood()
# Registry mapping user-facing metric names (as accepted by `stepwise`)
# to Score factories.
_metrics = dict(
    r_squared=RSquared,
    r_squared_adjusted=lambda model: RSquared(model=model, adjusted=True),
    mse=MSE,
    cp=MallowsCp,
    aic=AIC,
    bic=BIC,
)
def stepwise(full_model, metric_name, forward=False, naive=False, data=None, verbose=False):
    """Perform a stepwise model search (forward selection or backward elimination).

    Arguments:
        full_model - A LinearModel describing the full set of candidate terms.
                     Must already be fit unless `data` is supplied.
        metric_name - Name of the selection metric; one of _metrics' keys.
        forward - If True, forward selection starting from the intercept-only
                  model; otherwise backward elimination from `full_model`.
        naive - If True, ignore term-hierarchy constraints when adding or
                removing terms.
        data - Optional DataFrame; when given, `full_model` is (re)fit on it.
        verbose - If True, print every candidate model considered.

    Returns:
        A dict with keys 'forward', 'metric', 'metric_name' and 'best_model'.
    """
    if data is not None:
        full_model.fit(data)
    metric_name = metric_name.lower()
    ex_terms = full_model.ex
    re_term = full_model.re
    data = full_model.training_data
    if ex_terms is None or re_term is None:
        raise AssertionError("The full model must be fit prior to undergoing a stepwise procedure.")
    if metric_name not in _metrics:
        raise KeyError("Metric '{}' not supported. The following metrics are supported: {}".format(
            metric_name,
            list(_metrics.keys())
        ))
    metric_func = _metrics[metric_name]
    ex_term_list = ex_terms.get_terms()
    if forward:
        # Forward selection starts from the intercept-only model.
        best_model = LinearModel(Constant(1), re_term)
        best_model.fit(data)
    else:
        best_model = full_model
    best_metric = metric_func(best_model)
    while len(ex_term_list) > 0:
        best_potential_metric = metric_func(None)  # sentinel: worst possible score
        best_potential_model = None
        best_idx = None
        if forward and not naive:
            # Build an expression over all remaining candidates so we can
            # identify 'leaves': terms no other candidate depends on.
            ex_term_list_expression = None
            for t in ex_term_list:
                if ex_term_list_expression is None:
                    ex_term_list_expression = t
                else:
                    ex_term_list_expression = ex_term_list_expression + t
            leaves = set(term for term in ex_term_list if not term.contains(ex_term_list_expression - term))
        for i, term in enumerate(ex_term_list):
            try:
                if forward:
                    # validate if adding term is valid
                    if not naive:
                        if term not in leaves:
                            continue
                    potential_model = LinearModel(best_model.given_ex + term, re_term)
                else:
                    # validate if removing term is valid
                    if not naive:
                        if (best_model.given_ex - term).contains(term):
                            continue
                    potential_model = LinearModel(best_model.given_ex - term, re_term)
                potential_model.fit(data)
                potential_metric = metric_func(potential_model)
                if best_potential_metric.compare(potential_metric):
                    best_potential_metric = potential_metric
                    best_potential_model = potential_model
                    best_idx = i
                if verbose:
                    print(potential_model)
                    print(potential_metric)
                    print("Current best potential model" if best_idx == i else "Not current best potential")
                    print()
            except np.linalg.LinAlgError:
                # Singular fit; skip this candidate.  (Was the private alias
                # np.linalg.linalg.LinAlgError, removed in NumPy 2.0.)
                continue
        if best_metric.compare(best_potential_metric):
            best_metric = best_potential_metric
            best_model = best_potential_model
            if verbose:
                print("!!! New model found. Now including", ex_term_list[best_idx])
                print()
            del ex_term_list[best_idx]
        else:
            if verbose:
                print("!!! No potential models better than prior. Exiting search.")
                print()
            break
    else:
        # while-else: runs only when the loop exhausts without `break`.
        if verbose:
            print("!!! Exhausted all potential terms. None left to consider.")
    return dict(
        forward=forward,
        metric=best_metric,
        metric_name=metric_name,
        best_model=best_model
    )
from .model import *
from scipy.stats import f
import numpy as np
import pandas as pd
def anova(model1, model2 = None):
    ''' Run an Analysis of Variance on one or two fitted models.

    With a single model, a general F-test is executed on all of its
    coefficients. With two models, a partial F-test is executed; one of the
    two models must be a subset of the other for the test to be valid.

    Arguments:
        model1 - A Model object that has been fit on some data.
        model2 - An optional second Model object that has been fit on some data.

    Returns:
        A DataFrame that contains relevant statistics for the test performed.
    '''
    if model2 is None:
        return _anova_terms(model1)
    # Determine which model is the full one; is_subset(a, b) means b ⊆ a.
    if is_subset(model1, model2):
        full, reduced = model1, model2
    elif is_subset(model2, model1):
        full, reduced = model2, model1
    else:
        raise Exception("Parameters must either be one model or two models where one is a subset of the other.")
    return _anova_models(full, reduced)
def is_subset(model1, model2):
    ''' Check whether model2 is a subset of model1, i.e. whether model1
    contains every term that model2 has.

    Arguments:
        model1 - A Model object that has been fit on some data.
        model2 - A Model object that has been fit on some data.

    Returns:
        True if both models share the same response variable and model2's
        terms are a subset of model1's terms; False otherwise.
    '''
    same_response = model1.given_re.__sim__(model2.given_re)
    if not same_response:
        # Models must agree on the response variable to be comparable.
        return False
    larger_terms = set(model1.ex.get_terms())
    smaller_terms = set(model2.ex.get_terms())
    return smaller_terms.issubset(larger_terms)
def _calc_stats(numer_ss, numer_df, denom_ss, denom_df):
''' Given the appropriate sum of squares for the numerator and the mean sum
of squares for the denominator (with respective degrees of freedom) this will
return the relevant statistics of an F-test.
Arguments:
numer_ss - Sum of squares for the numerator.
numer_df - Degrees of freedom for the numerator.
denom_ms - Mean sum of squares for the denominator.
denom_df - Degrees of freedom for the denominator.
Returns:
A tuple of three values.
Element 0 contains the mean sum of squares for the numerator.
Element 1 contains the F statistic calculated.
Element 2 contains the associated p-value for the generated F statistic.
'''
numer_ms = numer_ss / numer_df
denom_ms = denom_ss / denom_df
f_val = numer_ms / denom_ms
p_val = 1 - f.cdf(f_val, numer_df, denom_df)
if p_val < 1.12e-16:
p_val = 0.0 # 1.11e-16 is the limit of scipy's precision
return f_val, p_val
def _process_term(orig_model, term):
    ''' Refit a model without a specified term and obtain its sums of squares.

    Arguments:
        orig_model - A fitted Model object.
        term - A Variable object to be left out of the original model when fitting.

    Returns:
        A tuple of two values:
            Element 0 contains the sum of squared errors (SSE) of the reduced model.
            Element 1 contains the regression sum of squares (SSR) of the reduced model.
    '''
    # Refit on the original training data with `term` removed.
    new_model = LinearModel(orig_model.given_ex - term, orig_model.given_re)
    new_model.fit(orig_model.training_data)
    return new_model.get_sse(), new_model.get_ssr()
def _extract_dfs(model, dict_out=False):
''' Obtains the different degrees of freedom for a model in reference to an F-test.
Arguments:
model - A fitted Model object
Returns:
A tuple containing three elements.
Element 0 contains the degrees of freedom for the explantory variables.
Element 1 contains the degrees of freedom for the residuals.
Element 2 contains the total degrees of freedom for the model.
'''
reg_df = model.ex.get_dof()
total_df = len(model.training_x) - 1
error_df = total_df - reg_df
if dict_out:
return dict(
model_df=reg_df,
total_df=total_df,
error_df=error_df
)
else:
return reg_df, error_df, total_df
def _anova_terms(model):
    ''' Perform a global F-test plus leave-one-term-out F-tests on a model.

    For every term, the model is refit without that term and the drop in
    explained sum of squares is tested against the full model's error.

    Arguments:
        model - A fitted Model object.

    Returns:
        A DataFrame object that contains the degrees of freedom, sums of
        squares, F values, and p values for the associated tests performed.
    '''
    full_reg_df, full_error_df, total_df = _extract_dfs(model)
    # Full model values
    full_sse = model.get_sse() # sum of squared errors
    full_ssr = model.get_ssr() # sum of squares explained by model
    global_f_val, global_p_val = _calc_stats(full_ssr, full_reg_df, full_sse, full_error_df)
    # Global test row.
    # BUGFIX: the "SS Err." entry was previously initialized with full_ssr
    # (the regression SS) instead of full_sse (the error SS).
    indices = ["Global Test"]
    sses = [full_sse]
    ssrs = [full_ssr]
    f_vals = [global_f_val]
    p_vals = [global_p_val]
    dfs = [full_reg_df]
    terms = model.ex.get_terms()
    for term in terms:
        term_df = term.get_dof()
        reduced_sse, reduced_ssr = _process_term(model, term)
        # Test the explained-SS lost by dropping this term against the full model's error.
        reduced_f_val, reduced_p_val = _calc_stats(full_ssr - reduced_ssr, term_df, full_sse, full_error_df)
        indices.append("- " + str(term))
        sses.append(reduced_sse)
        ssrs.append(reduced_ssr)
        dfs.append(term_df)
        f_vals.append(reduced_f_val)
        p_vals.append(reduced_p_val)
    # Finish off the dataframe's values: the Error row only reports its DF.
    indices.append("Error")
    sses.append("")
    ssrs.append("")
    dfs.append(full_error_df)
    f_vals.append("")
    p_vals.append("")
    return pd.DataFrame({
        "DF" : dfs,
        "SS Err.": sses,
        "SS Reg." : ssrs,
        "F" : f_vals,
        "p" : p_vals
    }, index = indices, columns = ["DF", "SS Err.", "SS Reg.", "F", "p"])
def _anova_models(full_model, reduced_model):
    ''' Perform a partial F-test to compare two models.

    Arguments:
        full_model - A fitted Model object.
        reduced_model - A fitted Model object that is a subset of the full_model.

    Returns:
        A DataFrame object that contains the degrees of freedom, regression and
        error sums of squares, the F value, and the p value for the test performed.
    '''
    f_reg_df, f_error_df, f_total_df = _extract_dfs(full_model)
    r_reg_df, r_error_df, r_total_df = _extract_dfs(reduced_model)
    f_sse, f_ssr = full_model.get_sse(), full_model.get_ssr()
    r_sse, r_ssr = reduced_model.get_sse(), reduced_model.get_ssr()
    # Extra SS explained by the full model, tested against its own error.
    f_val, p_val = _calc_stats(r_sse - f_sse, r_error_df - f_error_df, f_sse, f_error_df)
    indices = ["Full Model", "- Reduced Model", "Error"]
    dfs = [f_reg_df, f_reg_df - r_reg_df, f_error_df]
    ssrs = [f_ssr, r_ssr, ""]
    sses = [f_sse, r_sse, ""]
    # Named f_vals/p_vals (not f/p) to avoid shadowing scipy.stats.f above.
    f_vals = ["", f_val, ""]
    p_vals = ["", p_val, ""]
    return pd.DataFrame({
        "DF" : dfs,
        "SS Err." : sses,
        "SS Reg." : ssrs,
        "F" : f_vals,
        "p" : p_vals},
        index = indices, columns = ["DF", "SS Err.", "SS Reg.", "F", "p"])
import pandas as pd
from pandas.plotting import scatter_matrix
import numpy as np
import scipy.stats as stats
import matplotlib.pyplot as plt
from itertools import product
import math
from .expression import Expression, Var, Quantitative, Categorical, Interaction, Combination, Identity, Constant
plt.style.use('ggplot')
def _float_format(x):
abs_x = abs(x)
if abs_x >= 1e4:
rep = "{:.3e}".format(x)
elif abs_x >= 1e0:
rep = ("{:." + str(3 - int(math.floor(math.log10(abs_x)))) + "f}").format(x)
elif abs_x >= 1e-3:
rep = "{:.4f}".format(x)
elif abs_x >= 1e-9:
rep = "{:.3e}".format(x)
elif abs_x >= 1e-99:
rep = "{:.1e}".format(x)
else:
rep = "{:.0e}".format(x)
return rep.replace("e-0", "e-").replace("e+0", "e+")
# Register the compact formatter above for all pandas float display output.
pd.set_option("display.float_format", _float_format)
class Model:
    ''' A general Model class that both Linear models and (in the future) General Linear models stem from. '''
    def __init__(self):
        ''' Create a Model object (only possible through inheritance). '''
        raise NotImplementedError()
    def fit(self, data):
        ''' Fit a model to given data.

        Arguments:
            data - A DataFrame with column names matching specified terms within the Model's explanatory and response Expression objects.

        Returns:
            A DataFrame object with relevant statistics of fitted Model (coefficients, t statistics, p-values, etc.).
        '''
        raise NotImplementedError()
    def predict(self, data):
        ''' Predict response values for a given set of data.

        Arguments:
            data - A DataFrame with column names matching specified terms within the Model's explanatory Expression object.

        Returns:
            A Series object of the predicted values.
        '''
        raise NotImplementedError()
    def plot_matrix(self, **kwargs):
        ''' Produce a matrix of pairwise scatter plots of the data the model was fit on. The diagonal of the matrix will feature
        histograms instead of scatter plots.

        Arguments:
            kwargs - One or more named parameters that will be ingested by Pandas' scatter_matrix plotting function.

        Returns:
            A matplotlib plot object containing the matrix of scatter plots.
        '''
        # Plot the explanatory and response columns together in one matrix.
        df = pd.concat([self.training_x, self.training_y], axis = 1)
        scatter_matrix(df, **kwargs)
class LinearModel(Model):
''' A specific kind of Model that assumes the response values are in a linearl relationship with the explantory variables. '''
def __init__(self, explanatory, response, intercept = True):
    ''' Create a LinearModel object.
    If an intercept is not wanted, can either set intercept = False or subtract '1' from the explanatory Expression.

    Arguments:
        explanatory - An Expression object that is either a single term or a Combination of them. These are the X's.
            A plain int/float is also accepted and treated as a Constant.
        response - An Expression object that represents the single term for the response variables. This is the Y.
            If this is a Combination, they will be added together and treated as a single variable.
        intercept - A boolean that indicates if an intercept is wanted (True) or not (False).
    '''
    if isinstance(explanatory, (int, float)):
        explanatory = Constant(explanatory)
    if intercept:
        self.given_ex = explanatory + 1
    else:
        self.given_ex = explanatory
    # Detect any constant term, whether it came from the intercept flag or a
    # user-supplied '+ 1'; its presence marks an intercept model.
    constant = self.given_ex.reduce()['Constant']
    self.intercept = constant is not None
    if self.intercept:
        self.given_ex = self.given_ex - constant # This was done to easily check all options for indicating a wanted intercept
    self.given_re = Identity(response) # This will collapse any combination of variables into a single column
    self.ex = None  # interpreted explanatory expression (set by fit)
    self.re = None  # interpreted response expression (set by fit)
    self.bhat = None  # fitted coefficient estimates
    self.fitted = None  # fitted values on the training data
    self.residuals = None  # training residuals
    self.std_err_est = None  # estimated residual standard error
    self.std_err_vars = None  # standard errors of the coefficients
    self.var = None  # covariance matrix of the coefficients
    self.t_vals = None  # coefficient t statistics
    self.p_vals = None  # coefficient p-values
    self.training_data = None
    self.training_x = None
    self.training_y = None
    self.categorical_levels = dict()
def __str__(self):
''' Convert a LinearModel to a str format for printing. '''
if self.intercept:
return str(self.given_re) + " ~ " + str(1 + self.given_ex)
else:
return str(self.given_re) + " ~ " + str(self.given_ex)
def fit(self, X, Y = None):
''' Exposed function for fitting a LinearModel. Can either give one DataFrame that contains both
response and explanatory variables or separate ones. This is done to interface into the sklearn ecosystem.
It is worth noting that it is fine to have extra columns that are not used by the model - they will just be ignored.
Arugments:
X - A DataFrame object that contains either the response and explanatory data, or just the explanatory data.
Y - An optional DataFrame object that contains the response data.
Returns:
A DataFrame object with relevant statistics of fitted Model (coefficients, t statistics, p-values, etc.).
'''
if Y is None:
data = X
else:
data = pd.concat([X,Y], axis = 1)
return self._fit(data)
def _fit_intercept_only(self, data):
    ''' Fit the degenerate intercept-only model (response ~ 1).

    Used by _fit when the explanatory expression reduces to zero terms.

    Arguments:
        data - A DataFrame containing the response column(s).

    Returns:
        A DataFrame with the coefficient estimate, SE, t statistic, and p-value.
    '''
    # Construct X matrix: a single column of ones.
    self.ex = Constant(1)
    X = self.ex.evaluate(data)
    X.columns = ["Intercept"]
    self.training_x = X
    # Construct Y vector
    y = self.re.evaluate(data)
    y_mean = y.mean()
    self.training_y = y
    # Solve the normal equations (X'X) b = X'y.
    self.bhat = pd.DataFrame(np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y)),
                             index=X.columns, columns=["Coefficients"])
    n = X.shape[0]
    # p counts non-intercept predictors; here it is 0 for an intercept model.
    p = X.shape[1] - (1 if self.intercept else 0)
    self.n, self.p = n, p
    # Y_Hat and Residuals
    self.fitted = pd.DataFrame({"Fitted": np.dot(X, self.bhat).sum(axis=1)})
    self.residuals = pd.DataFrame({"Residuals": y.iloc[:, 0] - self.fitted.iloc[:, 0]})
    # Sigma: residual standard error on n - p - 1 degrees of freedom.
    self.std_err_est = ((self.residuals["Residuals"] ** 2).sum() / (n - p - 1)) ** 0.5
    # Covariance Matrix: sigma^2 (X'X)^-1, via a linear solve.
    self.var = np.linalg.solve(np.dot(X.T, X),
                               (self.std_err_est ** 2) * np.identity(X.shape[1]))
    # Coefficient SE, Diagonal of Cov. Matrix
    self.std_err_vars = pd.DataFrame({"SE": (np.diagonal(self.var)) ** 0.5},
                                     index=self.bhat.index)
    # format the covariance matrix
    self.var = pd.DataFrame(self.var, columns=X.columns, index=X.columns)
    # Coefficient Inference: two-sided t-test for each coefficient.
    self.t_vals = pd.DataFrame({"t": self.bhat["Coefficients"] / self.std_err_vars["SE"]})
    self.p_vals = pd.DataFrame({"p": pd.Series(2 * stats.t.cdf(-abs(self.t_vals["t"]), n - p - 1),
                                               index=self.bhat.index)})
    ret_val = pd.concat([self.bhat, self.std_err_vars, self.t_vals, self.p_vals], axis=1)
    return ret_val
def _fit(self, data):
    ''' Helper function for fitting a model with given data.

    Arguments:
        data - A DataFrame object containing the explanatory and response columns (amongst potentially extraneous columns as well).

    Returns:
        A DataFrame object with relevant statistics of fitted Model (coefficients, SEs, t statistics, p-values, and 95% CIs).
    '''
    # Initialize the categorical levels
    self.categorical_levels = dict()
    self.training_data = data
    # Replace all Var's with either Q's or C's
    self.re = self.given_re.copy()
    self.re = self.re.interpret(data)
    # An expression equal to 0 means no explanatory terms: intercept-only fit.
    if self.given_ex == 0:
        return self._fit_intercept_only(data)
    self.ex = self.given_ex.copy()
    self.ex = self.ex.interpret(data)
    terms = self.ex.reduce()
    # Construct X matrix
    X = self.ex.evaluate(data)
    X_means = X.mean()
    self.training_x = X
    self.training_x_means = X_means
    # Construct Y vector
    y = self.re.evaluate(data)
    y_mean = y.mean()
    self.training_y = y
    self.training_y_mean = y_mean
    # Center if there is an intercept: the intercept is then recovered from
    # the means after solving, instead of adding a ones column up front.
    if self.intercept:
        X = X - X_means
        y = y - y_mean
    # Solve the normal equations (X'X) b = X'y.
    self.bhat = pd.DataFrame(np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y)),
                             index=X.columns, columns=["Coefficients"])
    if self.intercept:
        # Recover the intercept: b0 = ybar - xbar . b, then un-center.
        self.bhat.loc["Intercept"] = [y_mean[0] - X_means.dot(self.bhat)[0]]
        X = X + X_means
        X['Intercept'] = 1
        y = y + y_mean
    n = X.shape[0]
    p = X.shape[1]
    self.n, self.p = n, p
    # Y_Hat and Residuals
    self.fitted = pd.DataFrame({"Fitted" : np.dot(X, self.bhat).sum(axis = 1)})
    self.residuals = pd.DataFrame({"Residuals" : y.iloc[:,0] - self.fitted.iloc[:,0]})
    # Sigma Hat: residual standard error on n - p degrees of freedom.
    self.std_err_est = ((self.residuals["Residuals"] ** 2).sum() / (n - p)) ** 0.5
    # Covariance Matrix: sigma^2 (X'X)^-1, via a linear solve.
    self.var = np.linalg.solve(np.dot(X.T, X),
                               (self.std_err_est ** 2) * np.identity(X.shape[1]))
    # Coefficient SE, Diagonal of Cov. Matrix
    self.std_err_vars = pd.DataFrame({"SE": (np.diagonal(self.var)) ** 0.5},
                                     index=self.bhat.index)
    # format the covariance matrix
    self.var = pd.DataFrame(self.var, columns=X.columns, index=X.columns)
    # Coefficient Inference: two-sided t-test for each coefficient.
    self.t_vals = pd.DataFrame({"t": self.bhat["Coefficients"] / self.std_err_vars["SE"]})
    self.p_vals = pd.DataFrame({"p": pd.Series(2 * stats.t.cdf(-abs(self.t_vals["t"]), n - p),
                                               index=self.bhat.index)})
    # 95% confidence intervals for the coefficients.
    ci_width = stats.t.ppf(q=0.975, df=n-p)
    self.lower_conf = pd.DataFrame({"2.5% CI": self.bhat["Coefficients"] - ci_width*self.std_err_vars["SE"]})
    self.upper_conf = pd.DataFrame({"97.5% CI": self.bhat["Coefficients"] + ci_width*self.std_err_vars["SE"]})
    ret_val = pd.concat([self.bhat, self.std_err_vars, self.t_vals, self.p_vals, self.lower_conf, self.upper_conf], axis = 1)
    return ret_val
def likelihood(self, data=None):
''' Calculate likelihood for a fitted model on either original data or new data. '''
if data is None:
residuals = self.residuals.iloc[:, 0]
else:
y = self.re.evaluate(data)
y_hat = self.predict(data, for_plot=False, confidence_interval=False, prediction_interval=False)
residuals = y.iloc[:, 0] - y_hat.iloc[:, 0]
var = self.std_err_est ** 2
n = len(residuals)
return (2 * math.pi * var) ** (-n / 2) * math.exp(-1 / (2 * var) * (residuals ** 2).sum())
def log_likelihood(self, data=None):
''' Calculate a numerically stable log_likelihood for a fitted model on either original data or new data. '''
if data is None:
residuals = self.residuals.iloc[:, 0]
else:
y = self.re.evaluate(data)
y_hat = self.predict(data, for_plot=False, confidence_interval=False, prediction_interval=False)
residuals = y.iloc[:, 0] - y_hat.iloc[:, 0]
var = self.std_err_est ** 2
n = len(residuals)
return (-n / 2) * (math.log(2 * math.pi) + 2 * math.log(self.std_err_est)) - (1 / (2 * var)) * (residuals ** 2).sum()
def confidence_intervals(self, alpha = None, conf = None):
''' Calculate confidence intervals for fitted coefficients. Model must be fitted prior to execution.
Arguments:
alpha - A real value denoting the alpha of the confidence interval. CI Width = 1 - alpha / 2.
conf - A real value denoting the confidecne interval width.
Only one or the other of alpha or conf needs to be specified.
If neither are, a default value of conf = 0.95 will be used.
Returns:
A DataFrame object containing the appropriate confidence intervals for all the coefficients.
'''
if alpha is None:
if conf is None:
conf = 0.95
alpha = 1 - conf
crit_prob = 1 - (alpha / 2)
df = self.training_x.shape[0] - self.bhat.shape[0] # n - p
crit_value = stats.t.ppf(crit_prob, df)
se_vals = self.std_err_vars["SE"]
width = crit_value * se_vals
lower_bound = self.bhat["Coefficients"] - width
upper_bound = self.bhat["Coefficients"] + width
return pd.DataFrame({str(round(1 - crit_prob, 5) * 100) + "%" : lower_bound,
str(round(crit_prob, 5) * 100) + "%" : upper_bound})#,
#index = self.bhat.index)
def predict(self, data, for_plot = False, confidence_interval = False, prediction_interval = False):
    ''' Predict response values given some data for a fitted model.

    Arguments:
        data - A DataFrame object containing the explanatory values to base predictions off of.
        for_plot - A boolean flag to indicate if these predictions are computed for the purposes of plotting.
            NOTE(review): this flag is currently unused in the body.
        confidence_interval - A real value (alpha) indicating the width of confidence intervals for the prediction.
            If no intervals are wanted, parameter is set to False.
        prediction_interval - A real value (alpha) indicating the width of prediction intervals for the prediction.
            If no intervals are wanted, parameter is set to False.

    Returns:
        A DataFrame object containing the appropriate predictions and intervals.
    '''
    # Construct the X matrix
    X = self.ex.evaluate(data, fit = False)
    if self.intercept:
        X['Intercept'] = 1
    y_vals = X.dot(self.bhat).sum(axis = 1)
    predictions = pd.DataFrame({"Predicted " + str(self.re) : y_vals})
    if confidence_interval or prediction_interval:
        # Only one kind of band is computed; confidence takes precedence.
        if confidence_interval:
            alpha = confidence_interval
            widths = self._confidence_interval_width(X, confidence_interval)
        else:
            alpha = prediction_interval
            widths = self._prediction_interval_width(X, prediction_interval)
        crit_prob = 1 - (alpha / 2)
        lower = y_vals - widths
        upper = y_vals + widths
        predictions[str(round(1 - crit_prob, 5) * 100) + "%"] = lower
        predictions[str(round(crit_prob, 5) * 100) + "%"] = upper
    return predictions
def get_sse(self):
''' Get the SSE of a fitted model. '''
sse = ((self.training_y.iloc[:,0] - self.fitted.iloc[:,0]) ** 2).sum()
return sse
def get_ssr(self):
''' Get the SSR of a fitted model. '''
ssr = self.get_sst() - self.get_sse()
return ssr
def get_sst(self):
''' Get the SST of a fitted model. '''
sst = ((self.training_y.iloc[:,0] - self.training_y.iloc[:,0].mean()) ** 2).sum()
return sst
def r_squared(self, X = None, y = None, adjusted = False, **kwargs):
''' Calculate the (adjusted) R^2 value of the model.
This can be used as a metric within the sklearn ecosystem.
Arguments:
X - An optional DataFrame of the explanatory data to be used for calculating R^2. Default is the training data.
Y - An optional DataFrame of the response data to be used for calculating R^2. Default is the training data.
adjusted - A boolean indicating if the R^2 value is adjusted (True) or not (False).
Returns:
A real value of the computed R^2 value.
'''
# Allow interfacing with sklearn's cross fold validation
#self.fit(X, y)
if X is None:
X = self.training_data
if y is None:
y = self.training_y
pred = self.predict(X)
sse = ((y.iloc[:,0] - pred.iloc[:,0]) ** 2).sum()
ssto = ((y.iloc[:,0] - y.iloc[:,0].mean()) ** 2).sum()
if adjusted:
numerator = sse
denominator = ssto
else:
numerator = sse / (len(y) - len(self.training_x.columns) - 2)
denominator = ssto / (len(y) - 1)
return 1 - numerator / denominator
def score(self, X = None, y = None, adjusted = False, **kwargs):
''' Wrapper for sklearn api for cross fold validation. See LinearModel.r_squared. '''
return self.r_squared(X, y, adjusted, **kwargs)
def _prediction_interval_width(self, X_new, alpha = 0.05):
    ''' Helper function for calculating prediction interval half-widths.

    Arguments:
        X_new - Design-matrix DataFrame for the new points (including the intercept column when present).
        alpha - Significance level of the intervals.

    Returns:
        A Series of half-widths, one per row of X_new.
    '''
    n = self.training_x.shape[0]
    p = X_new.shape[1]
    mse = self.get_sse() / (n - p)
    s_yhat_squared = (X_new.dot(self.var) * X_new).sum(axis = 1) # X_new_vect * var * X_new_vect^T (equivalent to np.diag(X_new.dot(self.var).dot(X_new.T)))
    # Prediction variance adds the residual variance (mse) to the fit variance.
    s_pred_squared = mse + s_yhat_squared
    t_crit = stats.t.ppf(1 - (alpha / 2), n-p)
    return t_crit * (s_pred_squared ** 0.5)
def _confidence_interval_width(self, X_new, alpha = 0.05):
    ''' Helper function for calculating confidence band half-widths for the mean response.

    Uses a Working-Hotelling style critical value W^2 = p * F(1 - alpha/2; p, n-p).
    NOTE(review): the F quantile uses 1 - alpha/2 rather than 1 - alpha — confirm intended coverage.

    Arguments:
        X_new - Design-matrix DataFrame for the new points (including the intercept column when present).
        alpha - Significance level of the band.

    Returns:
        A Series of half-widths, one per row of X_new.
    '''
    n = self.training_x.shape[0]
    p = X_new.shape[1]
    s_yhat_squared = (X_new.dot(self.var) * X_new).sum(axis = 1) # X_new_vect * var * X_new_vect^T (equivalent to np.diag(X_new.dot(self.var).dot(X_new.T)))
    #t_crit = stats.t.ppf(1 - (alpha / 2), n-p)
    W_crit_squared = p * stats.f.ppf(1 - (alpha / 2), p, n-p)
    return (W_crit_squared ** 0.5) * (s_yhat_squared ** 0.5)
def plot(
    self,
    categorize_residuals=True,
    jitter=None,
    confidence_band=False,
    prediction_band=False,
    original_y_space=True,
    transformed_y_space=False,
    alpha=0.5,
    **kwargs
):
    ''' Visualizes the fitted LinearModel and its line of best fit.

    Arguments:
        categorize_residuals - A boolean that indicates if the residual points should be colored by categories (True) or not (False).
        jitter - A boolean that indicates if residuals should be jittered in factor plots (True) or not (False).
        confidence_band - A real value (alpha) that specifies the width of the confidence band to be plotted. If band not desired, parameter is set to False.
        prediction_band - A real value (alpha) that specifies the width of the prediction band to be plotted. If band not desired, parameter is set to False.
        original_y_space - A boolean; if True the original (untransformed) y space is plotted.
        transformed_y_space - A boolean; if True the transformed y space is plotted.
            When both flags are True, the two are plotted side-by-side.
        alpha - A real value that indicates the transparency of the residuals. Default is 0.5.
        kwargs - Additional named parameters that will be passed onto lower level matplotlib plotting functions.

    Returns:
        A matplotlib figure containing the appropriate visualization of the model.
    '''
    if confidence_band and prediction_band:
        raise Exception("One one of {confidence_band, prediction_band} may be set to True at a time.")
    terms = self.ex.reduce()
    # Decide how many panels to draw based on the requested y spaces.
    if original_y_space and transformed_y_space:
        fig, (ax_o, ax_t) = plt.subplots(1, 2, **kwargs)
        y_spaces = ['o', 't']
        axs = [ax_o, ax_t]
    elif transformed_y_space: # at least one of the two is False
        fig, ax_t = plt.subplots(1,1, **kwargs)
        y_spaces = ['t']
        axs = [ax_t]
    elif original_y_space:
        fig, ax_o = plt.subplots(1,1, **kwargs)
        y_spaces = ['o']
        axs = [ax_o]
    else:
        raise AssertionError("At least one of either 'original_y_space' or 'transformed_y_space' should be True in model.plot(...) call.")
    for y_space_type, ax in zip(y_spaces, axs):
        original_y_space = y_space_type == "o"
        # Redundant, we untransform in later function calls
        # TODO: Fix later
        y_vals = self.training_y[str(self.re)]
        if original_y_space:
            y_vals = self.re.untransform(y_vals)
        # Plotting Details: compute y-axis limits with a small buffer.
        min_y = min(y_vals)
        max_y = max(y_vals)
        diff = (max_y - min_y) * 0.05
        min_y = min(min_y - diff, min_y + diff) # Add a small buffer
        max_y = max(max_y - diff, max_y + diff) # TODO: Check if min() and max() are necessary here
        plot_args = {
            "categorize_residuals": categorize_residuals,
            "jitter": jitter,
            "terms": terms,
            "confidence_band": confidence_band,
            "prediction_band": prediction_band,
            "original_y_space": original_y_space,
            "alpha": alpha,
            "plot_objs": {
                "figure": fig,
                "ax": ax,
                "y": {
                    "min": min_y,
                    "max": max_y,
                    "name": str(self.re)
                }
            }
        }
        # Dispatch on the number of quantitative terms in the model.
        if len(terms['Q']) == 1:
            self._plot_one_quant(**plot_args)
        elif len(terms['Q']) == 0 and len(terms['C']) > 0:
            self._plot_zero_quant(**plot_args) # TODO Make function
        else:
            raise Exception("Plotting line of best fit only expressions that reference a single variable.")
    return fig
def _plot_zero_quant(self, categorize_residuals, jitter, terms, confidence_band, prediction_band, original_y_space, alpha, plot_objs):
    ''' A helper function for plotting models in the case no quantitative variables are present.

    The categorical variable with the most levels is placed on the x-axis; one
    line is drawn for each combination of the remaining categorical variables.
    '''
    ax = plot_objs['ax']
    unique_cats = list(terms['C'])
    levels = [cat.levels for cat in unique_cats]
    level_amounts = [len(level_ls) for level_ls in levels]
    # Pick the categorical variable with the most levels for the x-axis.
    ml_index = level_amounts.index(max(level_amounts))
    ml_cat = unique_cats[ml_index]
    ml_levels = levels[ml_index]
    cats_wo_most = unique_cats[:]
    cats_wo_most.remove(ml_cat) # List of categorical variables without the ml_cat
    levels_wo_most = levels[:]
    levels_wo_most.remove(levels[ml_index]) # List of levels for categorical variables without the ml_cat
    single_cat = len(cats_wo_most) == 0
    if single_cat:
        level_combinations = [None]
    else:
        level_combinations = product(*levels_wo_most) # Cartesian product
    line_x = pd.DataFrame({str(ml_cat) : ml_levels}).reset_index() # To produce an index column to be used for the x-axis alignment
    points = pd.merge(self.training_data, line_x, on = str(ml_cat))
    plot_objs['x'] = {'name': 'index'}
    points["<Y_RESIDS_TO_PLOT>"] = self.re.evaluate(points)
    if original_y_space:
        points["<Y_RESIDS_TO_PLOT>"] = self.re.untransform(points["<Y_RESIDS_TO_PLOT>"]) # Inefficient due to transforming, then untransforming. Need to refactor later.
    plots = []
    labels = []
    linestyles = [':', '-.', '--', '-']
    for combination in level_combinations:
        points_indices = pd.Series([True] * len(points))
        if not single_cat:
            label = []
            for element, var in zip(combination, cats_wo_most):
                name = str(var)
                line_x[name] = element
                label.append(str(element))
                points_indices = points_indices & (points[name] == element) # Filter out points that don't apply to categories
            labels.append(", ".join(label))
        # Rotate through the available line styles.
        line_type = linestyles.pop()
        linestyles.insert(0, line_type)
        line_y = self.predict(line_x, for_plot = True)
        y_vals = line_y["Predicted " + plot_objs['y']['name']]
        if original_y_space:
            y_vals_to_plot = self.re.untransform(y_vals)
        else:
            y_vals_to_plot = y_vals
        plot, = ax.plot(line_x.index, y_vals_to_plot, linestyle = line_type)
        # Default behavior (jitter=None) is to jitter factor-plot residuals.
        if jitter is None or jitter is True:
            variability = np.random.normal(scale = 0.025, size = sum(points_indices))
        else:
            variability = 0
        # Y values must come from points because earlier merge shuffles rows
        ax.scatter(points.loc[points_indices, 'index'] + variability, points.loc[points_indices, "<Y_RESIDS_TO_PLOT>"], c = "black" if single_cat else plot.get_color(), alpha = alpha)
        plots.append(plot)
        if confidence_band:
            self._plot_band(line_x, y_vals, plot.get_color(), original_y_space, plot_objs, True, confidence_band)
        elif prediction_band:
            self._plot_band(line_x, y_vals, plot.get_color(), original_y_space, plot_objs, False, prediction_band)
    if not single_cat and len(cats_wo_most) > 0:
        # Shrink the axes to make room for the legend on the right.
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
        ax.legend(plots, labels, title = ", ".join([str(cat) for cat in cats_wo_most]), loc = "center left", bbox_to_anchor=(1, 0.5))
    ax.set_xlabel(str(ml_cat))
    ax.set_xticks(line_x.index)
    ax.set_xticklabels(line_x[str(ml_cat)])
    ax.set_ylabel(plot_objs['y']['name'] if not original_y_space else self.re.untransform_name())
    ax.grid()
    ax.set_ylim([plot_objs['y']['min'], plot_objs['y']['max']])
def _plot_one_quant(self, categorize_residuals, jitter, terms, confidence_band, prediction_band, original_y_space, alpha, plot_objs):
    ''' A helper function for plotting models in the case only one quantitative variable is present. Also supports zero or more categorical variables. '''
    x_term = next(iter(terms['Q'])) # Get the "first" and only element in the set
    x_name = str(x_term)
    x = self.training_data[x_name]
    # Compute x-axis limits with a small buffer.
    min_x = min(x)
    max_x = max(x)
    diff = (max_x - min_x) * 0.05
    min_x = min(min_x - diff, min_x + diff) # Add a small buffer
    max_x = max(max_x - diff, max_x + diff) # TODO: Check if min() and max() are necessary here
    plot_objs['x'] = {"min" : min_x, "max" : max_x, "name" : x_name}
    # Quantitative inputs: a dense grid of x values for the fitted line.
    line_x = pd.DataFrame({x_name : np.linspace(min_x, max_x, 100)})
    # Dispatch on whether any categorical variables are present.
    if len(terms['C']) == 0:
        self._plot_one_quant_zero_cats(x, line_x, jitter, terms, confidence_band, prediction_band, original_y_space, alpha, plot_objs)
    else:
        self._plot_one_quant_some_cats(x, line_x, categorize_residuals, jitter, terms, confidence_band, prediction_band, original_y_space, alpha, plot_objs)
    ax = plot_objs['ax']
    ax.set_xlabel(x_name)
    ax.set_ylabel(plot_objs['y']['name'] if not original_y_space else self.re.untransform_name())
    ax.grid()
    ax.set_xlim([min_x, max_x])
    ax.set_ylim([plot_objs['y']['min'], plot_objs['y']['max']])
def _plot_one_quant_zero_cats(self, x, line_x, jitter, terms, confidence_band, prediction_band, original_y_space, alpha, plot_objs):
    ''' A helper function for plotting models in the case only one quantitative variable and no categorical variables are present. '''
    x_name = plot_objs['x']['name']
    ax = plot_objs['ax']
    # Predict along the x grid to draw the fitted line.
    line_y = self.predict(line_x)
    y_vals = line_y["Predicted " + plot_objs['y']['name']]
    if original_y_space:
        y_vals_to_plot = self.re.untransform(y_vals)
    else:
        y_vals_to_plot = y_vals
    line_fit, = ax.plot(line_x[x_name], y_vals_to_plot)
    # At most one band type is drawn; confidence takes precedence.
    if confidence_band:
        self._plot_band(line_x, y_vals, line_fit.get_color(), original_y_space, plot_objs, True, confidence_band)
    elif prediction_band:
        self._plot_band(line_x, y_vals, line_fit.get_color(), original_y_space, plot_objs, False, prediction_band)
    training_y_vals = self.training_y[plot_objs['y']['name']]
    if original_y_space:
        training_y_vals = self.re.untransform(training_y_vals)
    ax.scatter(x, training_y_vals, c = "black", alpha = alpha)
def _plot_band(self, line_x, y_vals, color, original_y_space, plot_objs, use_confidence = False, alpha = 0.05): # By default will plot prediction bands
    ''' A helper function to plot the confidence or prediction bands for a model.

    Arguments:
        line_x - DataFrame of grid points along the x-axis for the band.
        y_vals - Predicted y values at those grid points (transformed space).
        color - Matplotlib color for the shaded band.
        original_y_space - Whether to untransform the band into the original y space.
        plot_objs - Shared plotting state dict (axes, axis names, limits).
        use_confidence - If True, draw a confidence band; otherwise a prediction band.
        alpha - Significance level passed to the interval-width helpers.
    '''
    x_name = plot_objs['x']['name']
    X_new = self.ex.evaluate(line_x, fit = False)
    if self.intercept:
        X_new['Intercept'] = 1
    if use_confidence:
        widths = self._confidence_interval_width(X_new, alpha)
    else:
        widths = self._prediction_interval_width(X_new, alpha)
    lower = y_vals - widths
    upper = y_vals + widths
    # Untransform the band edges after widths are computed in model space.
    if original_y_space:
        lower = self.re.untransform(lower)
        upper = self.re.untransform(upper)
    plot_objs['ax'].fill_between(x = line_x[x_name], y1 = lower, y2 = upper, color = color, alpha = 0.3)
def _plot_one_quant_some_cats(self, x, line_x, categorize_residuals, jitter, terms, confidence_band, prediction_band, original_y_space, alpha, plot_objs):
    ''' A helper function for plotting models in the case only one quantitative variable and one or more categorical variables are present.

    Draws one fitted line per combination of categorical levels, optionally
    coloring the residuals by the same combinations.
    '''
    ax = plot_objs['ax']
    x_name = plot_objs['x']['name']
    y_name = plot_objs['y']['name']
    plots = []
    labels = []
    linestyles = [':', '-.', '--', '-']
    cats = list(terms['C'])
    cat_names = [str(cat) for cat in cats]
    levels = [cat.levels for cat in cats]
    level_combinations = product(*levels) #cartesian product of all combinations
    dummy_data = line_x.copy() # rest of columns set in next few lines
    training_y_vals = self.training_y[y_name]
    if original_y_space:
        training_y_vals = self.re.untransform(training_y_vals)
    for level_set in level_combinations:
        label = [] # To be used in legend
        for (cat,level) in zip(cats,level_set):
            dummy_data[str(cat)] = level # set dummy data for prediction
            label.append(str(level))
        line_type = linestyles.pop() # rotate through line styles
        linestyles.insert(0, line_type)
        line_y = self.predict(dummy_data, for_plot = True)
        y_vals = line_y["Predicted " + y_name]
        if original_y_space:
            y_vals_to_plot = self.re.untransform(y_vals)
        else:
            y_vals_to_plot = y_vals
        plot, = ax.plot(dummy_data[x_name], y_vals_to_plot, linestyle = line_type)
        plots.append(plot)
        labels.append(", ".join(label))
        if categorize_residuals:
            # Select only the training points matching this level combination.
            indices_to_use = pd.Series([True] * len(x)) # gradually gets filtered out
            for (cat,level) in zip(cats,level_set):
                indices_to_use = indices_to_use & (self.training_data[str(cat)] == level)
            ax.scatter(x[indices_to_use], training_y_vals[indices_to_use], c = plot.get_color(), alpha = alpha)
        if confidence_band:
            self._plot_band(dummy_data, y_vals, plot.get_color(), original_y_space, plot_objs, True, confidence_band)
        elif prediction_band:
            self._plot_band(dummy_data, y_vals, plot.get_color(), original_y_space, plot_objs, False, prediction_band)
    # Legend: shrink the axes to make room on the right.
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    ax.legend(plots, labels, title = ", ".join(cat_names), loc = "center left", bbox_to_anchor = (1, 0.5))
    if not categorize_residuals:
        resids = ax.scatter(x, training_y_vals, c = "black", alpha = alpha)
def residual_plots(self, **kwargs):
''' Plot the residual plots of the model.
Arguments:
kwargs - Named parameters that will be passed onto lower level matplotlib plotting functions.
Returns:
A tuple containing the matplotlib (figure, list of axes) for the residual plots.
'''
terms = list(self.training_x)
fig, axs = plt.subplots(1, len(terms), **kwargs)
for term, ax in zip(terms, axs):
ax.scatter(self.training_x[str(term)], self.residuals['Residuals'])
ax.set_xlabel(str(term))
ax.set_ylabel("Residuals")
ax.set_title(str(term) + " v. Residuals")
ax.grid()
return fig, axs
def partial_plots(self, alpha = 0.5, **kwargs):
''' Plot the partial regression plots for the model
Arguments:
alpha - A real value indicating the transparency of the residuals. Default is 0.5.
kwargs - Named parameters that will be passed onto lower level matplotlib plotting functions.
Returns:
A tuple containing the matplotlib (figure, list of axes) for the partial plots.
'''
#terms = self.ex.flatten(separate_interactions = False)
terms = self.ex.get_terms()
fig, axs = plt.subplots(1, len(terms), **kwargs)
for i, ax in zip(range(0, len(terms)), axs):
xi = terms[i]
sans_xi = Combination(terms[:i] + terms[i+1:])
yaxis = LinearModel(sans_xi, self.re)
xaxis = LinearModel(sans_xi, xi)
yaxis.fit(self.training_data)
xaxis.fit(self.training_data)
ax.scatter(xaxis.residuals["Residuals"], yaxis.residuals["Residuals"], alpha = alpha)
ax.set_title("Leverage Plot for " + str(xi))
return fig, axs
# static method
def ones_column(data):
''' Helper function to create a column of ones for the intercept. '''
return pd.DataFrame({"Intercept" : np.repeat(1, data.shape[0])})
def residual_diagnostic_plots(self, **kwargs):
''' Produce a matrix of four diagnostic plots:
the residual v. quantile plot, the residual v. fited values plot, the histogram of residuals, and the residual v. order plot.
Arguments:
kwargs - Named parameters that will be passed onto lower level matplotlib plotting functions.
Returns:
A tuple containing the matplotlib (figure, list of axes) for the partial plots.
'''
f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, **kwargs)
self.residual_quantile_plot(ax = ax1)
self.residual_fitted_plot(ax = ax2)
self.residual_histogram(ax = ax3)
self.residual_order_plot(ax = ax4)
f.suptitle("Residal Diagnostic Plots for " + str(self))
return f, (ax1, ax2, ax3, ax4)
def residual_quantile_plot(self, ax = None):
''' Produces the residual v. quantile plot of the model.
Arguments:
ax - An optional parameter that is a pregenerated Axis object.
Returns:
A rendered matplotlib axis object.
'''
if ax is None:
f, ax = plt.subplots(1,1)
stats.probplot(self.residuals["Residuals"], dist = "norm", plot = ax)
ax.set_title("Residual Q-Q Plot")
return ax
def residual_fitted_plot(self, ax = None):
''' Produces the residual v. fitted values plot of the model.
Arguments:
ax - An optional parameter that is a pregenerated Axis object.
Returns:
A rendered matplotlib axis object.
'''
if ax is None:
f, ax = plt.subplots(1,1)
ax.scatter(self.fitted["Fitted"], self.residuals["Residuals"])
ax.set_title("Fitted Values v. Residuals")
ax.set_xlabel("Fitted Value")
ax.set_ylabel("Residual")
return ax
def residual_histogram(self, ax = None):
''' Produces the residual histogram of the model.
Arguments:
ax - An optional parameter that is a pregenerated Axis object.
Returns:
A rendered matplotlib axis object.
'''
if ax is None:
f, ax = plt.subplots(1,1)
ax.hist(self.residuals["Residuals"])
ax.set_title("Histogram of Residuals")
ax.set_xlabel("Residual")
ax.set_ylabel("Frequency")
return ax
    def residual_order_plot(self, ax = None):
        ''' Produces the residual v. order plot of the model.

        Residuals are plotted against the row index of the training data.

        Arguments:
            ax - An optional parameter that is a pregenerated Axis object.

        Returns:
            A rendered matplotlib axis object.
        '''
        if ax is None:
            f, ax = plt.subplots(1,1)
        # "o-" draws a marker per residual, connected in row order.
        ax.plot(self.residuals.index, self.residuals["Residuals"], "o-")
        ax.set_title("Order v. Residuals")
        ax.set_xlabel("Row Index")
        ax.set_ylabel("Residual")
        return ax | /salmon_linear_modeling-1.0.0-py3-none-any.whl/salmon/model.py | 0.809238 | 0.590956 | model.py | pypi |
import numpy as np
import math
"""Contains the logic for automatic model building (i.e. stepwise regression)."""
from abc import ABC, abstractmethod
from ordered_set import OrderedSet
from .model import LinearModel
from .comparison import _extract_dfs
from .expression import Constant
class Score(ABC):
    """Generic metric object for model evaluation.

    Subclasses implement `compute` and declare (via `higher_is_better`)
    which direction of the score indicates a better model.
    """
    def __init__(self, model, higher_is_better):
        self.higher_is_better = higher_is_better
        self.model = model
        if model is None:
            # Sentinel: the worst possible score, so any real model's score
            # compares as an improvement over it (used by `stepwise`).
            self._score = np.inf * (-1 if higher_is_better else 1)
        else:
            self._score = self.compute()

    @abstractmethod
    def compute(self):
        # Subclasses return the numeric value of the metric for self.model.
        pass

    def __str__(self):
        return "{} | {}".format(type(self).__name__, self._score)

    def compare(self, other):
        """Return True if `other` scores strictly better than `self`.

        Note this is the opposite of "self is better than other" (which the
        previous docstring claimed): callers such as `stepwise` invoke it as
        `current_best.compare(candidate)` and replace the current best when
        it returns True.
        """
        assert(type(self) is type(other))
        # make sure we are not comparing different types of scores
        if self.higher_is_better:
            return self._score < other._score
        else:
            return self._score > other._score
class RSquared(Score):
    """(Adjusted) coefficient of determination, R^2. Higher is better."""
    def __init__(self, model, adjusted=False):
        self.adjusted=adjusted
        super(RSquared, self).__init__(
            model=model,
            higher_is_better=True
        )

    def __str__(self):
        return "R^2 ({}adjusted) | {}".format(
            "" if self.adjusted else "un",
            self._score,
        )

    def compute(self):
        """ Calculate the (adjusted) R^2 value of the model on its training
        data.

        Returns:
            A real value of the computed R^2 value.
        """
        # NOTE(review): assumes the model exposes `training_y` as a
        # single-column DataFrame -- confirm against LinearModel.
        X = self.model.training_data
        y = self.model.training_y
        pred = self.model.predict(X)
        sse = ((y.iloc[:, 0] - pred.iloc[:, 0]) ** 2).sum()
        ssto = ((y.iloc[:, 0] - y.iloc[:, 0].mean()) ** 2).sum()
        if self.adjusted:
            # BUG FIX: the adjusted/unadjusted branches were swapped; the
            # degrees-of-freedom-scaled sums of squares belong to the
            # *adjusted* statistic, which penalizes model size.
            numerator = sse / (len(y) - len(self.model.X_train_.columns) - 2)
            denominator = ssto / (len(y) - 1)
        else:
            numerator = sse
            denominator = ssto
        return 1 - numerator / denominator
class MSE(Score):
    """Mean squared error of the fitted model. Lower is better."""
    def __init__(self, model):
        super(MSE, self).__init__(model=model, higher_is_better=False)

    def compute(self):
        # SSE divided by its error degrees of freedom.
        error_df = _extract_dfs(self.model, dict_out=True)["error_df"]
        return self.model.get_sse() / error_df
class MallowsCp(Score):
    """Mallows' Cp model-selection statistic. Lower is better."""
    def __init__(self, model):
        super(MallowsCp, self).__init__(
            model=model,
            higher_is_better=False,
        )

    def compute(self):
        """Cp = SSE / sigma^2 - n + 2p, where sigma^2 is the model's
        estimated residual variance (`std_err_est ** 2`), n the sample size,
        and p the number of parameters.
        """
        # (Removed a dead `_extract_dfs` call whose result was never used.)
        sse = self.model.get_sse()
        sigma_sq = self.model.std_err_est ** 2
        n, p = self.model.n, self.model.p
        return sse / sigma_sq - n + (2 * p)
class AIC(Score):
    """Akaike information criterion. Lower is better."""
    def __init__(self, model):
        super(AIC, self).__init__(model=model, higher_is_better=False)

    def compute(self):
        # AIC = 2p - 2 * log-likelihood.
        n_params = self.model.p
        return 2 * (n_params - self.model.log_likelihood())
class BIC(Score):
    """Bayesian information criterion. Lower is better."""
    def __init__(self, model):
        super(BIC, self).__init__(model=model, higher_is_better=False)

    def compute(self):
        # BIC = p * ln(n) - 2 * log-likelihood.
        sample_size, n_params = self.model.n, self.model.p
        log_lik = self.model.log_likelihood()
        return math.log(sample_size) * n_params - 2 * log_lik
"""All metrics that are supported by default."""
_metrics = dict(
r_squared=RSquared,
r_squared_adjusted=lambda model: RSquared(model=model, adjusted=True),
mse=MSE,
cp=MallowsCp,
aic=AIC,
bic=BIC,
)
def stepwise(
    full_model,
    metric_name,
    forward=False,
    naive=False,
    data=None,
    verbose=False,
):
    """Perform forward or backward stepwise regression.

    Arguments:
        full_model - A model object that contains all of the terms to be
            considered for the procedure.
        metric_name - A string containing the name of the metric to use in
            the procedure. Options include: "r_squared", "r_squared_adjusted",
            "mse", "cp", "aic", and "bic".
        forward - If True, specifies forwards stepwise regression. If False,
            specifies backwards stepwise regression. Default is False.
        naive - If True, allows for the removal or addition of terms in a
            model that depend on others being present (e.g. removing variable
            'X' while an interaction between 'X' and 'Y' are still present).
            Defaults to False.
        data - A dataframe that, if specified, will be used for the stepwise
            regression. If not specified, it is assumed the full_model has
            already been trained using some other data.
        verbose - If True, will print to console periodic updates.
            Default is False.

    Returns:
        A dictionary containing the metric used, the best value achieved,
        and the associated best model (ranked via the chosen metric).
    """
    if data is not None:
        full_model.fit(data)
    metric_name = metric_name.lower()
    ex_terms = full_model.ex
    re_term = full_model.re
    data = full_model.training_data
    if ex_terms is None or re_term is None:
        raise AssertionError("The full model must be fit prior to undergoing a stepwise procedure.")
    if metric_name not in _metrics:
        raise KeyError("Metric '{}' not supported. The following metrics are supported: {}".format(
            metric_name,
            list(_metrics.keys())
        ))
    metric_func = _metrics[metric_name]
    # Candidate terms still available to add (forward) / remove (backward).
    ex_term_list = ex_terms.get_terms()
    if forward:
        # Forward search starts from the intercept-only model.
        best_model = LinearModel(Constant(1), re_term)
        best_model.fit(data)
    else:
        best_model = full_model
    best_metric = metric_func(best_model)
    while len(ex_term_list) > 0:
        # metric_func(None) is a sentinel carrying the worst possible score.
        best_potential_metric = metric_func(None)
        best_potential_model = None
        best_idx = None
        if forward and not naive:
            ex_term_list_expression = None
            for t in ex_term_list:
                if ex_term_list_expression is None:
                    ex_term_list_expression = t
                else:
                    ex_term_list_expression = ex_term_list_expression + t
            # Find all terms that do not depend on other terms
            leaves = OrderedSet(term for term in ex_term_list if not \
                term.contains(ex_term_list_expression - term))
        for i, term in enumerate(ex_term_list):
            try:
                if forward:
                    # validate if adding term is valid
                    if not naive:
                        if term not in leaves:
                            continue
                    potential_model = LinearModel(
                        best_model.given_ex + term,
                        re_term,
                    )
                else:
                    # validate if removing term is valid
                    if not naive:
                        if (best_model.given_ex - term).contains(term):
                            continue
                    potential_model = LinearModel(
                        best_model.given_ex - term,
                        re_term,
                    )
                potential_model.fit(data)
                potential_metric = metric_func(potential_model)
                # compare() is True when the candidate scores better.
                if best_potential_metric.compare(potential_metric):
                    best_potential_metric = potential_metric
                    best_potential_model = potential_model
                    best_idx = i
                if verbose:
                    print(potential_model)
                    print(potential_metric)
                    print("Current best potential model" if best_idx == i else "Not current best potential")
                    print()
            # NOTE(review): `np.linalg.linalg` is a deprecated alias that is
            # removed in newer NumPy; `np.linalg.LinAlgError` is the stable
            # spelling -- confirm the supported NumPy version range.
            except np.linalg.linalg.LinAlgError:
                continue
        if best_metric.compare(best_potential_metric):
            best_metric = best_potential_metric
            best_model = best_potential_model
            if verbose:
                # NOTE(review): in backward mode this message reads
                # "including" even though the term was just removed.
                print("!!! New model found. Now including", ex_term_list[best_idx])
                print()
            # The chosen term is no longer a candidate in later rounds.
            del ex_term_list[best_idx]
        else:
            if verbose:
                print("!!! No potential models better than prior. Exiting search.")
                print()
            break
    else:
        # while-else: ran out of candidate terms without breaking.
        if verbose:
            print("!!! Exhausted all potential terms. None left to consider.")
    return dict(
        forward=forward,
        metric=best_metric,
        metric_name=metric_name,
        best_model=best_model
    ) | /salmon_lm-1.2-py3-none-any.whl/salmon/building.py | 0.915005 | 0.528412 | building.py | pypi |
from .model import *
from scipy.stats import f
from ordered_set import OrderedSet
import numpy as np
import pandas as pd
def anova(model1, model2 = None):
    """Perform inference by comparing models with an Analysis of Variance.

    With a single model, a general F-test is executed on all of its
    coefficients. With two models, a partial F-test is executed; one model
    must be a subset of the other (in either argument order).

    Arguments:
        model1 - A Model object that has been fit on some data
        model2 - A Model object that has been fit on some data

    Returns:
        A DataFrame that contains relevant statistics for the test performed
    """
    if model2 is None:
        return _anova_terms(model1)
    # Orient the pair so the superset model is passed first.
    if is_subset(model1, model2):
        return _anova_models(model1, model2)
    if is_subset(model2, model1):
        return _anova_models(model2, model1)
    raise Exception("Parameters must either be one model or two models where one is a subset of the other.")
def is_subset(model1, model2):
    """Checks whether model2's terms are a subset of model1's terms.

    Arguments:
        model1 - A Model object that has been fit on some data.
        model2 - A Model object that has been fit on some data.

    Returns:
        True if model2 is a subset of model1, False otherwise.
    """
    # Models should both have the same response variable
    if not model1.given_re.__sim__(model2.given_re):
        return False
    superset_terms = OrderedSet(model1.ex.get_terms())
    subset_terms = OrderedSet(model2.ex.get_terms())
    return subset_terms.issubset(superset_terms)
def _calc_stats(numer_ss, numer_df, denom_ss, denom_df):
    """Compute an F statistic and its p-value from two sums of squares.

    Arguments:
        numer_ss - Sum of squares for the numerator.
        numer_df - Degrees of freedom for the numerator.
        denom_ss - Sum of squares for the denominator.
        denom_df - Degrees of freedom for the denominator.

    Returns:
        A tuple (f_val, p_val): the F statistic (ratio of the two mean
        squares) and its upper-tail p-value on (numer_df, denom_df) degrees
        of freedom.

    (The previous docstring documented a `denom_ms` parameter and a
    three-element return value, neither of which matched the code.)
    """
    numer_ms = numer_ss / numer_df
    denom_ms = denom_ss / denom_df
    f_val = numer_ms / denom_ms
    # Upper tail: survival function of the F distribution.
    p_val = f.sf(f_val, numer_df, denom_df)
    return f_val, p_val
def _process_term(orig_model, term):
    """Refit the model with one term removed and report its sums of squares.

    Arguments:
        orig_model - A fitted Model object.
        term - A Variable object to be left out of the original model when
            fitting.

    Returns:
        A tuple (sse, ssr) of the reduced model's error and regression sums
        of squares.
    """
    reduced = LinearModel(orig_model.given_ex - term, orig_model.given_re)
    reduced.fit(orig_model.training_data)
    return reduced.get_sse(), reduced.get_ssr()
def _extract_dfs(model, dict_out=False):
    """Obtain the F-test degrees of freedom for a fitted model.

    Arguments:
        model - A fitted Model object.
        dict_out - If True, return a dict with keys "model_df", "total_df",
            and "error_df" instead of a tuple.

    Returns:
        By default a tuple (regression df, residual df, total df); a dict of
        the same three values when dict_out is True.
    """
    reg_df = model.ex.get_dof()
    total_df = len(model.X_train_) - 1
    error_df = total_df - reg_df
    if dict_out:
        return dict(
            model_df=reg_df,
            total_df=total_df,
            error_df=error_df
        )
    return reg_df, error_df, total_df
def _anova_terms(model):
    """Perform a global F-test plus per-term leave-one-out F-tests.

    Arguments:
        model - A fitted model object.

    Returns:
        A DataFrame containing the degrees of freedom, error and regression
        sums of squares, F values, and p values for the associated tests.
    """
    full_reg_df, full_error_df, total_df = _extract_dfs(model)
    # Full model values
    full_sse = model.get_sse() # sum of squared errors
    full_ssr = model.get_ssr() # sum of squares explained by model
    global_f_val, global_p_val = _calc_stats(
        full_ssr,
        full_reg_df,
        full_sse,
        full_error_df,
    )
    # Calculate the general terms now
    indices = ["Global Test"]
    # BUG FIX: the "SS Err." column for the global test previously reported
    # the regression sum of squares (full_ssr) instead of the error sum of
    # squares. (Also removed an unused `full_sst` computation.)
    sses = [full_sse]
    ssrs = [full_ssr]
    f_vals = [global_f_val]
    p_vals = [global_p_val]
    dfs = [full_reg_df]
    terms = model.ex.get_terms()
    for term in terms:
        term_df = term.get_dof()
        reduced_sse, reduced_ssr = _process_term(model, term)
        # Extra-sum-of-squares F test for dropping this term.
        reduced_f_val, reduced_p_val = _calc_stats(
            full_ssr - reduced_ssr,
            term_df,
            full_sse,
            full_error_df,
        )
        indices.append("- " + str(term))
        sses.append(reduced_sse)
        ssrs.append(reduced_ssr)
        dfs.append(term_df)
        f_vals.append(reduced_f_val)
        p_vals.append(reduced_p_val)
    # Finish off the dataframe's values
    indices.append("Error")
    sses.append("")
    ssrs.append("")
    dfs.append(full_error_df)
    f_vals.append("")
    p_vals.append("")
    return pd.DataFrame({
        "DF" : dfs,
        "SS Err.": sses,
        "SS Reg." : ssrs,
        "F" : f_vals,
        "p" : p_vals
    }, index = indices, columns = ["DF", "SS Err.", "SS Reg.", "F", "p"])
def _anova_models(full_model, reduced_model):
    """Performs a partial F-test to compare two models.

    Arguments:
        full_model - A fitted Model object.
        reduced_model - A fitted Model object that is a subset of the
            full_model.

    Returns:
        A DataFrame object that contains the residuals' degrees of freedom, sum
        of squares of the regression, degrees of freedom for the model, sum of
        squared residuals, the F value, and the p value for the associated test
        performed.
    """
    # NOTE(review): these two labels are computed but never used below.
    full_label = str(full_model)
    reduced_label = str(reduced_model)
    f_reg_df, f_error_df, f_total_df = _extract_dfs(full_model)
    r_reg_df, r_error_df, r_total_df = _extract_dfs(reduced_model)
    f_sse, f_ssr = full_model.get_sse(), full_model.get_ssr()
    r_sse, r_ssr = reduced_model.get_sse(), reduced_model.get_ssr()
    # Extra-sum-of-squares test: (drop in SSE / drop in df) against the
    # full model's mean squared error.
    f_val, p_val = _calc_stats(
        r_sse - f_sse,
        r_error_df - f_error_df,
        f_sse,
        f_error_df,
    )
    indices = ["Full Model", "- Reduced Model", "Error"]
    df = [f_reg_df, f_reg_df - r_reg_df, f_error_df]
    ssrs = [f_ssr, r_ssr, ""]
    sses = [f_sse, r_sse, ""]
    # NOTE(review): local `f` shadows the `scipy.stats.f` import here.
    f = ["", f_val, ""]
    p = ["", p_val, ""]
    return pd.DataFrame({
        "DF" : df,
        "SS Err." : sses,
        "SS Reg." : ssrs,
        "F" : f,
        "p" : p},
        index = indices, columns = ["DF", "SS Err.", "SS Reg.", "F", "p"]) | /salmon_lm-1.2-py3-none-any.whl/salmon/comparison.py | 0.917275 | 0.654495 | comparison.py | pypi |
import numpy as np
import scipy.stats as stats
from scipy.linalg import solve_triangular, cho_solve
import pandas as pd
from pandas.plotting import scatter_matrix
import matplotlib.pyplot as plt
import warnings
from itertools import product
from collections import OrderedDict
from ordered_set import OrderedSet
from .expression import Expression, Combination, Identity, Constant, LightDataFrame, Quantitative
# Apply ggplot styling to every figure produced by this module.
plt.style.use('ggplot')
def _float_format(x):
    """Format a float compactly for pandas display: fixed-point notation in
    a readable magnitude range, scientific notation outside of it."""
    magnitude = abs(x)
    if magnitude >= 1e4:
        rep = "{:.3e}".format(x)
    elif magnitude >= 1e0:
        # Scale the decimal places down as the integer part grows, keeping
        # roughly the same number of significant characters.
        decimals = 3 - int(np.floor(np.log10(magnitude)))
        rep = "{:.{}f}".format(x, decimals)
    elif magnitude >= 1e-3:
        rep = "{:.4f}".format(x)
    elif magnitude >= 1e-9:
        rep = "{:.3e}".format(x)
    elif magnitude >= 1e-99:
        rep = "{:.1e}".format(x)
    else:
        # Covers 0.0 and values too tiny for the branches above.
        rep = "{:.0e}".format(x)
    # Trim a leading zero off small exponents, then special-case zero
    # ("0e+0" only survives the trims when the value itself is zero).
    rep = rep.replace("e-0", "e-").replace("e+0", "e+")
    return rep.replace("0e+0", "0.000")
# Route all float rendering in pandas output through the adaptive formatter.
pd.set_option("display.float_format", _float_format)
def _confint(estimates, standard_errors, df, crit_prob):
    """Return (lower, upper) t-based confidence bounds around `estimates`.

    `crit_prob` is the upper quantile of the t distribution with `df`
    degrees of freedom used for the critical value.
    """
    half_widths = stats.t.ppf(crit_prob, df) * standard_errors
    return estimates - half_widths, estimates + half_widths
def qr_solve(Q, R, y):
    """Solve least squares X \\ y, given the QR decomposition of X."""
    n_coef = R.shape[1]
    if n_coef == 0:
        # Degenerate design with no columns: no coefficients to solve for.
        return np.empty(shape=0)
    # R is upper triangular, so back-substitution on Q.T @ y suffices.
    return solve_triangular(R, Q.T @ y, check_finite=False)
def cho_inv(R):
    """Calculate the inverse of X.T @ X, given an upper-triangular factor R
    satisfying R.T @ R == X.T @ X (e.g. the R from a QR decomposition)."""
    n_coef = R.shape[1]
    if n_coef == 0:
        # No columns means an empty (0 x 0) inverse.
        return np.empty(shape=(0, 0))
    # Two triangular solves against the identity yield the full inverse.
    return cho_solve((R, False), np.identity(n_coef), check_finite=False)
class Model:
    """A general Model class that both Linear models and (in the future)
    General Linear models stem from.

    This base class only defines the interface; every operation raises
    NotImplementedError until overridden by a subclass.
    """

    def __init__(self):
        """Create a Model object (only possible through inheritance)."""
        raise NotImplementedError()

    def fit(self, data):
        """Fit a model to given data.

        Arguments:
            data - A DataFrame whose column names match the terms of the
                model's explanatory and response Expression objects.

        Returns:
            A DataFrame of statistics for the fitted model (coefficients,
            t statistics, p-values, etc.).
        """
        raise NotImplementedError()

    def predict(self, data):
        """Predict response values for a given set of data.

        Arguments:
            data - A DataFrame whose column names match the terms of the
                model's explanatory Expression object.

        Returns:
            A Series of the predicted values.
        """
        raise NotImplementedError()

    def plot_matrix(self, **kwargs):
        """Produce a matrix of pairwise scatter plots of the training data,
        with histograms of each variable along the diagonal.

        Arguments:
            kwargs - Named parameters forwarded to pandas' scatter_matrix.

        Returns:
            A matplotlib plot object containing the matrix of scatter plots.
        """
        combined = pd.concat([self.X_train_, self.y_train_], axis=1)
        scatter_matrix(combined, **kwargs)
class LinearModel(Model):
"""A specific Model that assumes the response variable is linearly related
to the explanatory variables."""
    def __init__(self, explanatory, response, intercept=True):
        """Create a LinearModel object.

        An intercept is included in the model by default. To fit a model
        without an intercept term, either set intercept=False or subtract '1'
        from the explanatory Expression.

        Arguments:
            explanatory - Explanatory variables used to condition the model on.
                Represented either by an `Expression` that is either a single
                term or a `Combination` of terms, or data in the form of a
                pandas `DataFrame` or numpy 1-D / 2-D `ndarray`.
            response - Response variables used as the target for modeling.
                Represented either by an `Expression` object, or data in the
                form of a pandas `Series` or numpy 1-D `ndarray` object.
            intercept - A boolean indicating whether an intercept should be
                included (True) or not (False).
        """
        # may be overwritten if `explanatory` and `response` are given as
        # data structures rather than expressions
        self.training_data = None
        if explanatory is None:
            explanatory = 0
        if isinstance(explanatory, (int, float)):
            # A bare number means an (intercept-like) constant expression.
            explanatory = Constant(explanatory)
        if (isinstance(explanatory, (pd.DataFrame, np.ndarray)) or
                isinstance(response, (pd.Series, np.ndarray))):
            # Data-first construction: both sides must be data structures,
            # and synthetic expressions are built from the column names.
            assert(isinstance(explanatory, (pd.DataFrame, np.ndarray)) and
                   isinstance(response, (pd.Series, np.ndarray)))
            if isinstance(explanatory, np.ndarray):
                if len(explanatory.shape) == 1:
                    # Promote a 1-D array to a single-column matrix.
                    explanatory = explanatory.reshape((len(explanatory), 1))
                else:
                    assert(len(explanatory.shape) == 2)
                expl_data = explanatory
                # Synthesize column names x1, x2, ... for raw arrays.
                expl_cols = ["x{}".format(i+1) for i in range(expl_data.shape[1])]
            else: # isinstance(explanatory, pd.DataFrame)
                expl_data = explanatory.values
                expl_cols = list(explanatory.columns)
            if isinstance(response, np.ndarray):
                if len(response.shape) == 1:
                    response = response.reshape((len(response), 1))
                else:
                    # Only a single response column is supported.
                    assert(response.shape[1] == 1)
                assert(len(response.shape) == 2)
                resp_data = response
                resp_cols = ["y"]
            else: # isinstance(response, pd.Series)
                resp_data = response.values.reshape((len(response), 1))
                if response.name is None:
                    resp_cols = ["y"]
                else:
                    resp_cols = [response.name]
            self.X_train_ = LightDataFrame(expl_data, columns=expl_cols)
            self.y_train_ = LightDataFrame(resp_data, columns=resp_cols)[:, 0]
            self.training_data = LightDataFrame(
                np.hstack([expl_data, resp_data]),
                columns=expl_cols+resp_cols,
            )
            # Bypass construction because we know explicitly that the terms will not collide
            explanatory = Combination(terms=[])
            explanatory.terms = OrderedSet(Quantitative(c) for c in expl_cols)
            if intercept:
                explanatory.terms.add(Constant(1))
            response = Quantitative(resp_cols[0])
        else:
            if intercept:
                # Fold the requested intercept into the expression itself.
                explanatory = explanatory + 1
        if explanatory == Constant(0):
            raise Exception(
                "Must have at least one predictor in explanatory "
                "expression and/or intercept enabled for a valid model."
            )
        self.given_ex = explanatory
        # A non-None constant in the reduced expression signals an intercept.
        constant = self.given_ex.reduce()['Constant']
        self.intercept = constant is not None
        if self.intercept:
            # Strip the constant back out of the expression; the intercept
            # is handled separately during fitting.
            if isinstance(self.given_ex, Combination) and len(self.given_ex.terms) > 1:
                # This bypasses some potentially expensive checks
                self.given_ex.terms.remove(Constant(constant))
            else:
                # This was done to easily check all options for indicating
                # a wanted intercept
                self.given_ex = self.given_ex - constant
        # This will collapse any combination of variables into a single column
        self.given_re = Identity(response)
        # `ex`/`re` hold the interpreted expressions once fit() runs.
        self.ex = None
        self.re = None
        self.categorical_levels = dict()
def __str__(self):
"""Convert a LinearModel to a str format for printing."""
if self.intercept:
return str(self.given_re) + " ~ " + str(1 + self.given_ex)
else:
return str(self.given_re) + " ~ " + str(self.given_ex)
    def fit(self, X=None, y=None):
        """Fit a LinearModel to data.

        Data can either be provided as a single DataFrame X that contains both
        the explanatory and response variables, or in separate data
        structures, one containing the explanatory variables and the other
        containing the response variable. The latter is implemented so that
        LinearModel can be used as scikit-learn Estimator.

        It is fine to have extra columns in the DataFrame that are not used by
        the model---they will simply be ignored.

        Arguments:
            X - An optional DataFrame containing all of the explanatory
                variables in the model and possibly the response variable
                too. If not given, the model is assumed to have been
                instantiated with the data in the constructor.
            y - An optional Series that contains the response variable.

        Returns:
            A DataFrame containing relevant statistics of fitted Model (e.g.,
            coefficients, p-values).
        """
        if X is None:
            # Reuse the data captured by the constructor.
            X = self.X_train_
            y = self.y_train_
            self.re = self.given_re
            self.ex = self.given_ex
        else:
            if y is None:
                # A single DataFrame holds both sides of the model.
                y = X
            # Initialize the categorical levels
            self.categorical_levels = dict()
            self.training_data = X
            # Replace all Var's with either Q's or C's
            self.re = self.given_re.copy().interpret(y)
            self.ex = self.given_ex.copy().interpret(X)
        # Construct X matrix
        X = self.ex.evaluate(X)
        self.X_train_ = X
        # Construct y vector
        y = self.re.evaluate(y)[:, 0]
        self.y_train_ = y
        # Perform linear algebra to find coefficients
        coef_, cols = self._fit(X, y)
        # Now collect other desired statistics
        # Get standard errors (diagonal of the covariance matrix)
        se_coef_ = np.sqrt(np.diagonal(self.cov_))
        # Get inference for coefficients
        self.t_ = coef_ / se_coef_
        # Two-sided p-values on the residual degrees of freedom.
        self.p_ = 2 * stats.t.cdf(-abs(self.t_), self.rdf)
        lower_bound, upper_bound = _confint(coef_, se_coef_, self.rdf, .975)
        # Create output table
        table = pd.DataFrame(OrderedDict((
            ("Coefficient", coef_), ("SE", se_coef_),
            ("t", self.t_), ("p", self.p_),
            ("2.5%", lower_bound), ("97.5%", upper_bound)
        )), index=cols)
        self.coef_ = table["Coefficient"]
        self.se_coef_ = table["SE"]
        return table
    def _fit(self, X, y):
        """Core least-squares routine.

        Computes the coefficient vector, fitted values, residuals, residual
        variance, and coefficient covariance matrix from a design matrix X
        and response vector y, storing most results on self.

        Returns:
            A tuple (coef_, cols) of the coefficient array and the matching
            column names (with "Intercept" appended when applicable).
        """
        if np.isnan(X).any() or np.isnan(y).any():
            warnings.warn("NaN's detected in data. Estimated coefficients may not be accurate.")
        # Get dimensions
        self.n, self.p = X.shape
        # Center if there is an intercept
        if self.intercept:
            # Centering lets the slopes be solved without an explicit ones
            # column; the intercept is recovered from the offsets below.
            X_offsets = X.mean(axis=0)
            y_offset = y.mean()
            X = X - X_offsets[np.newaxis, :]
        else:
            X_offsets = 0
            y_offset = 0
        # Get coefficients using QR decomposition
        q, r = np.linalg.qr(X)
        coef_ = qr_solve(q, r, y - y_offset)
        cols = X.columns.copy() # column names
        # Get fitted values and residuals
        self.fitted_ = y_offset + np.dot(X, coef_)
        self.residuals_ = y - self.fitted_
        # Get residual variance
        self.rdf = self.n - self.p - (1 if self.intercept else 0)
        self.resid_var_ = (self.residuals_ ** 2).sum() / self.rdf
        # Get covariance matrix between coefficients
        self.cov_ = self.resid_var_ * cho_inv(r)
        # Update coefficients and covariance matrix with intercept
        # (if applicable)
        if self.intercept:
            cols.append("Intercept")
            # Intercept = mean response minus the slopes applied to the
            # column means.
            coef_ = np.append(coef_, y_offset - (X_offsets * coef_).sum())
            cov_coef_intercept = -1*np.dot(self.cov_, X_offsets)
            var_intercept = self.resid_var_ / self.n
            var_intercept -= (X_offsets * cov_coef_intercept).sum()
            # Grow the covariance matrix by one row/column for the intercept.
            self.cov_ = np.block([
                [self.cov_, cov_coef_intercept[:, np.newaxis]],
                [cov_coef_intercept[np.newaxis, :], var_intercept]
            ])
        # TODO: Return the covariance rather than save it
        return coef_, cols
def likelihood(self, data=None):
"""Calculate likelihood for a fitted model on either original data or
new data."""
return np.exp(self.log_likelihood(data))
    def log_likelihood(self, data=None):
        """Calculate a numerically stable log_likelihood for a fitted model
        on either original data or new data.

        Uses the Gaussian log-likelihood with the residual variance
        estimated at fit time (self.resid_var_), even for new data.
        """
        if data is None:
            residuals = self.residuals_
        else:
            # Score new data: evaluate the response and compare to the
            # point predictions.
            y = self.re.evaluate(data)
            y_hat = self.predict(
                data,
                for_plot=False,
                confidence_interval=False,
                prediction_interval=False,
            )
            residuals = y[:, 0] - y_hat.iloc[:, 0]
        n = len(residuals)
        return (-n / 2 * (np.log(2 * np.pi) + np.log(self.resid_var_)) -
                (1 / (2 * self.resid_var_)) * (residuals ** 2).sum())
def confidence_intervals(self, alpha=None, conf=None):
"""Calculate confidence intervals for the coefficients.
This function assumes that Model.fit() has already been called.
Arguments:
alpha - A float between 0.0 and 1.0 representing the non-coverage
probability of the confidence interval. In other words, the
confidence level is 1 - alpha / 2.
conf - A float between 0.0 and 1.0 representing the confidence
level. Only one of alpha or conf needs to be specified. If
neither are specified, a default value of conf=0.95 will be
used.
Returns:
A DataFrame containing the appropriate confidence intervals for
all the coefficients.
"""
if alpha is None:
if conf is None:
conf = 0.95
alpha = 1 - conf
crit_prob = 1 - (alpha / 2)
lower_bound, upper_bound = _confint(self.coef_, self.se_coef_,
self.rdf, crit_prob)
return pd.DataFrame({
"%.1f%%" % (100 * (1 - crit_prob)): lower_bound,
"%.1f%%" % (100 * crit_prob): upper_bound
}, index=self.coef_.index)
    def predict(
        self,
        data,
        for_plot=False,
        confidence_interval=False,
        prediction_interval=False,
    ):
        """Predict response values from a fitted Model.

        Arguments:
            data - A DataFrame containing the values of the explanatory
                variables, for which predictions are desired.
            for_plot - A boolean indicating if these predictions are computed
                for the purposes of plotting.
            confidence_interval - If a confidence interval for the mean
                response is desired, this is a float between 0.0 and 1.0
                indicating the confidence level to use.
            prediction_interval - If a prediction interval is desired, this is
                a float between 0.0 and 1.0 indicating the confidence level to
                use.

        Returns:
            A DataFrame containing the predictions and/or intervals.
        """
        # Construct the X matrix
        if isinstance(data, (LightDataFrame, pd.DataFrame)):
            # fit=False: reuse categorical levels learned during fit().
            X = self.ex.evaluate(data, fit=False)
        else:
            # Raw array input must already match the training design matrix.
            assert(data.shape[-1] == self.X_train_.shape[-1])
            X = data
        if self.intercept:
            # Append a ones column so the intercept coefficient applies.
            n, _ = X.shape
            X = np.hstack((X, np.ones((n, 1))))
        y_vals = np.dot(X, self.coef_)
        if isinstance(data, pd.DataFrame):
            index = data.index
        else:
            index = None
        predictions = pd.DataFrame(
            {"Predicted " + str(self.re): y_vals},
            index=index,
        )
        if confidence_interval or prediction_interval:
            # NOTE(review): the interval value is forwarded as `alpha` to
            # the *_interval_width helpers, which treat it as a non-coverage
            # probability; the docstring calls it a "confidence level" --
            # confirm the intended convention.
            if confidence_interval:
                alpha = confidence_interval
                widths = self._confidence_interval_width(
                    X,
                    confidence_interval,
                )
            else:
                alpha = prediction_interval
                widths = self._prediction_interval_width(
                    X,
                    prediction_interval,
                )
            crit_prob = 1 - (alpha / 2)
            lower = y_vals - widths
            upper = y_vals + widths
            # Columns are labeled by the two tail probabilities.
            predictions[str(round(1 - crit_prob, 5) * 100) + "%"] = lower
            predictions[str(round(crit_prob, 5) * 100) + "%"] = upper
        return predictions
def get_sse(self):
"""Get the SSE of a fitted model."""
sse = ((self.y_train_ - self.fitted_) ** 2).sum()
return sse
def get_ssr(self):
"""Get the SSR of a fitted model."""
ssr = self.get_sst() - self.get_sse()
return ssr
def get_sst(self):
"""Get the SST of a fitted model."""
sst = ((self.y_train_ - self.y_train_.mean()) ** 2).sum()
return sst
def r_squared(self, X=None, y=None, adjusted=False, **kwargs):
"""Calculate the (adjusted) R^2 value of the model.
This can be used as a metric within the sklearn ecosystem.
Arguments:
X - An optional DataFrame of the explanatory data to be used for
calculating R^2. Default is the training data.
y - An optional DataFrame of the response data to be used for
calculating R^2. Default is the training data.
adjusted - A boolean indicating if the R^2 value is adjusted
(True) or not (False).
Returns:
A real value of the computed R^2 value.
"""
# Allow interfacing with sklearn's cross fold validation
# self.fit(X, y)
if X is None:
X = self.training_data
if y is None:
y = self.y_train_
pred = self.predict(X)
sse = ((y - pred.iloc[:, 0]) ** 2).sum()
ssto = ((y - y.mean()) ** 2).sum()
if adjusted:
numerator = sse
denominator = ssto
else:
numerator = sse / (len(y) - len(self.X_train_.columns) - 2)
denominator = ssto / (len(y) - 1)
return 1 - numerator / denominator
def score(self, X=None, y=None, adjusted=False, **kwargs):
    """sklearn-compatible scoring hook for cross fold validation.

    Simply delegates to LinearModel.r_squared; see that method for details.
    """
    return self.r_squared(X=X, y=y, adjusted=adjusted, **kwargs)
def _prediction_interval_width(self, X_new, alpha=0.05):
"""Helper function for calculating prediction interval widths."""
mse = self.get_sse() / self.rdf
s_yhat_squared = (X_new.dot(self.cov_) * X_new).sum(axis=1)
s_pred_squared = mse + s_yhat_squared
t_crit = stats.t.ppf(1 - (alpha / 2), self.rdf)
return t_crit * (s_pred_squared ** 0.5)
def _confidence_interval_width(self, X_new, alpha=0.05):
"""Helper function for calculating confidence interval widths."""
_, p = X_new.shape
s_yhat_squared = (X_new.dot(self.cov_) * X_new).sum(axis=1)
# t_crit = stats.t.ppf(1 - (alpha / 2), n-p)
W_crit_squared = p * stats.f.ppf(1 - (alpha / 2), p, self.rdf)
return (W_crit_squared ** 0.5) * (s_yhat_squared ** 0.5)
def plot(
    self,
    categorize_residuals=True,
    jitter=None,
    confidence_band=False,
    prediction_band=False,
    original_y_space=True,
    transformed_y_space=False,
    alpha=0.5,
    **kwargs
):
    """Visualizes the fitted LinearModel and its line of best fit.

    Arguments:
        categorize_residuals - A boolean that indicates if the residual
            points should be colored by categories (True) or not (False).
        jitter - If None or True, residuals in factor plots are jittered;
            any other value disables jitter (see _plot_zero_quant).
        confidence_band - The alpha level used to compute the confidence
            band's width, or False for no band. Mutually exclusive with
            prediction_band.
        prediction_band - The alpha level used to compute the prediction
            band's width, or False for no band.
        original_y_space - If True, plot in the untransformed y space.
        transformed_y_space - If True, plot in the transformed y space.
            If both flags are True, the two spaces are plotted
            side-by-side; at least one must be True.
        alpha - A real value that indicates the transparency of the
            residuals. Default is 0.5.
        kwargs - Additional named parameters that will be passed onto
            lower level matplotlib plotting functions.

    Returns:
        The matplotlib figure containing the visualization.

    Raises:
        Exception - if both bands are requested, or if the expression
            references more than one quantitative variable.
        AssertionError - if both y-space flags are False.
    """
    if confidence_band and prediction_band:
        raise Exception(
            "Only one of {confidence_band, prediction_band} may "
            "be set to True at a time."
        )
    terms = self.ex.reduce()
    # Decide which y spaces to draw and allocate one axis per space.
    if original_y_space and transformed_y_space:
        fig, (ax_o, ax_t) = plt.subplots(1, 2, **kwargs)
        y_spaces = ['o', 't']
        axs = [ax_o, ax_t]
    elif transformed_y_space:  # at least one of the two is False
        fig, ax_t = plt.subplots(1, 1, **kwargs)
        y_spaces = ['t']
        axs = [ax_t]
    elif original_y_space:
        fig, ax_o = plt.subplots(1, 1, **kwargs)
        y_spaces = ['o']
        axs = [ax_o]
    else:
        raise AssertionError(
            "At least one of either `original_y_space` or "
            "`transformed_y_space` should be True in "
            "`model.plot(...)` call."
        )
    for y_space_type, ax in zip(y_spaces, axs):
        # NOTE: deliberately rebinds the `original_y_space` parameter so the
        # per-axis helpers below see the space for *this* axis.
        original_y_space = y_space_type == "o"
        # Redundant, we untransform in later function calls
        # TODO: Fix later
        y_vals = self.y_train_
        if original_y_space:
            y_vals = self.re.untransform(y_vals)
        # Plotting Details: pad the y limits by 5% of the data range.
        min_y = min(y_vals)
        max_y = max(y_vals)
        diff = (max_y - min_y) * 0.05
        min_y = min(min_y - diff, min_y + diff)  # Add a small buffer
        max_y = max(max_y - diff, max_y + diff)
        # TODO: Check if min() and max() are necessary here
        plot_args = {
            "categorize_residuals": categorize_residuals,
            "jitter": jitter,
            "terms": terms,
            "confidence_band": confidence_band,
            "prediction_band": prediction_band,
            "original_y_space": original_y_space,
            "alpha": alpha,
            "plot_objs": {
                "figure": fig,
                "ax": ax,
                "y": {
                    "min": min_y,
                    "max": max_y,
                    "name": str(self.re)
                }
            }
        }
        # Dispatch on how many quantitative variables the expression has.
        if len(terms['Q']) == 1:
            self._plot_one_quant(**plot_args)
        elif len(terms['Q']) == 0 and len(terms['C']) > 0:
            self._plot_zero_quant(**plot_args)  # TODO Make function
        else:
            raise Exception(
                "Plotting line of best fit only expressions that reference"
                " a single variable."
            )
    return fig
def _plot_zero_quant(
    self,
    categorize_residuals,
    jitter,
    terms,
    confidence_band,
    prediction_band,
    original_y_space,
    alpha,
    plot_objs,
):
    """A helper function for plotting models in the case no quantitative
    variables are present.

    The categorical variable with the most levels is placed on the x-axis;
    every combination of the remaining categorical variables produces one
    line (and one legend entry).
    """
    ax = plot_objs['ax']
    unique_cats = list(terms['C'])
    levels = [cat.levels for cat in unique_cats]
    # Pick the categorical with the most levels for the x-axis.
    level_amounts = [len(level_ls) for level_ls in levels]
    ml_index = level_amounts.index(max(level_amounts))
    ml_cat = unique_cats[ml_index]
    ml_levels = levels[ml_index]
    # List of categorical variables without the ml_cat
    cats_wo_most = unique_cats[:]
    cats_wo_most.remove(ml_cat)
    # List of levels for categorical variables without the ml_cat
    levels_wo_most = levels[:]
    levels_wo_most.remove(levels[ml_index])
    single_cat = len(cats_wo_most) == 0
    if single_cat:
        level_combinations = [None]
    else:
        level_combinations = product(*levels_wo_most)  # Cartesian product
    # To produce an index column to be used for the x-axis alignment
    line_x = pd.DataFrame({str(ml_cat): ml_levels}).reset_index()
    points = pd.merge(self.training_data, line_x, on=str(ml_cat))
    plot_objs['x'] = {'name': 'index'}
    points["<Y_RESIDS_TO_PLOT>"] = self.re.evaluate(points)
    if original_y_space:
        points["<Y_RESIDS_TO_PLOT>"] = self.re.untransform(
            points["<Y_RESIDS_TO_PLOT>"]
        )
        # Inefficient due to transforming, then untransforming.
        # Need to refactor later.
    plots = []
    labels = []
    linestyles = [':', '-.', '--', '-']
    for combination in level_combinations:
        # Rows of `points` belonging to this combination of levels.
        points_indices = pd.Series([True] * len(points))
        if not single_cat:
            label = []
            for element, var in zip(combination, cats_wo_most):
                name = str(var)
                line_x[name] = element
                label.append(str(element))
                # Filter out points that don't apply to categories
                points_indices = points_indices & (points[name] == element)
            labels.append(", ".join(label))
        # Rotate through the available line styles.
        line_type = linestyles.pop()
        linestyles.insert(0, line_type)
        line_y = self.predict(line_x, for_plot=True)
        y_vals = line_y["Predicted " + plot_objs['y']['name']]
        if original_y_space:
            y_vals_to_plot = self.re.untransform(y_vals)
        else:
            y_vals_to_plot = y_vals
        plot, = ax.plot(line_x.index, y_vals_to_plot, linestyle=line_type)
        # Jitter defaults ON (jitter is None) for factor plots.
        if jitter is None or jitter is True:
            variability = np.random.normal(
                scale=0.025,
                size=sum(points_indices),
            )
        else:
            variability = 0
        # Y values must come from points because earlier merge
        # shuffles rows
        ax.scatter(
            points.loc[points_indices, 'index'] + variability,
            points.loc[points_indices, "<Y_RESIDS_TO_PLOT>"],
            c="black" if single_cat else plot.get_color(),
            alpha=alpha,
        )
        plots.append(plot)
        if confidence_band:
            self._plot_band(
                line_x,
                y_vals,
                plot.get_color(),
                original_y_space,
                plot_objs,
                True,
                confidence_band,
            )
        elif prediction_band:
            self._plot_band(
                line_x,
                y_vals,
                plot.get_color(),
                original_y_space,
                plot_objs,
                False,
                prediction_band,
            )
    if not single_cat and len(cats_wo_most) > 0:
        # Shrink the axes to make room for the legend on the right.
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
        ax.legend(
            plots,
            labels,
            title=", ".join([str(cat) for cat in cats_wo_most]),
            loc="center left",
            bbox_to_anchor=(1, 0.5),
        )
    ax.set_xlabel(str(ml_cat))
    ax.set_xticks(line_x.index)
    ax.set_xticklabels(line_x[str(ml_cat)])
    if not original_y_space:
        ax.set_ylabel(plot_objs['y']['name'])
    else:
        ax.set_ylabel(self.re.untransform_name())
    ax.grid()
    ax.set_ylim([plot_objs['y']['min'], plot_objs['y']['max']])
def _plot_one_quant(
    self,
    categorize_residuals,
    jitter,
    terms,
    confidence_band,
    prediction_band,
    original_y_space,
    alpha,
    plot_objs,
):
    """A helper function for plotting models in the case only one
    quantitative variable is present. Also supports zero or more
    categorical variables.

    Computes padded x limits, builds a 100-point grid for the fitted line,
    then dispatches to the zero-categoricals or some-categoricals helper.
    """
    # Get the "first" and only element in the set
    x_term = next(iter(terms['Q']))
    x_name = str(x_term)
    x = self.training_data[x_name]
    # Pad the x limits by 5% of the data range.
    min_x = min(x)
    max_x = max(x)
    diff = (max_x - min_x) * 0.05
    min_x = min(min_x - diff, min_x + diff)  # Add a small buffer
    max_x = max(max_x - diff, max_x + diff)
    # TODO: Check if min() and max() are necessary here
    plot_objs['x'] = {"min" : min_x, "max" : max_x, "name" : x_name}
    # Quantitative inputs: 100 evenly spaced points for the fitted line.
    line_x = pd.DataFrame({x_name: np.linspace(min_x, max_x, 100)})
    if len(terms['C']) == 0:
        self._plot_one_quant_zero_cats(
            x,
            line_x,
            jitter,
            terms,
            confidence_band,
            prediction_band,
            original_y_space,
            alpha,
            plot_objs,
        )
    else:
        self._plot_one_quant_some_cats(
            x,
            line_x,
            categorize_residuals,
            jitter,
            terms,
            confidence_band,
            prediction_band,
            original_y_space,
            alpha,
            plot_objs,
        )
    ax = plot_objs['ax']
    ax.set_xlabel(x_name)
    if not original_y_space:
        ax.set_ylabel(plot_objs['y']['name'])
    else:
        ax.set_ylabel(self.re.untransform_name())
    ax.grid()
    ax.set_xlim([min_x, max_x])
    ax.set_ylim([plot_objs['y']['min'], plot_objs['y']['max']])
def _plot_one_quant_zero_cats(
    self,
    x,
    line_x,
    jitter,
    terms,
    confidence_band,
    prediction_band,
    original_y_space,
    alpha,
    plot_objs,
):
    """A helper function for plotting models in the case only one
    quantitative variable and no categorical variables are present.

    Draws a single fitted line, an optional confidence or prediction band,
    and the training residuals as a black scatter.
    """
    x_name = plot_objs['x']['name']
    ax = plot_objs['ax']
    line_y = self.predict(line_x)
    y_vals = line_y["Predicted " + plot_objs['y']['name']]
    # Bands are computed in the transformed space (on y_vals) and
    # untransformed inside _plot_band when needed.
    if original_y_space:
        y_vals_to_plot = self.re.untransform(y_vals)
    else:
        y_vals_to_plot = y_vals
    line_fit, = ax.plot(line_x[x_name], y_vals_to_plot)
    if confidence_band:
        self._plot_band(
            line_x,
            y_vals,
            line_fit.get_color(),
            original_y_space,
            plot_objs,
            True,
            confidence_band,
        )
    elif prediction_band:
        self._plot_band(
            line_x,
            y_vals,
            line_fit.get_color(),
            original_y_space,
            plot_objs,
            False,
            prediction_band,
        )
    y_train_vals = self.y_train_
    if original_y_space:
        y_train_vals = self.re.untransform(y_train_vals)
    ax.scatter(x, y_train_vals, c = "black", alpha = alpha)
def _plot_band(
    self,
    line_x,
    y_vals,
    color,
    original_y_space,
    plot_objs,
    use_confidence=False,
    alpha=0.05,
):
    """A helper function to plot the confidence or prediction bands for
    a model. By default will plot prediction bands.

    Arguments:
        line_x - DataFrame of the x grid the fitted line was drawn on.
        y_vals - Fitted y values (in the transformed space) at line_x.
        color - Fill color for the band.
        original_y_space - If True, untransform the band edges before
            filling.
        plot_objs - Shared plotting state dict ('ax', 'x', 'y' entries).
        use_confidence - True for a confidence band, False for a
            prediction band.
        alpha - Significance level for the band width. Default 0.05.
    """
    x_name = plot_objs['x']['name']
    # Rebuild the design matrix for the grid, mirroring predict().
    X_new = self.ex.evaluate(line_x, fit = False)
    if self.intercept:
        n, _ = X_new.shape
        X_new = np.hstack((X_new, np.ones((n, 1))))
    if use_confidence:
        widths = self._confidence_interval_width(X_new, alpha)
    else:
        widths = self._prediction_interval_width(X_new, alpha)
    lower = y_vals - widths
    upper = y_vals + widths
    if original_y_space:
        lower = self.re.untransform(lower)
        upper = self.re.untransform(upper)
    plot_objs['ax'].fill_between(
        x=line_x[x_name],
        y1=lower,
        y2=upper,
        color=color,
        alpha=0.3,
    )
def _plot_one_quant_some_cats(
    self,
    x,
    line_x,
    categorize_residuals,
    jitter,
    terms,
    confidence_band,
    prediction_band,
    original_y_space,
    alpha,
    plot_objs,
):
    """A helper function for plotting models in the case only one
    quantitative variable and one or more categorical variables are
    present.

    One fitted line (and legend entry) is drawn per combination of
    categorical levels; residuals are optionally colored to match.
    """
    ax = plot_objs['ax']
    x_name = plot_objs['x']['name']
    y_name = plot_objs['y']['name']
    plots = []
    labels = []
    linestyles = [':', '-.', '--', '-']
    cats = list(terms['C'])
    cat_names = [str(cat) for cat in cats]
    levels = [cat.levels for cat in cats]
    # cartesian product of all combinations
    level_combinations = product(*levels)
    dummy_data = line_x.copy()  # rest of columns set in next few lines
    y_train_vals = self.y_train_
    if original_y_space:
        y_train_vals = self.re.untransform(y_train_vals)
    for level_set in level_combinations:
        label = []  # To be used in legend
        for (cat,level) in zip(cats,level_set):
            dummy_data[str(cat)] = level  # set dummy data for prediction
            label.append(str(level))
        line_type = linestyles.pop()  # rotate through line styles
        linestyles.insert(0, line_type)
        line_y = self.predict(dummy_data, for_plot=True)
        y_vals = line_y["Predicted " + y_name]
        if original_y_space:
            y_vals_to_plot = self.re.untransform(y_vals)
        else:
            y_vals_to_plot = y_vals
        plot, = ax.plot(dummy_data[x_name], y_vals_to_plot, linestyle=line_type)
        plots.append(plot)
        labels.append(", ".join(label))
        if categorize_residuals:
            # Color only the residuals belonging to this level combination.
            indices_to_use = pd.Series([True] * len(x))  # gradually gets filtered out
            for (cat,level) in zip(cats,level_set):
                indices_to_use = indices_to_use & (self.training_data[str(cat)] == level)
            ax.scatter(
                x[indices_to_use],
                y_train_vals[indices_to_use],
                c=plot.get_color(),
                alpha=alpha,
            )
        if confidence_band:
            self._plot_band(
                dummy_data,
                y_vals,
                plot.get_color(),
                original_y_space,
                plot_objs,
                True,
                confidence_band,
            )
        elif prediction_band:
            self._plot_band(
                dummy_data,
                y_vals,
                plot.get_color(),
                original_y_space,
                plot_objs,
                False,
                prediction_band,
            )
    # Legend: shrink the axes to make room on the right.
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    ax.legend(
        plots,
        labels,
        title=", ".join(cat_names),
        loc="center left",
        bbox_to_anchor=(1, 0.5),
    )
    if not categorize_residuals:
        ax.scatter(x, y_train_vals, c="black", alpha=alpha)
def residual_plots(self, **kwargs):
    """Plot the residual plots of the model, one per design-matrix column.

    Arguments:
        kwargs - Named parameters that will be passed onto lower level
            matplotlib plotting functions.

    Returns:
        A tuple containing the matplotlib (figure, list of axes) for the
        residual plots.
    """
    terms = self.X_train_.columns
    # NOTE(review): with a single term, plt.subplots returns a bare Axes
    # (not a sequence) and the zip below would fail -- confirm callers
    # always have >= 2 columns.
    fig, axs = plt.subplots(1, len(terms), **kwargs)
    for term, ax in zip(terms, axs):
        # NOTE(review): `get_column` is not a pandas.DataFrame method;
        # presumably X_train_ provides it -- verify against its type.
        ax.scatter(self.X_train_.get_column(term), self.residuals_)
        ax.set_xlabel(str(term))
        ax.set_ylabel("Residuals")
        ax.set_title(str(term) + " v. Residuals")
        ax.grid()
    return fig, axs
def partial_plots(self, alpha=0.5, **kwargs):
    """Plot the partial regression (leverage) plots for the model.

    For each term x_i, fits the response and x_i against the remaining
    terms and scatters the two sets of residuals against each other.

    Arguments:
        alpha - A real value indicating the transparency of the residuals.
            Default is 0.5.
        kwargs - Named parameters that will be passed onto lower level
            matplotlib plotting functions.

    Returns:
        A tuple containing the matplotlib (figure, list of axes) for the
        partial plots.
    """
    #terms = self.ex.flatten(separate_interactions = False)
    terms = self.ex.get_terms()
    # NOTE(review): with a single term, plt.subplots returns a bare Axes
    # and the zip below would fail -- confirm.
    fig, axs = plt.subplots(1, len(terms), **kwargs)
    for i, ax in zip(range(0, len(terms)), axs):
        xi = terms[i]
        # Model everything except xi, for both the response and xi itself.
        sans_xi = Combination(terms[:i] + terms[i+1:])
        yaxis = LinearModel(sans_xi, self.re)
        xaxis = LinearModel(sans_xi, xi)
        yaxis.fit(self.training_data)
        xaxis.fit(self.training_data)
        ax.scatter(xaxis.residuals_, yaxis.residuals_, alpha=alpha)
        ax.set_title("Leverage Plot for " + str(xi))
    return fig, axs
@staticmethod
def ones_column(data):
"""Helper function to create a column of ones for the intercept."""
return pd.DataFrame({"Intercept" : np.repeat(1, data.shape[0])})
def plot_residual_diagnostics(self, **kwargs):
    """Produce a 2x2 matrix of four diagnostic plots:
    the residual v. quantile plot, the residual v. fitted values plot,
    the histogram of residuals, and the residual v. order plot.

    Arguments:
        kwargs - Named parameters that will be passed onto lower level
            matplotlib plotting functions.

    Returns:
        A tuple containing the matplotlib (figure, tuple of four axes).
    """
    f, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, **kwargs)
    self.residual_quantile_plot(ax=ax1)
    self.residual_fitted_plot(ax=ax2)
    self.residual_histogram(ax=ax3)
    self.residual_order_plot(ax=ax4)
    # Bug fix: corrected the user-facing typo "Residal" -> "Residual".
    f.suptitle("Residual Diagnostic Plots for " + str(self))
    return f, (ax1, ax2, ax3, ax4)
def residual_quantile_plot(self, ax=None):
    """Draw the residual Q-Q (normal quantile) plot for this model.

    Arguments:
        ax - An optional pregenerated matplotlib Axis object.

    Returns:
        A rendered matplotlib axis object.
    """
    ax = ax if ax is not None else plt.subplots(1, 1)[1]
    stats.probplot(self.residuals_, dist="norm", plot=ax)
    ax.set_title("Residual Q-Q Plot")
    return ax
def residual_fitted_plot(self, ax=None):
    """Draw the residual v. fitted values scatter plot for this model.

    Arguments:
        ax - An optional pregenerated matplotlib Axis object.

    Returns:
        A rendered matplotlib axis object.
    """
    if ax is None:
        ax = plt.subplots(1, 1)[1]
    ax.scatter(self.fitted_, self.residuals_)
    labeling = (
        (ax.set_title, "Fitted Values v. Residuals"),
        (ax.set_xlabel, "Fitted Value"),
        (ax.set_ylabel, "Residual"),
    )
    for setter, text in labeling:
        setter(text)
    return ax
def residual_histogram(self, ax=None):
    """Draw a histogram of this model's residuals.

    Arguments:
        ax - An optional pregenerated matplotlib Axis object.

    Returns:
        A rendered matplotlib axis object.
    """
    if ax is None:
        ax = plt.subplots(1, 1)[1]
    ax.hist(self.residuals_)
    for setter, text in ((ax.set_title, "Histogram of Residuals"),
                         (ax.set_xlabel, "Residual"),
                         (ax.set_ylabel, "Frequency")):
        setter(text)
    return ax
def residual_order_plot(self, ax=None):
    """Draw the residual v. row-order plot for this model.

    Arguments:
        ax - An optional pregenerated matplotlib Axis object.

    Returns:
        A rendered matplotlib axis object.
    """
    if ax is None:
        ax = plt.subplots(1, 1)[1]
    ax.plot(self.training_data.index, self.residuals_, "o-")
    for setter, text in ((ax.set_title, "Order v. Residuals"),
                         (ax.set_xlabel, "Row Index"),
                         (ax.set_ylabel, "Residual")):
        setter(text)
    return ax
from __future__ import print_function, unicode_literals
from salmon import mail
# Template loader registry: set this to a Jinja2/Mako-style loader (an
# object exposing get_template()) before calling load()/render().
LOADER = None
def load(template):
    """
    Uses the registered loader to load the template you ask for.

    It assumes that your loader works like Jinja2 or Mako in that
    it has a LOADER.get_template() method that returns the template.

    Raises an AssertionError if salmon.view.LOADER was never set.
    """
    assert LOADER, "You haven't set salmon.view.LOADER to a loader yet."
    return LOADER.get_template(template)
def render(variables, template):
    """
    Loads the named template and renders it with the given variables.

    Assumes the template returned by load() has a .render() method that
    takes the variables as a dict.

    Use this if you just want to render a single template and don't
    want it to be a message. Use respond() if the contents of the
    template are to be interpreted as a message with headers and a body.
    """
    compiled = load(template)
    return compiled.render(variables)
def respond(variables, Body=None, Html=None, **kwd):
    """
    Builds a salmon.mail.MailResponse from rendered templates.

    Unlike MailResponse itself, Body and Html here name *templates* to
    render (with `variables`) rather than literal contents. The remaining
    keyword arguments (From/To/Subject, etc.) are passed to MailResponse
    after being interpolated with `variables` via Python %-formatting,
    so e.g. Subject='Test from "%(person)s".' works.

    Example::

        msg = view.respond(locals(), Body='template.txt',
                           Html='template.html',
                           From='test@test.com',
                           To='receiver@test.com',
                           Subject='Test body from "%(person)s".')

    At least one of Body or Html must be given.
    """
    assert Body or Html, "You need to give either the Body or Html template of the mail."
    # Interpolate the header values with the template variables.
    headers = {key: value % variables for key, value in kwd.items()}
    msg = mail.MailResponse(**headers)
    if Body:
        msg.Body = render(variables, Body)
    if Html:
        msg.Html = render(variables, Html)
    return msg
def attach(msg, variables, template, filename=None, content_type=None,
           disposition=None):
    """
    Renders a template and attaches the result to the given message.

    All the parameters mirror salmon.mail.MailResponse.attach.
    """
    rendered = render(variables, template)
    msg.attach(
        filename=filename,
        data=rendered,
        content_type=content_type,
        disposition=disposition,
    )
from __future__ import print_function, unicode_literals
from email.utils import parseaddr
import uuid
from salmon import queue, view
class ConfirmationStorage(object):
    """
    This is the basic confirmation storage. For simple testing purposes
    you can just use the default in-memory dict. If you do a deployment
    you can probably get away with a shelf hash instead.

    You can write your own version of this and use it. The confirmation engine
    only cares that it gets something that supports all of these methods.
    """
    def __init__(self, db=None):
        """
        Pass a shelf-like mapping as db to get persistent storage.
        Defaults to a fresh dict per instance.
        """
        # Bug fix: this previously used `db={}` -- a mutable default
        # argument -- so every instance created without an explicit db
        # silently shared one dict.
        self.confirmations = {} if db is None else db

    def clear(self):
        """
        Used primarily in testing, this clears out all pending confirmations.
        """
        self.confirmations.clear()

    def key(self, target, from_address):
        """
        Used internally to construct a string key, if you write
        your own you don't need this.

        NOTE: To support proper equality and shelve storage, this encodes the
        key into ASCII. Make a different subclass if you need Unicode and your
        storage supports it.
        """
        key = target + ':' + from_address
        return key.encode('ascii')

    def get(self, target, from_address):
        """
        Given a target and a from address, this returns a tuple of
        (expected_secret, pending_message_id). If it doesn't find that
        target+from_address, then it returns a (None, None) tuple.
        """
        return self.confirmations.get(self.key(target, from_address), (None, None))

    def delete(self, target, from_address):
        """
        Removes a target+from_address from the storage. A no-op when the
        key is absent.
        """
        try:
            del self.confirmations[self.key(target, from_address)]
        except KeyError:
            pass

    def store(self, target, from_address, expected_secret, pending_message_id):
        """
        Given a target, from_address it will store the expected_secret and
        pending_message_id for later verification. The target should be a
        string indicating what is being confirmed. Like "subscribe",
        "post", etc.

        When implementing your own you should *never* allow more than one
        target+from_address combination.
        """
        self.confirmations[self.key(target, from_address)] = (expected_secret,
                                                              pending_message_id)
class ConfirmationEngine(object):
    """
    The confirmation engine is what does the work of sending a confirmation,
    and verifying that it was confirmed properly. In order to use it you
    have to construct the ConfirmationEngine (usually in settings module) and
    you write your confirmation message templates for sending.

    The primary methods you use are ConfirmationEngine.send and
    ConfirmationEngine.verify.
    """
    def __init__(self, pending_queue, storage):
        """
        The pending_queue should be a string with the path to the
        salmon.queue.Queue that will store pending messages. These messages
        are the originals the user sent when they tried to confirm.

        Storage should be something that is like ConfirmationStorage so that
        this can store things for later verification.
        """
        self.pending = queue.Queue(pending_queue)
        self.storage = storage

    def get_pending(self, pending_id):
        """
        Returns the pending message for the given ID.
        """
        return self.pending.get(pending_id)

    def push_pending(self, message):
        """
        Puts a pending message into the pending queue and returns its ID.
        """
        return self.pending.push(message)

    def delete_pending(self, pending_id):
        """
        Removes the pending message from the pending queue.
        """
        self.pending.remove(pending_id)

    def cancel(self, target, from_address, expect_secret):
        """
        Used to cancel a pending confirmation.

        Silently does nothing when expect_secret does not match the
        stored secret.
        """
        # parseaddr strips any display name; only the bare address is used.
        name, addr = parseaddr(from_address)
        secret, pending_id = self.storage.get(target, addr)
        if secret == expect_secret:
            self.storage.delete(target, addr)
            self.delete_pending(pending_id)

    def make_random_secret(self):
        """
        Generates a random uuid as the secret, in hex form.
        """
        return uuid.uuid4().hex

    def register(self, target, message):
        """
        Don't call this directly unless you know what you are doing.
        It does the job of registering the original message and the
        expected confirmation into the storage.

        Returns the local-part string "<target>-confirm-<secret>" used
        to build the confirmation address.
        """
        from_address = message.From
        pending_id = self.push_pending(message)
        secret = self.make_random_secret()
        self.storage.store(target, from_address, secret, pending_id)
        return "%s-confirm-%s" % (target, secret)

    def verify(self, target, from_address, expect_secret):
        """
        Given a target (i.e. "subscribe", "post", etc), a from_address
        of someone trying to confirm, and the secret they should use, this
        will try to verify their confirmation. If the verify works then
        you'll get the original message back to do what you want with.

        If the verification fails then you are given None.

        The message is *not* deleted from the pending queue. You can do
        that yourself with delete_pending.
        """
        assert expect_secret, "Must give an expected ID number."
        name, addr = parseaddr(from_address)
        secret, pending_id = self.storage.get(target, addr)
        if secret == expect_secret:
            # The storage entry is consumed even though the pending
            # message itself is kept (see docstring).
            self.storage.delete(target, addr)
            return self.get_pending(pending_id)
        else:
            return None

    def send(self, relay, target, message, template, vars):
        """
        This is the method you should use to send out confirmation messages.
        You give it the relay, a target (i.e. "subscribe"), the message they
        sent requesting the confirm, your confirmation template, and any
        vars that template needs.

        The result of calling this is that the template message gets sent
        through the relay, the original message is stored in the pending
        queue, and data is put into the storage for later calls to verify.

        NOTE(review): the %-interpolated From/Reply-To strings require a
        'host' key; confirm callers always supply it in vars.
        """
        confirm_address = self.register(target, message)
        # WARNING: relies on locals() -- confirm_address, relay, target,
        # message, and template all become template variables. Renaming
        # any local here changes what the template sees.
        vars.update(locals())
        msg = view.respond(vars, template, To=message['from'],
                           From="%(confirm_address)s@%(host)s",
                           Subject="Confirmation required")
        msg['Reply-To'] = "%(confirm_address)s@%(host)s" % vars
        relay.deliver(msg)
def clear(self):
"""
Used in testing to make sure there's nothing in the pending
queue or storage.
"""
self.pending.clear()
self.storage.clear() | /salmon-mail-3.2.0.tar.gz/salmon-mail-3.2.0/salmon/confirm.py | 0.76973 | 0.405331 | confirm.py | pypi |
from __future__ import print_function, unicode_literals
from email.utils import parseaddr
import mimetypes
import os
import warnings
import six
from salmon import bounce, encoding
# Header consulted when MailRequest falls back to a parsed 'to' address
# (see MailRequest.__init__). You can change this to 'Delivered-To' on
# servers that support it like Postfix.
ROUTABLE_TO_HEADER = 'to'
def _decode_header_randomness(addr):
    """
    Normalizes whatever form `addr` arrives in into a set() of bare,
    lowercased email addresses suitable for routing.

    Accepts a falsy value (empty set), a list/tuple of addresses
    (recursively flattened), a text string, or bytes.

    Raises encoding.EncodingError for any other type.
    """
    if not addr:
        return set()
    if isinstance(addr, (list, tuple)):
        collected = set()
        for entry in addr:
            # Union in whatever each element normalizes to.
            collected |= _decode_header_randomness(entry)
        return collected
    # Keep the string check before the bytes check: under Python 2,
    # six.binary_type is also in six.string_types.
    if isinstance(addr, six.string_types):
        return {parseaddr(addr.lower())[1]}
    if isinstance(addr, six.binary_type):
        return {parseaddr(addr.decode().lower())[1]}
    raise encoding.EncodingError("Address must be a string or a list not: %r", type(addr))
class MailRequest(object):
    """
    This is what is given to your message handlers. The information you get out
    of this is *ALWAYS* in Python str (unicode in Python 2.7) and should be
    usable by any API. Modifying this object will cause other handlers that
    deal with it to get your modifications, but in general you don't want to do
    more than maybe tag a few headers.
    """
    def __init__(self, Peer, From, To, Data):
        """
        Peer is the remote peer making the connection (sometimes the queue
        name). From and To are what you think they are. Data is the raw
        full email as received by the server.

        NOTE: It does not handle multiple From headers, if that's even
        possible. It will parse the From into a list and take the first
        one.

        NOTE(review): _decode_header_randomness returns a set, and
        set.pop() takes an *arbitrary* element -- with several addresses
        the pick is not guaranteed to be the first one. Confirm.
        """
        self.Peer = Peer
        self.Data = Data
        try:
            self.From = _decode_header_randomness(From).pop()
        except KeyError:
            # Empty set: no usable envelope From was supplied.
            self.From = None
        try:
            self.To = _decode_header_randomness(To).pop()
        except KeyError:
            self.To = None
        self.base = encoding.from_string(self.Data)
        # Backfill missing headers from the envelope values.
        if 'from' not in self.base:
            self.base['from'] = self.From
        if 'to' not in self.base:
            # do NOT use ROUTABLE_TO here
            self.base['to'] = self.To
        # Fall back to the parsed headers when the envelope values were
        # missing; To routing honors ROUTABLE_TO_HEADER.
        self.From = self.From or self.base['from']
        self.To = self.To or self.base[ROUTABLE_TO_HEADER]
        # Lazily-computed bounce analysis cache (see is_bounce).
        self.bounce = None

    def __repr__(self):
        return "From: %r" % [self.Peer, self.From, self.To]

    def all_parts(self):
        """Returns all multipart mime parts. This could be an empty list."""
        return self.base.parts

    def body(self):
        """
        Always returns a body if there is one. If the message
        is multipart then it returns the first part's body, if
        it's not then it just returns the body. If returns
        None then this message has nothing for a body.
        """
        if self.base.parts:
            return self.base.parts[0].body
        else:
            return self.base.body

    # Dict-style header access delegates to the underlying MailBase.
    def __contains__(self, key):
        return self.base.__contains__(key)

    def __getitem__(self, name):
        return self.base.__getitem__(name)

    def __setitem__(self, name, val):
        self.base.__setitem__(name, val)

    def __delitem__(self, name):
        del self.base[name]

    def __str__(self):
        """
        Converts this to a string usable for storage into a queue or
        transmission.
        """
        return encoding.to_string(self.base)

    def items(self):
        return self.base.items()

    def keys(self):
        return self.base.keys()

    def to_message(self):
        """
        Converts this to a Python email message you can use to
        interact with the python mail APIs.
        """
        return encoding.to_message(self.base)

    def walk(self):
        """Recursively walks all attached parts and their children."""
        for x in self.base.walk():
            yield x

    def is_bounce(self, threshold=0.3):
        """
        Determines whether the message is a bounce message based on
        salmon.bounce.BounceAnalyzer given threshold. 0.3 is a good
        conservative base.
        """
        # Cache the analysis so repeated calls don't re-run detection.
        if not self.bounce:
            self.bounce = bounce.detect(self)
        return self.bounce.score > threshold

    @property
    def original(self):
        # Deprecated alias kept for backwards compatibility.
        warnings.warn("MailRequest.original is deprecated, use MailRequest.Data instead",
                      category=DeprecationWarning, stacklevel=2)
        return self.Data
class MailResponse(object):
    """
    You are given MailResponse objects from the salmon.view methods, and
    whenever you want to generate an email to send to someone. It has
    the same basic functionality as MailRequest, but it is designed to
    be written to, rather than read from (although you can do both).

    You can easily set a Body or Html during creation or after by
    passing it as __init__ parameters, or by setting those attributes.

    You can initially set the From, To, and Subject, but they are headers so
    use the dict notation to change them: ``msg['From'] = 'joe@test.com'``.

    The message is not fully crafted until right when you convert it with
    MailResponse.to_message. This lets you change it and work with it, then
    send it out when it's ready.
    """
    def __init__(self, To=None, From=None, Subject=None, Body=None, Html=None):
        # Body/Html hold the plain-text and HTML payloads; headers live in
        # self.base (a salmon.encoding.MailBase).
        self.Body = Body
        self.Html = Html
        self.base = encoding.MailBase([('To', To), ('From', From), ('Subject', Subject)])
        # Truthy (not necessarily a bool) when both payloads are present;
        # also forced True later by attach()/attach_part().
        self.multipart = self.Body and self.Html
        self.attachments = []
# Dict-style header access delegates to the underlying MailBase.
def __contains__(self, key):
    return self.base.__contains__(key)

def __getitem__(self, key):
    return self.base.__getitem__(key)

def __setitem__(self, key, val):
    return self.base.__setitem__(key, val)

def __delitem__(self, name):
    del self.base[name]
def attach(self, filename=None, content_type=None, data=None, disposition=None):
"""
Simplifies attaching files from disk or data as files. To attach simple
text simple give data and a content_type. To attach a file, give the
data/content_type/filename/disposition combination.
For convenience, if you don't give data and only a filename, then it
will read that file's contents when you call to_message() later. If you
give data and filename then it will assume you've filled data with what
the file's contents are and filename is just the name to use.
"""
assert filename or data, "You must give a filename or some data to attach."
assert data or os.path.exists(filename), "File doesn't exist, and no data given."
self.multipart = True
if filename and not content_type:
content_type, encoding = mimetypes.guess_type(filename)
assert content_type, "No content type given, and couldn't guess from the filename: %r" % filename
self.attachments.append({
'filename': filename,
'content_type': content_type,
'data': data,
'disposition': disposition,
})
def attach_part(self, part):
    """
    Attaches a raw MailBase part from a MailRequest (or anywhere)
    so that you can copy it over.
    """
    self.multipart = True
    # All other keys are None: _encode_attachment takes the 'part'
    # branch and appends it verbatim.
    self.attachments.append({'filename': None,
                             'content_type': None,
                             'data': None,
                             'disposition': None,
                             'part': part,
                             })

def attach_all_parts(self, mail_request):
    """
    Used for copying the attachment parts of a mail.MailRequest
    object for mailing lists that need to maintain attachments.
    """
    for part in mail_request.all_parts():
        self.attach_part(part)
def clear(self):
"""
Clears out the attachments so you can redo them. Use this to keep the
headers for a series of different messages with different attachments.
"""
del self.attachments[:]
del self.base.parts[:]
self.multipart = False
def update(self, message):
    """
    Used to easily set a bunch of headers from another dict like object.
    """
    # Only keys() and __getitem__ are required of ``message``.
    for k in message.keys():
        self.base[k] = message[k]
def __str__(self):
    """
    Converts to a string.
    """
    # The full (lazy) encoding happens here via to_message().
    return self.to_message().as_string()
def _encode_attachment(self, filename=None, content_type=None, data=None, disposition=None, part=None):
"""
Used internally to take the attachments mentioned in self.attachments
and do the actual encoding in a lazy way when you call to_message.
"""
if part:
self.base.parts.append(part)
elif filename:
if not data:
data = open(filename).read()
self.base.attach_file(filename, data, content_type, disposition or 'attachment')
else:
self.base.attach_text(data, content_type)
ctype = self.base.content_encoding['Content-Type'][0]
if ctype and not ctype.startswith('multipart'):
self.base.content_encoding['Content-Type'] = ('multipart/mixed', {})
def to_message(self):
    """
    Figures out all the required steps to finally craft the
    message you need and return it. The resulting message
    is also available as a self.base attribute.
    What is returned is a Python email API message you can
    use with those APIs. The self.base attribute is the raw
    salmon.encoding.MailBase.
    """
    # Start from a clean slate so repeated calls don't duplicate parts.
    del self.base.parts[:]
    if self.Body and self.Html:
        # Both bodies present: this must be a multipart/alternative message.
        self.multipart = True
        self.base.content_encoding['Content-Type'] = ('multipart/alternative', {})
    if self.multipart:
        self.base.body = None
        if self.Body:
            self.base.attach_text(self.Body, 'text/plain')
        if self.Html:
            self.base.attach_text(self.Html, 'text/html')
        # Attachments are encoded lazily, only here.
        for args in self.attachments:
            self._encode_attachment(**args)
    elif self.Body:
        self.base.body = self.Body
        self.base.content_encoding['Content-Type'] = ('text/plain', {})
    elif self.Html:
        self.base.body = self.Html
        self.base.content_encoding['Content-Type'] = ('text/html', {})
    return encoding.to_message(self.base)
def all_parts(self):
    """
    Returns all the encoded parts. Only useful for debugging
    or inspecting after calling to_message().
    """
    # to_message() rebuilds this list from self.attachments each time.
    return self.base.parts
def items(self):
    """Return the (header, value) pairs of the underlying MailBase."""
    return self.base.items()
def keys(self):
    """Return the header names of the underlying MailBase."""
    return self.base.keys()
from __future__ import print_function, unicode_literals
from functools import wraps
import re
# Regexes that both detect the standard DSN (bounce) headers and parse their
# values; the captured groups become BounceAnalyzer's attributes (see detect()).
BOUNCE_MATCHERS = {
    'Action': re.compile(r'(failed|delayed|delivered|relayed|expanded)', re.IGNORECASE | re.DOTALL),
    'Content-Description': re.compile(r'(Notification|Undelivered Message|Delivery Report)', re.IGNORECASE | re.DOTALL),
    'Diagnostic-Code': re.compile(r'(.+);\s*([0-9\-\.]+)?\s*(.*)', re.IGNORECASE | re.DOTALL),
    'Final-Recipient': re.compile(r'(.+);\s*(.*)', re.IGNORECASE | re.DOTALL),
    'Received': re.compile(r'(.+)', re.IGNORECASE | re.DOTALL),
    'Remote-Mta': re.compile(r'(.+);\s*(.*)', re.IGNORECASE | re.DOTALL),
    'Reporting-Mta': re.compile(r'(.+);\s*(.*)', re.IGNORECASE | re.DOTALL),
    'Status': re.compile(r'([0-9]+)\.([0-9]+)\.([0-9]+)', re.IGNORECASE | re.DOTALL)
}
# Highest possible detection score: one point per header found plus up to one
# point per header whose value parses (see detect()).
BOUNCE_MAX = len(BOUNCE_MATCHERS) * 2.0
# Meaning of the first digit of a Status code (the hard/soft class).
PRIMARY_STATUS_CODES = {
    u'1': u'Unknown Status Code 1',
    u'2': u'Success',
    u'3': u'Temporary Failure',
    u'4': u'Persistent Transient Failure',
    u'5': u'Permanent Failure'
}
# Meaning of the second digit of a Status code.
SECONDARY_STATUS_CODES = {
    u'0': u'Other or Undefined Status',
    u'1': u'Addressing Status',
    u'2': u'Mailbox Status',
    u'3': u'Mail System Status',
    u'4': u'Network and Routing Status',
    u'5': u'Mail Delivery Protocol Status',
    u'6': u'Message Content or Media Status',
    u'7': u'Security or Policy Status',
}
# Meaning of the second and third digits combined (keyed by the two digits).
COMBINED_STATUS_CODES = {
    u'00': u'Not Applicable',
    u'10': u'Other address status',
    u'11': u'Bad destination mailbox address',
    u'12': u'Bad destination system address',
    u'13': u'Bad destination mailbox address syntax',
    u'14': u'Destination mailbox address ambiguous',
    u'15': u'Destination mailbox address valid',
    u'16': u'Mailbox has moved',
    u'17': u'Bad sender\'s mailbox address syntax',
    u'18': u'Bad sender\'s system address',
    u'20': u'Other or undefined mailbox status',
    u'21': u'Mailbox disabled, not accepting messages',
    u'22': u'Mailbox full',
    u'23': u'Message length exceeds administrative limit.',
    u'24': u'Mailing list expansion problem',
    u'30': u'Other or undefined mail system status',
    u'31': u'Mail system full',
    u'32': u'System not accepting network messages',
    u'33': u'System not capable of selected features',
    u'34': u'Message too big for system',
    u'40': u'Other or undefined network or routing status',
    u'41': u'No answer from host',
    u'42': u'Bad connection',
    u'43': u'Routing server failure',
    u'44': u'Unable to route',
    u'45': u'Network congestion',
    u'46': u'Routing loop detected',
    u'47': u'Delivery time expired',
    u'50': u'Other or undefined protocol status',
    u'51': u'Invalid command',
    u'52': u'Syntax error',
    u'53': u'Too many recipients',
    u'54': u'Invalid command arguments',
    u'55': u'Wrong protocol version',
    u'60': u'Other or undefined media error',
    u'61': u'Media not supported',
    u'62': u'Conversion required and prohibited',
    u'63': u'Conversion required but not supported',
    u'64': u'Conversion with loss performed',
    u'65': u'Conversion failed',
    u'70': u'Other or undefined security status',
    u'71': u'Delivery not authorized, message refused',
    u'72': u'Mailing list expansion prohibited',
    u'73': u'Security conversion required but not possible',
    u'74': u'Security features not supported',
    u'75': u'Cryptographic failure',
    u'76': u'Cryptographic algorithm not supported',
    u'77': u'Message integrity failure',
}
def match_bounce_headers(msg):
    """
    Walk every part of a potential bounce message and collect the raw values
    of the usual bounce headers.

    Returns a dict mapping each found header name to a set of its values,
    plus a 'Content-Description-Parts' dict mapping lowercased
    Content-Description values to the part that carried them.
    """
    matches = {'Content-Description-Parts': {}}
    for part in msg.base.walk():
        for header in BOUNCE_MATCHERS:
            if header not in part:
                continue
            values = matches.setdefault(header, set())
            # Remember which part carried each Content-Description so the
            # analyzer can pull out the original message / report later.
            if header == 'Content-Description':
                matches['Content-Description-Parts'][part[header].lower()] = part
            values.add(part[header])
    return matches
def detect(msg):
    """
    Given a message, this will calculate a probability score based on
    possible bounce headers it finds and return a salmon.bounce.BounceAnalyzer
    object for further analysis.
    The detection algorithm is very simple but still accurate. For each header
    it finds it adds a point to the score. It then uses the regex in BOUNCE_MATCHERS
    to see if the value of that header is parsable, and if it is it adds another
    point to the score. The final probability is based on how many headers and matchers
    were found out of the total possible.
    Finally, a header will be included in the score if it doesn't match in value, but
    it WILL NOT be included in the headers used by BounceAnalyzer to give you meanings
    like remote_mta and such.
    Because this algorithm is very dumb, you are free to add to BOUNCE_MATCHERS in your
    boot files if there's special headers you need to detect in your own code.
    """
    originals = match_bounce_headers(msg)
    # Pass the raw parts straight through; they don't take part in scoring.
    results = {'Content-Description-Parts':
               originals['Content-Description-Parts']}
    score = 0
    del originals['Content-Description-Parts']
    for key in originals:
        score += 1  # score still goes up, even if value doesn't parse
        r = BOUNCE_MATCHERS[key]
        scan = (r.match(v) for v in originals[key])
        matched = [m.groups() for m in scan if m]
        # a key is counted in the score, but only added if it matches
        if len(matched) > 0:
            # Extra credit: the fraction of this header's values that parsed.
            score += len(matched) / len(originals[key])
            results[key] = matched
    return BounceAnalyzer(results, score / BOUNCE_MAX)
class BounceAnalyzer(object):
    """
    BounceAnalyzer collects up the score and the headers and gives more
    meaningful interaction with them. You can keep it simple and just use
    is_hard, is_soft, and probable methods to see if there was a bounce.
    If you need more information then attributes are set for each of the following:
    * primary_status -- The main status number that determines hard vs soft.
    * secondary_status -- Advice status.
    * combined_status -- the 2nd and 3rd number combined gives more detail.
    * remote_mta -- The MTA that you sent mail to and aborted.
    * reporting_mta -- The MTA that was sending the mail and has to report to you.
    * diagnostic_codes -- Human readable codes usually with info from the provider.
    * action -- Usually 'failed', and turns out to be not too useful.
    * content_parts -- All the attachments found as a hash keyed by the type.
    * original -- The original message, if it's found.
    * report -- All report elements, as salmon.encoding.MailBase raw messages.
    * notification -- Usually the detailed reason you bounced.
    """

    def __init__(self, headers, score):
        """
        Initializes all the various attributes you can use to analyze the bounce
        results.
        """
        # ``headers`` maps each header name to a list of regex .groups()
        # tuples produced by detect()/BOUNCE_MATCHERS.
        self.headers = headers
        self.score = score
        if 'Status' in self.headers:
            # Status regex captured (primary, secondary, detail) digit groups.
            status = self.headers['Status'][0]
            self.primary_status = int(status[0]), PRIMARY_STATUS_CODES[status[0]]
            self.secondary_status = int(status[1]), SECONDARY_STATUS_CODES[status[1]]
            # The 2nd and 3rd digits index the combined lookup table.
            combined = "".join(status[1:])
            self.combined_status = int(combined), COMBINED_STATUS_CODES[combined]
        else:
            self.primary_status = (None, None)
            self.secondary_status = (None, None)
            self.combined_status = (None, None)
        # For the "name; value" style headers, group [1] is the value part.
        if 'Remote-Mta' in self.headers:
            self.remote_mta = self.headers['Remote-Mta'][0][1]
        else:
            self.remote_mta = None
        if 'Reporting-Mta' in self.headers:
            self.reporting_mta = self.headers['Reporting-Mta'][0][1]
        else:
            self.reporting_mta = None
        if 'Final-Recipient' in self.headers:
            self.final_recipient = self.headers['Final-Recipient'][0][1]
        else:
            self.final_recipient = None
        if 'Diagnostic-Code' in self.headers:
            self.diagnostic_codes = self.headers['Diagnostic-Code'][0][1:]
        else:
            self.diagnostic_codes = [None, None]
        if 'Action' in self.headers:
            self.action = self.headers['Action'][0][0]
        else:
            self.action = None
        # these are forced lowercase because they're so damn random
        self.content_parts = self.headers['Content-Description-Parts']
        # and of course, this isn't the original original, it's the wrapper
        self.original = self.content_parts.get('undelivered message', None)
        if self.original and self.original.parts:
            self.original = self.original.parts[0]
        self.report = self.content_parts.get('delivery report', None)
        if self.report and self.report.parts:
            self.report = self.report.parts
        self.notification = self.content_parts.get('notification', None)

    def is_hard(self):
        """
        Tells you if this was a hard bounce, which is determined by the message
        being a probably bounce with a primary_status greater than 4.
        """
        return self.probable() and self.primary_status[0] > 4

    def is_soft(self):
        """Basically the inverse of is_hard()"""
        return self.probable() and self.primary_status[0] <= 4

    def probable(self, threshold=0.3):
        """
        Determines if this is probably a bounce based on the score
        probability. Default threshold is 0.3 which is conservative.
        """
        return self.score > threshold

    def error_for_humans(self):
        """
        Constructs an error from the status codes that you can print to
        a user.
        """
        if self.primary_status[0]:
            return "%s, %s, %s" % (self.primary_status[1],
                                   self.secondary_status[1],
                                   self.combined_status[1])
        else:
            return "No status codes found in bounce message."
class bounce_to(object):
    """
    Used to route bounce messages to a handler for either soft or hard bounces.
    Set the soft/hard parameters to the function that represents the handler.
    The function should take one argument of the message that it needs to handle
    and should have a route that handles everything.
    WARNING: You should only place this on the START of modules that will
    receive bounces, and every bounce handler should return START. The reason
    is that the bounce emails come from *mail daemons* not the actual person
    who bounced. You can find out who that person is using
    message.bounce.final_recipient. But the bounce handler is *actually*
    interacting with a message from something like MAILER-DAEMON@somehost.com.
    If you don't go back to start immediately then you will mess with the state
    for this address, which can be bad.
    """

    def __init__(self, soft=None, hard=None):
        """
        Parameters
        ----------
        soft : callable
            Handler called with the message for soft bounces.
        hard : callable
            Handler called with the message for hard bounces.

        Raises
        ------
        TypeError
            If either handler is missing. Both are required; the previous
            ``assert`` was skipped entirely under ``python -O`` and its
            message misleadingly said "and/or".
        """
        self.soft = soft
        self.hard = hard
        if not (self.soft and self.hard):
            raise TypeError("You must give both the soft and hard handlers")

    def __call__(self, func):
        @wraps(func)
        def bounce_wrapper(message, *args, **kw):
            # Bounces are dispatched to the soft/hard handlers; any other
            # message falls through to the decorated handler unchanged.
            if message.is_bounce():
                if message.bounce.is_soft():
                    return self.soft(message)
                else:
                    return self.hard(message)
            else:
                return func(message, *args, **kw)

        return bounce_wrapper
import logging
import random
import numpy as np
from salmon.triplets.samplers._round_robin import (
RoundRobin, _get_query, _score_query
)
from salmon.triplets.samplers.utils import Answer, Query
logger = logging.getLogger(__name__)
def _random_query(n: int):
return np.random.choice(n, size=3, replace=False).tolist()
class Validation(RoundRobin):
    """Ask about the same queries repeatedly"""

    def __init__(self, n, d=2, n_queries=20, queries=None, ident=""):
        """
        This sampler asks the same questions repeatedly, useful to evaluate
        query difficulty.
        Parameters
        ----------
        n : int
            Number of objects
        ident : str
            Identifier of the algorithm
        n_queries : int, optional (default=20)
            Number of validation queries.
        d : int
            Embedding dimension.
        queries : List[Tuple[int, int, int]]
            The list of queries to ask about. Each query is
            ``(head, obj1, obj2)`` where ``obj1`` and ``obj2`` are
            randomly shown on the left and right. Each item in the tuple
            is the `index` of the target to ask about. For example:
            .. code-block:: python
               queries=[(0, 1, 2), (3, 4, 5), (6, 7, 8)]
            will first ask about a query with ``head_index=0``, then
            ``head_index=3``, then ``head_index=6``.
        """
        self.n_queries = n_queries
        if queries is None:
            # Generate n_queries distinct random triplets by rejection.
            # NOTE(review): loops forever if n_queries exceeds the number of
            # distinct ordered triplets on n items -- confirm callers keep
            # n_queries small relative to n.
            queries = []
            while True:
                q = _random_query(n)
                if q not in queries:
                    queries.append(q)
                if len(queries) == n_queries:
                    break
        # Flatten so every referenced index can be range-checked at once.
        idx = [i for query in queries for i in query]
        if n - 1 < max(idx):
            raise ValueError(
                f"The index {max(idx)} is included as an index for validation "
                f"sampling, which is too large for the n={n} targets."
            )
        self._val_queries = queries
        super().__init__(n=n, d=d, ident=ident)

    def get_query(self, **kwargs):
        """Return the next fixed query; the score returned is its position index."""
        # self.counter is initialized by the RoundRobin base class.
        idx = self.counter % len(self._val_queries)
        if idx == 0:
            # Starting a new pass: show the same queries in a fresh order.
            random.shuffle(self._val_queries)
        self.counter += 1
        h, l, r = self._val_queries[idx]
        if random.choice([True, False]):
            # Randomize which comparison object appears on which side.
            l, r = r, l
        return {"head": int(h), "left": int(l), "right": int(r)}, float(idx)
import logging
import random
from copy import deepcopy
from typing import List, Tuple
import numpy as np
from salmon.backend.sampler import Sampler
from salmon.triplets.samplers.utils import Answer, Query
logger = logging.getLogger(__name__)
def _get_query(n, head) -> Tuple[int, int, int]:
a = head
while True:
b, c = np.random.choice(n, size=2)
if a != b and b != c and c != a:
break
return a, b, c
def _score_query(q: Tuple[int, int, int]) -> float:
h, l, r = q
score = max(abs(h - l), abs(h - r))
return float(score)
class _RoundRobin(Sampler):
    """
    Let the head of the triplet query rotate through the available items while
    choosing the bottom two items randomly.
    """

    def __init__(self, n, d=2, ident="", targets=None):
        """
        Parameters
        ----------
        n : int
            Number of objects
        d : int
            Embedding dimension (stored; not used by this sampler).
        ident : str
            Identifier of the algorithm
        targets : Optional[List[int]]
            The allowable indexes to ask about.

        Raises
        ------
        ValueError
            If ``targets`` is not a list of at least 3 ints all below ``n``.
        """
        self.n = n
        self.d = d
        self.answers = []
        self.counter = 0
        self.order = None  # lazily-built shuffled copy of targets
        self.targets = targets or list(range(n))
        if targets is not None:
            # Validate user input up front so errors surface at creation time.
            if not isinstance(targets, list):
                msg = "Specify a list for targets. Got {} or type {}"
                raise ValueError(msg.format(targets, type(targets)))
            if not all(isinstance(i, int) for i in targets):
                msg = "Not all items in targets are integers. Bad values are {}"
                bad_vals = [v for v in targets if not isinstance(v, int)]
                raise ValueError(msg.format(bad_vals))
            if len(targets) < 3:
                msg = "Specify at least 3 targets items. Got {} targets items"
                raise ValueError(msg.format(len(targets)))
            if max(targets) >= n:
                msg = "At least one targets target is too large. Values too large include {}, larger than {}"
                bad_vals = [v for v in targets if v >= n]
                raise ValueError(msg.format(bad_vals, n))
        super().__init__(ident=ident)

    def get_query(self, **kwargs) -> Tuple[Query, float]:
        """Return the next query (rotating head, random comparisons) and a score."""
        if self.order is None:
            self.order = deepcopy(self.targets)
        idx = self.counter % len(self.order)
        # Per-query diagnostics: DEBUG level, not WARNING (the original
        # logged every query at WARNING, flooding production logs).
        logger.debug("idx=%s", idx)
        if idx == 0:
            # Start of a new pass over the heads: reshuffle the rotation.
            np.random.shuffle(self.order)
        head = self.order[idx]
        kids = set(self.targets) - {head}
        a, b = np.random.choice(list(kids), size=2, replace=False)
        self.counter += 1
        # Model-free placeholder score: how far the indices are from the head.
        score = max(abs(head - a), abs(head - b))
        return {"head": int(head), "left": int(a), "right": int(b)}, float(score)

    def process_answers(self, ans: List[Answer]):
        """Answers are ignored by this sampler.

        NOTE(review): the bool's meaning isn't visible here (RoundRobin
        returns True); confirm the contract against Sampler.
        """
        return self, False

    def run(self, *args, **kwargs):
        """Mark this sampler as stopped in Redis; no background loop is needed."""
        from rejson import Path

        root = Path.rootPath()
        rj = self.redis_client()
        rj.jsonset(f"stopped-{self.ident}", Path("."), True)
        return None
class RoundRobin(_RoundRobin):
    """
    Let the head of the triplet query rotate through the available items while choosing
    the bottom two items randomly. This class is user specific if the
    ``/query?puid=foo`` endpoint is hit.
    """

    def __init__(self, *args, **kwargs):
        # Remember the constructor args so per-user samplers can be
        # created lazily with the same configuration.
        self.rr_args = args
        self.rr_kwargs = kwargs
        self.samplers = {}  # puid to roundrobing
        super().__init__(*args, **kwargs)

    def get_query(self, puid: str = "") -> Tuple[Query, float]:
        """Return the next query for participant ``puid`` (one rotation per user)."""
        if puid not in self.samplers:
            # First query from this participant: give them their own rotation.
            self.samplers[puid] = _RoundRobin(*self.rr_args, **self.rr_kwargs)
        return self.samplers[puid].get_query()

    def process_answers(self, ans: List[Answer]):
        # NOTE(review): returns True here while _RoundRobin returns False;
        # the flag's meaning isn't visible in this file -- confirm against Sampler.
        return self, True
import logging
from time import sleep
from typing import List, Optional, Tuple
import numpy as np
from salmon.backend.sampler import Sampler
from salmon.triplets.samplers.utils import Answer, Query
logger = logging.getLogger(__name__)
def _get_query(targets: List[int]) -> Tuple[int, int, int]:
a, b, c = np.random.choice(targets, size=3, replace=False)
return int(a), int(b), int(c)
class Random(Sampler):
    """
    Choose the triplet queries randomly, only ensuring objects are not repeated.
    Parameters
    ----------
    n : int
        Number of objects
    d : int
        Embedding dimension (stored but not used by this sampler).
    ident : str
        Identifier of the algorithm
    targets : Optional[List[int]]
        The allowable indexes to ask about; defaults to all of range(n).
    """

    def __init__(self, n, d=2, ident="", targets=None):
        self.n = n
        self.d = d
        self.targets = targets or list(range(n))
        if targets is not None:
            # Validate user input up front so errors surface at creation time.
            if not isinstance(targets, list):
                msg = "Specify a list for targets. Got {} or type {}"
                raise ValueError(msg.format(targets, type(targets)))
            if not all(isinstance(i, int) for i in targets):
                msg = "Not all items in targets are integers. Bad values are {}"
                bad_vals = [v for v in targets if not isinstance(v, int)]
                raise ValueError(msg.format(bad_vals))
            if len(targets) < 3:
                msg = "Specify at least 3 targets items. Got {} targets items"
                raise ValueError(msg.format(len(targets)))
            if max(targets) >= n:
                msg = "At least one targets target is too large. Values too large include {}, larger than {}"
                bad_vals = [v for v in targets if v >= n]
                raise ValueError(msg.format(bad_vals, n))
        super().__init__(ident=ident)

    def get_query(self, **kwargs) -> Tuple[Query, Optional[float]]:
        """Return a uniformly random query; -9999 is a sentinel "no score"."""
        h, a, b = _get_query(self.targets)
        query = {"head": int(h), "left": int(a), "right": int(b)}
        return query, -9999

    def process_answers(self, ans: List[Answer]):
        # Random sampling never adapts to answers.
        return self, False

    def run(self, *args, **kwargs):
        """Mark this sampler as stopped in Redis; no background loop is needed."""
        from rejson import Path

        root = Path.rootPath()
        rj = self.redis_client()
        rj.jsonset(f"stopped-{self.ident}", Path("."), True)
        return None
from typing import Union
import numpy as np
import torch
import torch.nn as nn
ArrayLike = Union[np.ndarray, torch.Tensor]
class TripletDist(nn.Module):
    """
    Base class for triplet-embedding noise models.

    Subclasses implement ``_probs(win2, lose2)``: the probability that a
    triplet with those squared distances is satisfied.

    Parameters
    ----------
    n : int
        Number of items in the embedding.
    d : int
        Embedding dimension.
    random_state : Optional[int]
        Seed for the small random initial embedding.

    Attributes
    ----------
    embedding : torch.nn.Parameter, shape=(n, d)
        The current embedding.
    """

    def __init__(self, n: int = None, d: int = 2, random_state=None):
        super().__init__()
        self.n = n
        self.d = d
        rng = np.random.RandomState(seed=random_state)
        # Tiny random init so no pair of points starts far apart.
        embedding = 1e-4 * rng.randn(n, d).astype("float32")
        self._embedding = torch.nn.Parameter(
            torch.from_numpy(embedding), requires_grad=True
        )

    def numpy_or_torch(self, f):
        """Wrap ``f`` so numpy inputs yield numpy output (no autograd) while
        torch inputs pass straight through."""

        def wrapper(win2, lose2):
            if isinstance(win2, np.ndarray):
                win2 = torch.from_numpy(win2)
                lose2 = torch.from_numpy(lose2)
                with torch.no_grad():
                    ret = f(win2, lose2)
                return ret.detach().numpy()
            return f(win2, lose2)

        return wrapper

    def losses(
        self,
        win2: Union[np.ndarray, torch.Tensor],
        lose2: Union[np.ndarray, torch.Tensor],
    ) -> Union[np.ndarray, torch.Tensor]:
        r"""
        Calculate the losses of this triplet model with the triplet
        distances ``win2`` and ``lose2``. By default, the negative log
        loss of ``self.probs(win2, lose2)``.

        Parameters
        ----------
        win2 : torch.Tensor, shape=(num_answers)
            The squared Euclidean distance between the head vector and
            winner vector. Formally, :math:`\|x_h - x_w\|_2^2`.
        lose2 : torch.Tensor, shape=(num_answers)
            The squared Euclidean distance between the head vector and
            loser vector. Formally, :math:`\|x_h - x_l\|_2^2`.

        Returns
        -------
        losses : torch.Tensor, shape=(num_answers)
            The negative log-likelihood of each triplet.
        """
        return -1 * torch.log(self.probs(win2, lose2))

    @property
    def embedding(self):
        return self._embedding

    def probs(self, win2, lose2):
        """Probability each triplet is satisfied; accepts numpy or torch inputs."""
        return self.numpy_or_torch(self._probs)(win2, lose2)

    def _get_dists(self, h_w_l):
        """Squared head-winner and head-loser distances for each answer row."""
        H_W_L = h_w_l.T
        h, w, l = H_W_L[0], H_W_L[1], H_W_L[2]
        heads = self._embedding[h]
        winners = self._embedding[w]
        losers = self._embedding[l]
        win_dist2 = torch.norm(heads - winners, dim=1) ** 2
        lose_dist2 = torch.norm(heads - losers, dim=1) ** 2
        return win_dist2, lose_dist2

    def forward(
        self,
        h_w_l: Union[np.ndarray, torch.Tensor],
        y=None,
        sample_weight=None,
    ) -> Union[np.ndarray, torch.Tensor]:
        """
        Calculate the loss of each answered triplet.

        Parameters
        ----------
        h_w_l : torch.Tensor, shape=(num_answers, 3)
            Each row in this 2D array is (head, winner, loser) from a
            triplet query.
        y : None, ignored.
        sample_weight : None, ignored.

        Returns
        -------
        losses : torch.Tensor, shape=(num_answers)
            The loss of each individual triplet.
        """
        win2, lose2 = self._get_dists(h_w_l)
        return self.losses(win2, lose2)
class STE(TripletDist):
    """Stochastic Triplet Embedding with the exponential kernel."""

    def _probs(self, win2, lose2):
        # P(win) = exp(-d_win) / (exp(-d_win) + exp(-d_lose))
        #        = 1 / (1 + exp(d_win - d_lose)),
        # which avoids computing both exponentials.
        margin = win2 - lose2
        return 1 / (1 + torch.exp(margin))
class TSTE(TripletDist):
    """
    t-STE: triplet embedding with a heavy-tailed Student-t kernel with
    ``alpha`` degrees of freedom.
    """

    def __init__(self, n=None, d=2, alpha=1, random_state=None):
        super().__init__(n=n, d=d, random_state=random_state)
        # Degrees of freedom of the Student-t kernel below.
        self.alpha = alpha

    def _probs(self, win2, lose2):
        # Student-t kernel: (1 + d^2/alpha) ** (-(alpha+1)/2) for each arm.
        pwr = -(self.alpha + 1) / 2
        t1 = (1 + (win2 / self.alpha)) ** pwr
        t2 = (1 + (lose2 / self.alpha)) ** pwr
        return t1 / (t1 + t2)
class CKL(TripletDist):
    """
    The crowd kernel embedding.
    """

    def __init__(self, n=None, d=2, mu=0.05, random_state=None):
        super().__init__(n=n, d=d, random_state=random_state)
        # Regularizer that keeps probabilities away from 0 and 1.
        self.mu = mu

    def _probs(self, win2, lose2):
        numerator = self.mu + lose2
        denominator = numerator + self.mu + win2
        return numerator / denominator
class GNMDS(TripletDist):
    """
    The global non-metric multidimensional scaling algorithm.
    """

    def losses(self, win2, lose2):
        # Hinge loss on squared distances: max(0, d_win - d_lose + 1).
        hinge = win2 - lose2 + 1
        return torch.max(torch.zeros(len(win2)), hinge)

    def _probs(self, win2, lose2):
        # Epsilon guards against dividing by zero when both distances vanish.
        return win2 / (win2 + lose2 + 1e-6)
class SOE(GNMDS):
    """Like GNMDS, but the hinge is on plain (non-squared) distances."""

    def losses(self, win2, lose2):
        hinge = torch.sqrt(win2) - torch.sqrt(lose2) + 1
        return torch.max(torch.zeros(len(win2)), hinge)
class Logistic(GNMDS):
    r"""
    Logistic noise model: each answer incurs the loss

    .. math::
        \text{loss}_i = \log(1 + \exp(d^2_{win,i} - d^2_{lose,i}))
    """

    def losses(self, win2: torch.Tensor, lose2: torch.Tensor) -> torch.Tensor:
        # win2 - lose2 < 0 when the embedding agrees with the answer (low
        # loss); > 0 when it disagrees (high loss).
        margins = win2 - lose2
        # Numerically stable per-answer log(1 + exp(margin)). The previous
        # implementation called torch.logsumexp without the required ``dim``
        # argument (a TypeError at runtime) and collapsed all answers into a
        # single scalar; ``losses`` must return one loss per answer, like the
        # other noise models and the base-class contract.
        return torch.nn.functional.softplus(margins)
import numpy as np
from sklearn.base import BaseEstimator
import salmon.utils as utils
from salmon.triplets.samplers.adaptive.search import gram_utils, score
logger = utils.get_logger(__name__)
class QueryScorer:
    """
    A class to score queries for adaptive searches.
    Parameters
    ----------
    embedding : array-like
        The embedding of points.
    probs : callable
        Function to call to get probabilities. Called with
        ``probs(win2, lose2)``
        where ``win2`` and ``lose2`` are the squared Euclidean distances
        between the winner and loser.
    Notes
    -----
    Inputs: include an embedding, noise model and history of answers
    received.
    Outputs: query scores.
    Internal state: the posterior, history.
    Public API:
    * Update posterior.
    * Get scores.
    """

    def __init__(self, embedding=None, probs=None):
        self.embedding = embedding
        self.probs = probs

    def _initialize(self):
        # Lazy setup: allocate the log-posterior accumulator and compute
        # an initial (uniform) posterior from an empty history.
        self.initialized_ = True
        n = len(self.embedding)
        self._tau_ = np.zeros((n, n), dtype="float32")
        self.push([])

    def _random_queries(self, n, num=1000, trim=True):
        # Over-generate by ~10% so enough remain after dropping queries
        # with repeated indices.
        new_num = int(num * 1.1 + 3)
        rng = np.random.RandomState()
        queries = rng.choice(n, size=(new_num, 3))
        repeated = (
            (queries[:, 0] == queries[:, 1])
            | (queries[:, 1] == queries[:, 2])
            | (queries[:, 0] == queries[:, 2])
        )
        queries = queries[~repeated]
        if trim:
            return queries[:num]
        return queries

    def _distances(self):
        # Squared distances between every pair of embedded points.
        G = gram_utils.gram_matrix(self.embedding)
        return gram_utils.distances(G)

    def score(self):
        # Subclasses implement the actual scoring rule.
        raise NotImplementedError

    def _posterior(self, D, history):
        """
        Calculate the posterior.
        Parameters
        ----------
        D : array-like
            Distance array. D[i, j] = ||x_i - x_j||_2^2
        history : array-like, shape=(num_ans, 3)
            History of answers, each row (head, winner, loser).
        Returns
        -------
        posterior : array-like, shape=(self.n, self.n)
            Row h is a PDF over "where the head h could be".
        """
        n = D.shape[0]
        logger.info("history = %s", history)
        if len(history):
            # Accumulate log-likelihoods into self._tau_ (persists across calls).
            for k, (head, w, l) in enumerate(history):
                probs = self.probs(D[w], D[l])
                probs[np.isnan(probs)] = 0
                # Floor probabilities so the log never produces -inf.
                _eps = 1e-80
                probs[probs <= _eps] = _eps
                a = np.log(probs + _eps)
                self._tau_[head] += a
        # idx = self._tau_ >= -np.inf
        tau = np.zeros_like(self._tau_)
        # tau[idx] = np.exp(self._tau_[idx])
        tau = np.exp(self._tau_)
        s = tau.sum(axis=1)  # the sum of each row
        gt0 = s > 0
        eps = s[gt0].min() if gt0.any() else s.min()
        # NOTE(review): the 1e4 rescale and clipping appear to guard against
        # underflow when normalizing tiny rows -- confirm before changing.
        s *= 1e4
        tau *= 1e4
        s = np.clip(s, eps, np.inf)
        tau2 = (tau.T / s).T  # transpose to make broadcasting work
        return tau2

    def push(self, history):
        """Fold new answers into the posterior (initializing on first use)."""
        if not hasattr(self, "initialized_"):
            self._initialize()
        D = self._distances()
        self.posterior_ = self._posterior(D, history)
        return self
class InfoGainScorer(QueryScorer):
    def score(self, *, queries=None, num=1000):
        """
        Score the queries using (almost) the information gain.
        Parameters
        ----------
        queries : List[int, int, int]
            The list of queries to score.
        num : int
            Number of random queries to generate.
        Returns
        -------
        (Q, scores) : Tuple[np.ndarray, np.ndarray]
            The queries (as int64 rows) and one score per query.
        """
        if not hasattr(self, "initialized_"):
            self._initialize()
        D = self._distances()
        # NOTE(review): this guard can't distinguish an explicit num=1000
        # from the default, so queries with num=1000 is silently accepted.
        if queries is not None and num != 1000:
            raise ValueError("Only specify one of `queries` or `num`")
        if queries is None:
            queries = self._random_queries(len(self.embedding), num=num)
        Q = np.array(queries).astype("int64")
        H, O1, O2 = Q[:, 0], Q[:, 1], Q[:, 2]
        scores = score(H, O1, O2, self.posterior_, D, probs=self.probs)
        return Q, scores
class UncertaintyScorer(QueryScorer):
    def score(self, *, queries=None, num=1000, trim=True):
        """
        Score the queries by uncertainty: the (negated) distance to the
        decision boundary, so the most ambiguous queries score highest.
        Parameters
        ----------
        queries : List[int, int, int]
            The list of queries to score.
        num : int
            Number of random queries to generate.
        trim : bool
            Whether to trim the random queries down to exactly ``num``.
        """
        if not hasattr(self, "initialized_"):
            self._initialize()
        D = self._distances()
        # NOTE(review): same sentinel quirk as InfoGainScorer -- an explicit
        # num=1000 is indistinguishable from the default.
        if queries is not None and num != 1000:
            raise ValueError("Only specify one of `queries` or `num`")
        if queries is None:
            queries = self._random_queries(len(self.embedding), num=num, trim=trim)
        Q = np.array(queries).astype("int64")
        H, O1, O2 = Q[:, 0], Q[:, 1], Q[:, 2]
        # score is distance to the decision boundary.
        scores = -np.abs(D[H, O1] - D[H, O2])
        return Q, scores
from typing import Union
import numpy as np
import numpy.linalg as LA
import scipy
import torch
from scipy.linalg import eigh
from scipy.spatial import procrustes
Array = Union[np.ndarray, torch.Tensor]
def decompose(G, d):
    """
    Recover an embedding whose Gram matrix is ``G``.

    Arguments
    ---------
    G : ndarray
        Gram matrix; X @ X.T
    d : int
        Dimension of each vector in X; X.shape == (n, d)
        when G.shape == (n, n)

    Returns
    -------
    X : ndarray
        Points that make up the Gram matrix.
    """
    assert_gram(G)
    n = G.shape[0]
    eigvals, eigvecs = eigh(G)
    # eigh returns eigenvalues in ascending order; keep the top d.
    top = list(range(n - d, n))
    assert len(top) == d
    X_hat = np.diag(np.sqrt(eigvals[top])) @ eigvecs[:, top].T
    return X_hat.T
def gram_matrix(X: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
    """
    Get Gram matrix from embedding.

    Arguments
    ---------
    X : Array
        Embedding. X.shape == (num_items, item_dim)

    Returns
    -------
    G : Array
        Gram matrix ``X @ X.T``. G.shape == (n, n)
    """
    if isinstance(X, torch.Tensor):
        XT = X.transpose(0, 1)
    else:
        XT = X.T
    return X @ XT
def distances(G: Array) -> Array:
    """
    Squared-distance matrix from a Gram matrix.

    Arguments
    ---------
    G : Array
        Gram matrix. G.shape == (n, n) for n objects

    Returns
    -------
    D : Array
        Distance matrix with D[i, j] = G[i, i] + G[j, j] - 2 G[i, j].
    """
    assert_gram(G)
    norms_row = np.diag(G).reshape(1, -1)
    norms_col = np.diag(G).reshape(-1, 1)
    return -2 * G + norms_row + norms_col
def dist2(G, a, b):
    """Squared distance between items ``a`` and ``b`` from Gram matrix ``G``."""
    # ||x_a - x_b||^2 = <a, a> + <b, b> - 2 <a, b>
    self_a = G[a, a]
    self_b = G[b, b]
    return self_a + self_b - 2 * G[a, b]
def is_psd(G, return_vals=False):
    """
    Check (to numerical tolerance -3e-7) whether ``G`` is positive
    semi-definite; optionally also return its eigenvalues.
    """
    spectrum = eigh(G, eigvals_only=True)
    smallest = spectrum.min()
    verdict = 0 <= smallest or smallest > -3e-7
    if return_vals:
        return verdict, spectrum
    return verdict
def onto_psd(G, one=True, out=None):
    """
    Project onto semi-positive definite cone

    Deflates the single most-negative eigendirection, if any.

    Parameters
    ----------
    G : ndarray
        Symmetric matrix.
    one : bool
        Unused -- NOTE(review): dead parameter, apparently kept for API
        compatibility; confirm before removing.
    out : Optional[ndarray]
        Buffer modified in place; defaults to a copy of ``G``.
    """
    # assert_gram(G)
    if out is None:
        out = G.copy()
    # Smallest eigenpair only. NOTE(review): the ``eigvals=`` keyword was
    # removed from scipy.linalg.eigh in SciPy 1.9 (use subset_by_index);
    # this code assumes an older SciPy -- confirm the pinned version.
    s, v = eigh(out, eigvals=(0, 0))
    assert s.shape == (1,)
    assert v.shape[1] == 1
    v = v.flat[:]
    if s < 0:
        # Deflate the negative direction: out -= lambda_min * v v^T.
        out -= s * np.outer(v, v)
    return out
def assert_embedding(X):
    """Sanity check that ``X`` looks like an (n, d) embedding."""
    n, d = X.shape
    # Only rejects square inputs (which would be ambiguous with a Gram or
    # distance matrix).
    assert n != d
def assert_gram(G):
    """Validation hook for Gram matrices; all checks are currently disabled."""
    pass
    # if isinstance(G, torch.Tensor):
    #     assert torch.allclose(G, G.transpose(0, 1))
    #     m = torch.abs(torch.diag(G) / torch.norm(G))
    #     assert not np.allclose(m.sum().item(), 0)
    # else:
    #     assert np.allclose(G, G.T)
    #     assert not np.allclose(np.diag(G) / LA.norm(G), 0)
def assert_distance(D):
    """Validation hook for distance matrices; all checks are currently disabled."""
    pass
    # assert np.abs(np.diag(D)).sum() < 1e-6
    # assert np.allclose(D, D.T)
import itertools
import math
from time import time
from typing import Tuple, Union
import numpy as np
import numpy.linalg as LA
import torch
try:
from salmon.triplets.samplers.adaptive.search import gram_utils
except:
import gram_utils
Array = Union[np.ndarray, torch.Tensor]
def random_query(n):
    """Draw a random triplet [head, obj1, obj2] of distinct indices in range(n)."""
    rng = np.random.RandomState()
    while True:
        head = rng.choice(n)
        obj1 = rng.choice(n)
        obj2 = rng.choice(n)
        if head != obj1 and obj1 != obj2 and obj2 != head:
            return [head, obj1, obj2]
def embedding_matches(ans, D):
    """
    Check whether an answered triplet agrees with distance matrix ``D``.

    Parameters
    ----------
    ans : List[int]
        [head, winner, loser] indices.
    D : np.ndarray
        Distance matrix.

    Returns
    -------
    match : bool-like
        Truthy iff D says the winner is closer to the head than the loser.
    """
    head, winner, loser = ans
    d_winner = D[head, winner]
    d_loser = D[head, loser]
    return d_winner < d_loser
def decide(D, head, winner, loser, distance=False, random=True):
    """
    Simulate the answer to a triplet query under a logistic noise model.

    Inputs
    ------
    D : np.ndarray
        Distance or embedding matrix.
        If embedding, n points in d dimensions. D.shape == (n, d)
        If distance, D[a, b] is distance between objects a and b.
    head, winner, loser: int
        Indices of head, winner and loser
    distance : bool
        Whether ``D`` is a distance matrix (True) or an embedding (False).
    random : bool
        If False, answer deterministically from the distances.

    Returns
    -------
    ans : List[int]
        [head, winner, loser], with winner/loser possibly swapped by the
        sampled answer.
    meta : dict
        Empty when random=False; otherwise contains "prob" and "dist".
    """
    if distance:
        gram_utils.assert_distance(D)
        d_winner = D[head, winner]
        d_loser = D[head, loser]
    else:
        gram_utils.assert_embedding(D)
        d_winner = LA.norm(D[head] - D[winner]) ** 2
        d_loser = LA.norm(D[head] - D[loser]) ** 2
    if not random:
        q = [head, winner, loser] if d_winner < d_loser else [head, loser, winner]
        return q, {}
    # 0 < dist: agrees with embedding (d_loser > d_winner)
    dist = d_loser - d_winner  # >0: agrees with embedding. <0: does not agree.
    try:
        prob = 1 / (1 + np.exp(-dist))
    except FloatingPointError:
        # Only reached if numpy is configured (np.seterr) to raise on
        # overflow -- NOTE(review): that configuration isn't visible here.
        prob = 0
    # prob = 1 / (1 + exp(-(d2 - d1)))
    #      = exp(-d1) / (exp(-d1) + exp(-d2))    with d1 = d_winner, d2 = d_loser
    meta = {"prob": prob, "dist": dist}
    if np.random.rand() < prob:
        return [head, winner, loser], meta
    return [head, loser, winner], meta
def STE_probs(d1, d2, alpha=1):
    """Probability the triplet is won under the Student-t STE model.

    ``d1`` and ``d2`` are (squared) distances to the two choices; ``alpha``
    is the Student-t degrees-of-freedom parameter.
    """
    exponent = -(alpha + 1.0) / 2
    num = (1 + d1 / alpha) ** exponent
    denom = num + (1 + d2 / alpha) ** exponent
    return num / denom
def exp_STE_probs(d2_winner, d2_loser):
    """Probability the winner wins under the exponential STE model.

    Mathematically ``exp(-d_w) / (exp(-d_w) + exp(-d_l))``, written in
    the equivalent logistic form ``1 / (1 + exp(d_w - d_l))``.
    """
    gap = d2_winner - d2_loser
    return 1 / (1 + np.exp(gap))
def entropy(x):
    """Shannon entropy of ``x`` in nats.

    For a 1D array, returns the scalar entropy of the positive entries
    (non-positive entries are dropped). For a 2D array, returns the
    row-wise entropies; non-positive entries are left untransformed and
    contribute their raw value to the row sum, matching the original
    convention.
    """
    if x.ndim == 1:
        p = x[x > 0]
        return -(p * np.log(p)).sum()
    if x.ndim == 2:
        y = x.copy()
        mask = x > 0
        y[mask] = -y[mask] * np.log(y[mask])
        return y.sum(axis=1)
    raise ValueError("Invalid number of dimensions in input ``x``")
def posterior(D, S, alpha=1):
    """Posterior over object locations given distances ``D`` and answers ``S``.

    Each answer in ``S`` is a ``(head, winner, loser)`` triple. Rows of the
    returned ``(n, n)`` array are normalized to sum to one.
    """
    gram_utils.assert_distance(D)
    n = len(D)
    log_tau = np.zeros((n, n))
    # Each answer multiplies the posterior, so add log-probabilities.
    for head, winner, loser in S:
        log_tau[head] += np.log(STE_probs(D[winner], D[loser], alpha=alpha))
    tau = np.exp(log_tau)
    row_sums = tau.sum(axis=1)
    return (tau.T / row_sums).T
def getSTETripletProbability(i, j, k, alpha=1):
    """STE probability that ``k`` is closer to ``i`` than to ``j``.

    Parameters
    ----------
    i, j, k : np.ndarray
        Embedding coordinates of the three objects.
    alpha : float
        Student-t degrees-of-freedom (regularization) parameter.
    """
    c = -(alpha + 1.0) / 2
    num = (1 + LA.norm(k - i) ** 2 / alpha) ** c
    denom = num + (1 + LA.norm(k - j) ** 2 / alpha) ** c
    return num / denom
def posterior_orig(X, S, alpha=1):
    """Reference (loop-based) posterior computation over an embedding ``X``.

    ``S`` holds ``(head, winner, loser)`` answers; each row of the returned
    ``(n, n)`` array is normalized into a probability distribution.
    """
    n = X.shape[0]
    tau = np.zeros((n, n))
    # Accumulate log-probabilities: each answer multiplies the posterior.
    for head, winner, loser in S:
        for obj in range(n):
            p = getSTETripletProbability(X[winner], X[loser], X[obj], alpha=alpha)
            tau[head, obj] = tau[head, obj] + math.log(p)
    # Normalize -- make each row a PDF.
    for row in range(n):
        tau[row] = np.exp(tau[row])
        tau[row] = tau[row] / sum(tau[row])
    return tau
def posterior_embed(X, S, alpha=1):
    """Posterior over an embedding ``X``; same model as ``posterior_orig``."""
    n = X.shape[0]
    tau = np.zeros((n, n))
    # Each answer contributes a log-probability to the head's row.
    for head, winner, loser in S:
        for obj in range(n):
            p = getSTETripletProbability(X[winner], X[loser], X[obj], alpha=alpha)
            tau[head, obj] = tau[head, obj] + math.log(p)
    # Row-wise softmax of the accumulated log-probabilities.
    for row in range(n):
        ex = np.exp(tau[row])
        tau[row] = ex / ex.sum()
    return tau
def _score_v1(q: Tuple[int, int, int], tau: np.ndarray, D: np.ndarray) -> float:
    """Information-gain style score for one query (scalar reference version).

    Returns ``-p H(tau_win) - (1 - p) H(tau_lose)``; the constant ``H(tau)``
    term of the true information gain is omitted.
    """
    gram_utils.assert_distance(D)
    head, w, l = q
    probs = STE_probs(D[w], D[l])  # shape (n,): win prob for each candidate
    p = (probs * tau[head]).sum()  # marginal probability the query is won
    # Posteriors after a "win" and a "loss" answer, renormalized.
    tau_win = tau[head] * probs
    tau_win /= tau_win.sum()
    tau_lose = tau[head] * (1 - probs)
    tau_lose /= tau_lose.sum()
    return -p * entropy(tau_win) - (1 - p) * entropy(tau_lose)
def score(H: Array, W: Array, L: Array, tau: Array, D: Array, probs=STE_probs) -> Array:
    r"""
    Find the information gain for each query.

    Arguments
    ---------
    H, W, L : Array, Array, Array
        1D integer arrays describing the head and two choices respectively.
        If there are ``q`` questions, each array is of shape ``(q, )``.
    tau : Array
        Posterior, of shape ``(n, n)`` with ``n`` objects.
    D : Array
        Array of distances. Also of shape ``(n, n)``.
    probs : callable, optional
        Model mapping two distance arrays to win probabilities
        (default: ``STE_probs``).

    Returns
    -------
    ig : Array of shape ``(q, )``
        The information gain of the queries (minus a constant).

    Notes
    -----
    The information gain of a query is given by the following expression:

    .. math::
        H(\tau) - pH(\tau_b) - (1 - p)H(\tau_a)

    where :math:`H` is the entropy. The constant :math:`H(\tau)` term is
    omitted here since it does not change the ranking of queries.

    References
    ----------
    [1] "Adaptively learning the crowd kernel" O. Tamuz, C. Liu,
        S. Belongie, O. Shamir, and A. Kalai. 2011.
        https://arxiv.org/abs/1105.1033
    """
    gram_utils.assert_distance(D)
    assert all(x.dtype.kind == "i" for x in (H, W, L))
    head, w, l = H, W, L
    q = len(head)
    # Win probability of each query against every candidate location.
    probs = probs(D[l], D[w])  # (q, n)
    probs[np.isnan(probs)] = 0
    assert probs.min() >= 0
    # NOTE(review): ``0e-20`` is literally 0.0, so the ``+ eps`` guards below
    # are no-ops; verify whether a small positive epsilon was intended.
    eps = 0e-20
    # Marginal probability each query is won, weighted by the posterior.
    p = (probs * tau[head]).sum(axis=1)  # (q, )
    # Posterior rows after a "win" answer, renormalized per query.
    _taub = tau[head] * probs  # (q, n)
    _taub[np.isnan(_taub)] = eps
    taub = _taub / (_taub.sum(axis=1).reshape(q, 1) + eps)
    # Posterior rows after a "loss" answer, renormalized per query.
    _tauc = tau[head] * (1 - probs)  # (q, n)
    _tauc[np.isnan(_tauc)] = eps
    tauc = _tauc / (_tauc.sum(axis=1).reshape(q, 1) + eps)
    score = -p * entropy(taub) - (1 - p) * entropy(tauc)
    return score
import pathlib
import random
from copy import copy
from datetime import datetime, timedelta
from textwrap import dedent
from time import time
from typing import Dict, Union
import numpy as np
import requests as httpx
import ujson
from fastapi import FastAPI
from redis.exceptions import ResponseError
from rejson import Client, Path
from starlette.requests import Request
from starlette.staticfiles import StaticFiles
from starlette.templating import Jinja2Templates
from starlette_exporter import PrometheusMiddleware, handle_metrics
from salmon.frontend.utils import ServerException, image_url, sha256
from salmon.triplets import manager
from salmon.utils import get_logger
logger = get_logger(__name__)
root = Path.rootPath()
rj = Client(host="redis", port=6379, decode_responses=True)
def start_algs():
    """
    Start the algorithm backend. This function is necessary because the
    machine might be restarted (so the experiment isn't launched fresh).
    """
    # Nothing to do when no experiment/samplers are stored in Redis.
    if "samplers" not in rj.keys():
        return
    for name in rj.jsonget("samplers"):
        logger.info(f"Restarting alg={name}...")
        response = httpx.post(f"http://localhost:8400/init/{name}")
        assert response.status_code == 200
    return True
def stop_algs():
    """Signal every sampler to shut down by setting the Redis ``reset`` flag."""
    rj.jsonset("reset", root, True)
    return True
app = FastAPI(
title="Salmon",
description=dedent(
"""A prototype platform for crowdsourcing triplet queries.
\n\n***Warning!*** This platform is experimental and unstable.
"""
),
on_startup=[start_algs],
on_shutdown=[stop_algs],
)
app.add_middleware(PrometheusMiddleware)
app.add_route("/metrics", handle_metrics)
pkg_dir = pathlib.Path(__file__).absolute().parent
app.mount("/static", StaticFiles(directory=str(pkg_dir / "static")), name="static")
templates = Jinja2Templates(directory="templates")
async def _get_config():
    """Return the experiment configuration stored in Redis under ``exp_config``."""
    return rj.jsonget("exp_config")
async def _ensure_initialized():
    """Return the experiment config, verifying its schema.

    Returns
    -------
    exp_config : dict
        The configuration stored in Redis.

    Raises
    ------
    ServerException
        If no experiment has been uploaded, or if the top-level keys or the
        ``html`` sub-keys differ from the expected schema.
    """
    if "exp_config" not in rj:
        raise ServerException("No data has been uploaded")
    exp_config = await _get_config()
    expected_keys = ["targets", "samplers", "n", "sampling", "html"]
    html_keys = [
        "instructions",
        "max_queries",
        "debrief",
        "skip_button",
        "css",
        "arrow_keys",
    ]
    err = False
    extra = missing = set()
    if set(exp_config) != set(expected_keys):
        err = True
        extra = set(exp_config) - set(expected_keys)
        missing = set(expected_keys) - set(exp_config)
    if "html" in exp_config and not set(html_keys).issubset(set(exp_config["html"])):
        err = True
        # BUG FIX: the "html" sub-dict must be compared against html_keys;
        # the old code diffed it against the top-level expected_keys, so the
        # reported missing/extra keys were wrong.
        extra = set(exp_config["html"]) - set(html_keys)
        missing = set(html_keys) - set(exp_config["html"])
    if err:
        msg = (
            "Experiment keys are not correct. Expected {}, got {}.\n\n"
            "Extra keys: {}\n"
            "Missing keys: {}"
        )
        raise ServerException(
            msg.format(expected_keys, list(exp_config.keys()), extra, missing)
        )
    return exp_config
@app.get("/", tags=["public"])
async def get_query_page(request: Request, puid: str = ""):
    """
    Load the query page and present a "triplet query".

    When ``puid`` is empty, a random hashed participant UID is generated.
    """
    exp_config = await _ensure_initialized()
    if puid == "":
        uid = "salmon-{}".format(np.random.randint(2 ** 32 - 1))
        puid = sha256(uid)[:16]
    try:
        urls = [image_url(t) for t in exp_config["targets"]]
    except Exception:
        # Targets may not be renderable as URLs (e.g. plain text); fall back
        # to no URLs. (FIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        urls = []
    items = {
        "puid": puid,
        "targets": exp_config["targets"],
        "samplers_per_user": exp_config["sampling"]["samplers_per_user"],
        "urls": urls,
        "details": exp_config["sampling"]["details"],
        "html": exp_config["html"],
    }
    items.update(request=request)
    return templates.TemplateResponse("query_page.html", items)
@app.get("/query", tags=["public"])
async def get_query(sampler="", puid="") -> Dict[str, Union[int, str, float]]:
    """Return one triplet query.

    When ``sampler`` is empty, one is chosen at random according to the
    configured sampling probabilities. The backend is asked for a query
    first; failing that, the highest-scored query is popped from the
    database, and as a last resort a random query is generated.
    """
    if sampler == "":
        samplers = rj.jsonget("samplers")
        weights = rj.jsonget("sampling_probs")
        chosen = np.random.choice(len(samplers), p=weights)
        sampler = samplers[chosen]
    host = f"http://localhost:8400"
    endpoint = f"/query/{sampler}"
    if puid:
        endpoint += f"?puid={puid}"
    r = httpx.get(host + endpoint)
    if r.status_code == 200:
        logger.info(f"query r={r}")
        return r.json()
    # Backend had nothing; pop the best pre-scored query from the database.
    key = f"alg-{sampler}-queries"
    logger.info(f"zpopmax'ing {key}")
    popped = rj.zpopmax(key)
    if popped:
        serialized, score = popped[0]
        q = manager.deserialize_query(serialized)
    else:
        # Database is empty too — fall back to a random query.
        config = await _get_config()
        q = manager.random_query(config["n"])
        score = -9999
    return {"sampler": sampler, "score": score, **q}
@app.post("/answer", tags=["public"])
async def process_answer(ans: manager.Answer):
    """
    Process the answer, and append the received answer (alongside participant
    UID) to the database.

    See the <a href='#model-Answer'>Answer schema</a> for more detail.

    Returns
    -------
    `d : Dict[str, bool]`. On success, `d == {"success": True}`
    """
    d = ujson.loads(ans.json())
    _update = {
        "time_received": round(time(), 3),
        # The loser is whichever of left/right was not clicked.
        "loser": d["left"] if d["winner"] == d["right"] else d["right"],
    }
    d.update(_update)
    ident = d["sampler"]
    logger.warning(f"answer received: {d}")
    # Per-sampler answer list, consumed by that sampler's backend process.
    rj.jsonarrappend(f"alg-{ident}-answers", root, d)
    # on backend, key = f"alg-{self.ident}-answers"
    # Global list of every response, used for dashboards/export.
    rj.jsonarrappend("all-responses", root, d)
    last_save = rj.lastsave()
    # Save every 15 minutes
    if (datetime.now() - last_save) >= timedelta(seconds=60 * 15):
        try:
            rj.bgsave()
        except ResponseError as e:
            # A background save may already be running; that's fine.
            # Anything else is unexpected and re-raised.
            if "Background save already in progress" not in str(e):
                raise e
    return {"success": True}
import json
from datetime import datetime, timedelta
from math import pi
from typing import List
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import requests
from bokeh.embed import json_item
from bokeh.models import (ColumnDataSource, Grid, ImageURL, Legend, LinearAxis,
NumeralTickFormatter, Plot, Text, tickers)
from bokeh.palettes import brewer, d3
from bokeh.plotting import figure, show
from salmon.frontend.utils import get_logger, image_url
logger = get_logger(__name__)
def _make_hist(
    title,
    xlabel,
    hist,
    edges,
    width=600,
    height=200,
    toolbar_location="right",
    **kwargs,
):
    """Build a Bokeh histogram figure from precomputed bar heights and edges.

    Extra keyword arguments are forwarded to ``bokeh.plotting.figure``.
    """
    fig = figure(
        title=title,
        background_fill_color="#fafafa",
        width=width,
        height=height,
        toolbar_location=toolbar_location,
        **kwargs,
    )
    # One quad per bin: [edges[i], edges[i + 1]] wide, ``hist[i]`` tall.
    fig.quad(
        top=hist,
        bottom=0,
        left=edges[:-1],
        right=edges[1:],
        fill_color="blue",
        line_color="white",
        alpha=0.5,
    )
    fig.y_range.start = 0
    fig.legend.location = "center_right"
    fig.legend.background_fill_color = "#fefefe"
    fig.xaxis.axis_label = xlabel
    fig.yaxis.axis_label = "Frequency"
    return fig
async def _get_unique(series: pd.Series):
    """Return the first value of ``series``, asserting all values are equal.

    NaN values are ignored by ``nunique``, matching pandas semantics.
    """
    n_distinct = series.nunique()
    assert n_distinct == 1
    return series.iloc[0]
async def _get_nbins(x: np.array) -> int:
    """Pick a histogram bin count: at least 10, about 4 bins per day of data."""
    if len(x) <= 10:
        return 10
    span_days = (np.nanmax(x) - np.nanmin(x)) / (60 * 60 * 24)
    return int(max(10, span_days * 4))
async def activity(df: pd.DataFrame, start_sec: float):
    """Histogram of when responses were received, on a wall-clock axis.

    ``start_sec`` is the experiment start as seconds since the Unix epoch.
    """
    received = df["time_received"].values.copy()
    bins = await _get_nbins(received)
    logger.info(f"bins = {bins}")
    heights, edges = np.histogram(received, bins=bins)
    # Convert second-offsets into datetimes for the x axis.
    start = datetime(1970, 1, 1) + timedelta(seconds=start_sec)
    edges = [start + timedelta(seconds=e) for e in edges]
    plot = _make_hist(
        "Time responses received",
        "\nTime received",
        heights,
        edges,
        toolbar_location=None,
        x_axis_type="datetime",
    )
    plot.xaxis.major_label_orientation = pi / 4
    return plot
async def _remove_outliers(x, low=True, high=True, frac=0.10):
    """Remove the most extreme ``frac`` fraction of the data.

    Parameters
    ----------
    x : np.ndarray
        Values to filter.
    low, high : bool
        Whether to trim the low/high tail. ``frac / 2`` is removed from
        each enabled side.
    frac : float
        Total fraction of data removed when both tails are trimmed.

    Returns
    -------
    np.ndarray
        The values inside the kept percentile range. When both ``low`` and
        ``high`` are False, ``x`` is returned unchanged (the original code
        raised ``NameError`` in that case).
    """
    p = (frac * 100) / 2
    _high = np.percentile(x, 100 - p)
    _low = np.percentile(x, p)
    if low and high:
        good = (x >= _low) & (x <= _high)
    elif low:
        good = x >= _low
    elif high:
        good = x <= _high
    else:
        # BUG FIX: ``good`` was undefined on this path.
        return x
    return x[good]
async def response_time(df: pd.DataFrame):
    """Histogram of participant response times (upper outliers trimmed)."""
    times = df["response_time"].values.copy()
    # With enough data, drop the slowest responses so the plot stays readable.
    if len(times) >= 100:
        times = await _remove_outliers(times, low=False, high=True)
    n_bins = await _get_nbins(times)
    heights, edges = np.histogram(times, bins=n_bins)
    return _make_hist(
        "Response time",
        "Time (s)",
        heights,
        edges,
        width=300,
        toolbar_location=None,
    )
async def network_latency(df: pd.DataFrame):
    """Histogram of the time participants waited for a new query."""
    waits = df["network_latency"].values.copy()
    # With enough data, drop the slowest waits so the plot stays readable.
    if len(waits) >= 100:
        waits = await _remove_outliers(waits, low=False, high=True)
    n_bins = await _get_nbins(waits)
    heights, edges = np.histogram(waits, bins=n_bins)
    return _make_hist(
        "Time waiting for new query",
        "Time (s)",
        heights,
        edges,
        width=300,
        toolbar_location=None,
    )
async def show_embedding(embedding: np.ndarray, targets: List[str], alg: str = ""):
    """Render the fitted embedding in a Bokeh figure.

    Targets containing ``"img"`` or ``"video"`` are drawn from their URL;
    all other targets are drawn as text.
    """
    # BUG FIX: ``np.asarray`` returns the *same* object for ndarray input, so
    # the in-place rescaling below used to mutate the caller's array — and
    # raised for integer dtypes (in-place true division). Copy to float.
    embedding = np.array(embedding, dtype=float)
    # Scale each dimension to [-0.5, 0.5].
    for dim in range(embedding.shape[1]):
        embedding[:, dim] = embedding[:, dim] - embedding[:, dim].min()
        embedding[:, dim] /= embedding[:, dim].max()
        embedding[:, dim] -= 0.5
    images = [
        k for k, target in enumerate(targets) if "img" in target or "video" in target
    ]
    image_urls = [image_url(target) for k, target in enumerate(targets) if k in images]
    data = {
        "x": embedding[images, 0],
        # For a 1D embedding, reuse the x coordinate as y.
        "y": embedding[images, 1] if len(embedding[0]) > 1 else embedding[images, 0],
        "image_url": image_urls,
    }
    source = ColumnDataSource(data=data)
    plot = figure(
        title=alg,
        plot_width=600,
        plot_height=500,
        toolbar_location="right",
        background_fill_color="#fafafa",
    )
    # Draw image targets at a fixed on-screen size.
    w = h = {"units": "screen", "value": 80}
    glyph = ImageURL(x="x", y="y", url="image_url", w=w, h=h)
    plot.add_glyph(source, glyph)
    # Remaining targets are rendered as their raw text.
    text = [k for k in range(len(targets)) if k not in images]
    data = {
        "x": embedding[text, 0],
        "y": embedding[text, 1] if len(embedding[0]) > 1 else embedding[text, 0],
        "text": [target for k, target in enumerate(targets) if k in text],
    }
    glyph = Text(x="x", y="y", text="text")
    source = ColumnDataSource(data=data)
    plot.add_glyph(source, glyph)
    return plot
async def _get_server_metrics():
    """Fetch request-duration histogram buckets from Prometheus.

    Returns a DataFrame with columns ``count`` (cumulative bucket count),
    ``le`` (bucket upper bound) and ``endpoint``, with Salmon-internal
    endpoints filtered out.
    """
    base = "http://prom:9090"
    start = datetime.now() - timedelta(days=1)
    end = datetime.now()
    payload = {
        "query": "starlette_request_duration_seconds_bucket",
        "start": start.isoformat(),
        "end": end.isoformat(),
        "step": 0.1,
    }
    r = requests.post(base + "/api/v1/query", data=payload)
    assert r.status_code == 200
    raw = r.json()
    assert raw["status"] == "success"
    payload_data = raw["data"]
    assert payload_data["resultType"] == "vector"
    rows = payload_data["result"]
    # Each vector entry is {"metric": {...labels...}, "value": [ts, count]}.
    assert all(len(m["value"]) == 2 for m in rows)
    assert all(set(m.keys()) == {"value", "metric"} for m in rows)
    flattened = [{"value": m["value"][1], **m["metric"]} for m in rows]
    df = pd.DataFrame(flattened)
    df["value"] = df["value"].astype(float)
    df["le"] = df["le"].astype(float)
    proc = df[["value", "le", "path"]]
    proc.columns = ["count", "le", "endpoint"]
    bad_endpoints = [
        "/favicon.ico",
        "/metrics",
        "/api/v1/query",
        "/static",
        "/init_exp",
    ]
    # Drop internal endpoints, with and without a trailing slash.
    skip = proc.endpoint.isin(bad_endpoints)
    skip |= proc.endpoint.isin([e + "/" for e in bad_endpoints])
    return proc[~skip].copy()
async def _process_endpoint_times(p, endpoint):
    """Convert cumulative Prometheus buckets into per-bucket probabilities.

    Parameters
    ----------
    p : pd.DataFrame
        Rows for one endpoint with columns ``count`` (cumulative count) and
        ``le`` (bucket upper bound).
    endpoint : str
        Endpoint name, copied into the output.

    Returns
    -------
    pd.DataFrame
        Columns ``between`` (count in each bucket), ``upper``/``lower``
        (bucket bounds), ``endpoint`` and ``prob`` (normalized ``between``).
    """
    e = endpoint
    # Anchor the cumulative counts with a zero bucket at le=0.
    base = pd.DataFrame([{"count": 0, "le": 0, "endpoint": e}])
    # FIX: DataFrame.append was removed in pandas 2.0; concat is equivalent.
    p = pd.concat([p, base])
    p = p.sort_values(by="le")
    # Cumulative counts -> per-bucket counts.
    between = p["count"].diff()
    limits = p["le"].values
    upper = limits
    idx = np.arange(len(p))
    lower = limits[idx - 1]  # NOTE: the first row wraps around to the last limit
    df = pd.DataFrame(
        {"between": between, "upper": upper, "lower": lower, "endpoint": e}
    )
    df["prob"] = df["between"] / df["between"].sum()
    return df.copy()
async def get_endpoint_time_plots():
    """Build one processing-time histogram per active endpoint.

    Returns a dict mapping endpoint name to a Bokeh figure. Endpoints with
    at most one recorded request are omitted.
    """
    proc = await _get_server_metrics()
    endpoints = proc.endpoint.unique()
    dfs = {
        e: await _process_endpoint_times(proc[proc.endpoint == e], e) for e in endpoints
    }
    out = {}
    for e, df in dfs.items():
        logger.info(df.columns)
        # Label sub-100ms (positive) bucket bounds in milliseconds.
        x = [
            str(xi) if xi >= 0.1 or xi <= 0 else "{}ms".format(int(xi * 1000))
            for xi in df.upper.unique()
        ]
        p = figure(
            x_range=x,
            plot_height=150,
            toolbar_location=None,
            title=f"{e} processing time",
            width=500,
            tools="",
            background_fill_color="#fafafa",
        )
        _data = {k: df[k].values.tolist() for k in ["upper", "between"]}
        _data["upper"] = [str(k) for k in _data["upper"]]
        # NOTE(review): ``source`` is built but never passed to ``vbar`` below
        # (the data is passed directly) — verify whether it is still needed.
        source = ColumnDataSource(_data)
        p.vbar(x=x, top=_data["between"], width=0.9, line_color="#" + "e" * 6)
        p.yaxis.axis_label = "Frequency"
        p.xaxis.axis_label = "Processing time (s)"
        p.yaxis.minor_tick_line_color = None  # turn off x-axis minor ticks
        hits = np.asarray(_data["between"])
        hits = hits[~np.isnan(hits)]
        if hits.sum() > 1:  # Only put plots in that have more than 1 hit
            out[e] = p
    return out
async def _get_alg_perf(df, agg="median"):
    """Plot per-task algorithm timings (columns named ``time_*``) over time.

    ``agg`` selects which aggregate column is drawn for each metric,
    e.g. ``time_search_median`` when ``agg="median"``.
    """
    # All timing columns except the whole-loop total.
    cols = [c for c in df.columns if "time_" == c[:5] and c != "time_loop"]
    s = df[cols + ["time"]].copy()
    partial = df[list(cols)].copy().to_numpy()
    s["timedelta"] = pd.to_timedelta(s["time"] - s["time"].min(), unit="s")
    s = s.sort_values(by="timedelta")
    source = ColumnDataSource(s)
    # Strip the trailing aggregate suffix ("_median", ...) to get metric names.
    # NOTE(review): ``names`` is a set, so the metric-to-color pairing in the
    # zip below is not deterministic across runs — confirm whether it matters.
    names = {"_".join(v.split("_")[:-1]) for v in cols}
    lims = (partial.min(), partial.max())  # NOTE(review): computed but unused
    p = figure(
        title="Algorithm timing",
        x_axis_label="Time since start",
        y_axis_label="Time spent per task (s)",
        x_axis_type="datetime",
        width=600,
        height=300,
        toolbar_location="above",
        background_fill_color="#fafafa",
    )
    colors = d3["Category10"][10]
    for name, color in zip(names, colors):
        base = dict(x="timedelta", y=f"{name}_{agg}", source=source)
        p.line(**base, legend_label=name, line_width=2, line_color=color)
        p.circle(**base, size=5, color=color)
    p.legend.location = "top_left"
    return p
async def response_rate(df, n_sec=30):
    """Plot a moving average of responses received per second.

    Parameters
    ----------
    df : pd.DataFrame
        Must contain a ``time_received`` column (seconds).
    n_sec : int
        Width of the moving-average window in seconds.
    """
    df = df.copy()
    df["time_since_start"] = df["time_received"] - df["time_received"].min()
    df["response_received"] = 1  # each row counts as one response
    s = df[["response_received", "time_since_start"]].copy()
    s["time_since_start"] = s.time_since_start.apply(lambda x: timedelta(seconds=x))
    base = s.copy()
    # Pad zero-count rows just after each response (and after the window has
    # passed) so the rolling average returns to zero between bursts.
    for _sec in [1e-3, n_sec + 1e-3]:
        t = base.copy()
        t["time_since_start"] += timedelta(seconds=_sec)
        t["response_received"] = 0
        s = pd.concat((t, s))
    s = s.sort_values(by="time_since_start").set_index("time_since_start")
    # Rolling count over the window, scaled to responses per second.
    ss = s.rolling(window=f"{n_sec}s").sum() / n_sec
    source = ColumnDataSource(ss)
    p = figure(
        title="Responses per second",
        x_axis_type="datetime",
        x_axis_label="Time since start",
        y_axis_label=f"({n_sec}s moving avg)",
        width=600,
        height=200,
        toolbar_location="above",
        background_fill_color="#fafafa",
    )
    p.varea(x="time_since_start", y1="response_received", source=source)
    return p
async def _get_query_db(df, agg="median"):
    """Plot the number of search queries in the database over time.

    Draws every ``n_queries*`` column matching the chosen aggregate
    (``agg``); returns ``None`` when no such column exists.
    """
    d = df.copy()
    d["time_since_start"] = d["time"] - d["time"].min()
    d["datetime"] = d["time_since_start"].apply(
        lambda x: datetime(1970, 1, 1) + timedelta(seconds=x)
    )
    source = ColumnDataSource(d)
    Y = [c for c in d.columns if ("n_queries" in c) and (agg in c)]
    # Use a log axis only when the values span a large (>=50x) range.
    ratio = df[Y].max().max() / df[Y].min().min()
    kwargs = {} if ratio < 50 else dict(y_axis_type="log")
    p = figure(
        title="Database queries",
        x_axis_type="datetime",
        x_axis_label="Time since start",
        y_axis_label="Number of queries",
        width=600,
        height=200,
        toolbar_location="above",
        background_fill_color="#fafafa",
        **kwargs,
    )
    if not len(Y):
        logger.warning(f"No columns to plot! Y = {Y} but d.columns = {d.columns}")
        return None
    COLORS = d3["Category10"][len(Y)]
    lines = []
    for y, color in zip(Y, COLORS):
        base = dict(x="datetime", y=y, source=source)
        line = p.line(**base, line_color=color, line_width=2)
        p.circle(**base, size=5, color=color)
        lines.append([line])
    # One legend entry per metric, labeled by the bare metric name.
    names = [y.replace("n_queries_", "").replace(f"_{agg}", "") for y in Y]
    items = list(zip(names, lines))
    legend = Legend(items=items, location="top_left")  # , label_width=130)
    p.add_layout(legend, "right")
    return p
async def _get_response_rate_plots(timestamps: pd.Series):
    """Build response-rate and inter-response-delay histograms.

    Parameters
    ----------
    timestamps : pd.Series
        Seconds responses received.

    Returns
    -------
    (p1, p2, meta)
        ``p1``: histogram of response rates, ``p2``: histogram of delays
        between consecutive responses, ``meta``: summary statistics.
    """
    timestamps = timestamps.sort_values()
    timestamps -= timestamps.min()
    window = 1  # bucket width (seconds) for the rate estimate
    # Responses per ``window``-second bucket, then the distribution of those
    # per-bucket rates.
    _rates_per_sec = (timestamps.copy() / window).astype(int).value_counts()
    rates_per_sec = _rates_per_sec.value_counts().sort_index()
    rates = rates_per_sec.index
    prob = rates_per_sec.to_numpy() / rates_per_sec.sum()
    # Drop rates observed less than 1% of the time.
    rates = rates[prob >= 0.01]
    prob = prob[prob >= 0.01]
    # Turn the rate values into histogram bin edges.
    rates = np.array(rates.tolist() + [rates.max() + 1])
    rates = rates - 1
    rates = (rates * 1.0) / window
    x = rates.copy()
    p1 = _make_hist(
        "Rate responses received",
        f"Rate (responses/sec over {window}s)",
        prob,
        x,
        width=300,
        toolbar_location=None,
        x_range=(x.min(), x.max()),
    )
    p1.xaxis.ticker = tickers.BasicTicker(min_interval=1)
    p1.xaxis[0].formatter = NumeralTickFormatter(format="0,0")
    p1.yaxis.axis_label = "Probability (empirical)"
    p1.yaxis[0].formatter = NumeralTickFormatter(format="0%")
    # Delays between consecutive responses, binned on a 1-2-5 log grid
    # restricted to the 1st..99th percentile of observed gaps.
    gaps = timestamps.diff().dropna()
    _bins = [[1 * 10 ** i, 2 * 10 ** i, 5 * 10 ** i] for i in range(-5, 5)]
    bins = [
        b
        for bins3 in _bins
        for b in bins3
        if np.percentile(gaps, 1) <= b <= np.percentile(gaps, 99)
    ]
    bin_heights, edges = np.histogram(gaps, bins=bins)
    p2 = _make_hist(
        "Delay between responses",
        "Time (s)",
        bin_heights,
        edges,
        width=300,
        toolbar_location=None,
        x_axis_type="log",
    )
    meta = {
        "median_response_delay": "{:0.2f}".format(np.median(gaps)),
        "rate_mean": "{:0.2f}".format(_rates_per_sec.mean() / window),
        "rate_window": window,
    }
    return p1, p2, meta
import itertools
import random
from gc import collect as garbage_collect
from pprint import pprint
from time import sleep, time
from typing import Any, Dict, List, Optional, Tuple, TypeVar
import numpy as np
from salmon.utils import flush_logger, get_logger
logger = get_logger(__name__)
Query = TypeVar("Query")
Answer = TypeVar("Answer")
class Sampler:
    """
    Run a sampling algorithm. Provides hooks to connect with the database and
    the Dask cluster.

    Parameters
    ----------
    ident : str
        The algorithm identifier. This value is used to identify the algorithm
        in the database.
    """

    def __init__(self, ident: str = ""):
        self.ident = ident
        # Trailing underscore: populated during fitting, per project convention.
        self.meta_ = []

    def redis_client(self, decode_responses=True) -> "RedisClient":
        """
        Get the database (/Redis client).
        """
        from rejson import Client as RedisClient

        return RedisClient(host="redis", port=6379, decode_responses=decode_responses)

    def run(self, client: "DaskClient"):
        """
        Run the algorithm's main loop until a reset is requested.

        Parameters
        ----------
        client : DaskClient
            A client to Dask.

        Notes
        -----
        This function runs the adaptive algorithm. Because it's asynchronous,
        this function should return if
        ``"reset" in rj.keys() and rj.jsonget("reset")``.

        Each iteration runs three tasks concurrently on Dask workers:
        posting previously scored queries, processing new answers into a
        model update, and searching for the next batch of queries.
        """
        import dask.distributed as distributed
        from redis.exceptions import ResponseError
        from rejson import Path

        root = Path.rootPath()
        rj = self.redis_client()
        answers: List = []
        logger.info(f"Staring {self.ident}")

        def submit(fn: str, *args, allow_other_workers=True, **kwargs):
            # Submit an unbound method of this class to the Dask cluster;
            # allow it to migrate off the preferred worker if requested.
            if "workers" in kwargs:
                kwargs.update({"allow_other_workers": allow_other_workers})
            return client.submit(getattr(type(self), fn), *args, **kwargs,)

        update = False
        queries = np.array([])
        scores = np.array([])
        n_model_updates = 0
        rj.jsonset(f"alg-perf-{self.ident}", root, [])
        save_deadline = 0.0  # right away
        data: List[Dict[str, Any]] = []
        error_raised: List[int] = []
        for k in itertools.count():
            try:
                loop_start = time()
                datum = {"iteration": k, "ident": self.ident, "time": time()}

                # Pull (and clear) the answers the frontend has collected.
                answers = self.get_answers(rj, clear=True)
                datum["num_answers"] = len(answers)
                self_future = client.scatter(self)

                _start = time()
                if len(queries) and len(scores):
                    queries_f = client.scatter(queries)
                    scores_f = client.scatter(scores)
                else:
                    queries_f = scores_f = []

                # If the model changed last iteration, stale queries in the
                # database are invalid — clear them before posting new ones.
                if update:
                    datum["cleared_queries"] = True
                    __start = time()
                    self.clear_queries(rj)
                    datum["time_clearing"] = time() - __start
                else:
                    datum["cleared_queries"] = False

                # Event used to stop posting/searching once the model update
                # ("process_answers") has finished.
                done = distributed.Event(name="pa_finished")
                done.clear()

                workers = list(client.has_what())
                random.shuffle(workers)
                f_post = submit(
                    "post_queries",
                    self_future,
                    queries_f,
                    scores_f,
                    done=done,
                    workers=workers[0],
                )
                f_model = submit(
                    "process_answers", self_future, answers, workers=workers[1],
                )
                f_search = submit(
                    "get_queries", self_future, stop=done, workers=workers[2],
                )

                # Wall-clock timings of each task, filled in by callbacks.
                time_model = 0.0
                time_post = 0.0
                time_search = 0.0

                def _model_done(_):
                    nonlocal time_model
                    nonlocal done
                    # Model is updated — signal posting/searching to wind down.
                    done.set()
                    time_model += time() - _start

                def _post_done(_):
                    nonlocal time_post
                    time_post += time() - _start

                def _search_done(_):
                    nonlocal time_search
                    time_search += time() - _start

                f_model.add_done_callback(_model_done)
                f_post.add_done_callback(_post_done)
                f_search.add_done_callback(_search_done)

                # Future.result raises errors automatically
                posted = f_post.result()
                new_self, update = f_model.result()
                queries, scores, search_meta = f_search.result()

                _datum_update = {
                    "n_queries_posted": posted,
                    "n_queries_scored": len(queries),
                    "n_queries_in_db": rj.zcard(f"alg-{self.ident}-queries"),
                    "model_updated": update,
                    "n_model_updates": n_model_updates,
                    "time_posting_queries": time_post,
                    "time_model_update": time_model,
                    "time_search": time_search,
                    "time": time(),
                    **search_meta,
                }
                datum.update(_datum_update)

                # Adopt the updated state computed on the worker.
                if update:
                    _s = time()
                    self.__dict__.update(new_self.__dict__)
                    datum["time_update"] = time() - _s
                    n_model_updates += 1

                # Persist state/model at most once a minute.
                if time() > save_deadline + 1e-3:
                    save_deadline = time() + 60
                    _s = time()
                    self.save()
                    datum["time_save"] = time() - _s

                datum["time_loop"] = time() - loop_start
                data.append(datum)
                logger.info(datum)

                # Periodically aggregate the accumulated per-iteration stats
                # and append them to the performance log in Redis.
                posting_deadline = data[0]["time"] + 2 * 60
                if time() >= posting_deadline or k == 10 or k == 20:
                    flush_logger(logger)
                    keys = data[-1].keys()
                    to_post = {}
                    for _k in keys:
                        vals = [d.get(_k, None) for d in data]
                        # NOTE(review): this also drops 0/0.0/False values,
                        # not just missing ones — confirm that is intended.
                        vals = [v for v in vals if v]
                        if not len(vals):
                            continue
                        if isinstance(vals[0], (int, np.integer)):
                            Type = int
                        elif isinstance(vals[0], (float, np.floating)):
                            Type = float
                        else:
                            continue
                        _update = {
                            f"{_k}_median": np.median(vals),
                            f"{_k}_mean": np.mean(vals),
                            f"{_k}_min": np.min(vals),
                            f"{_k}_max": np.max(vals),
                        }
                        if _k == "time":
                            _update = {"time": _update["time_median"]}
                        to_post.update({_k: Type(v) for _k, v in _update.items()})
                    try:
                        rj.jsonarrappend(f"alg-perf-{self.ident}", root, to_post)
                    except ResponseError as e:
                        if (
                            "could not perform this operation on a key that doesn't exist"
                            in str(e)
                        ):
                            # I think this happens when the frontend deletes
                            # the database when /reset is triggered
                            pass
                        else:
                            raise e
                    data = []

                # Exit cleanly when the frontend requests a reset.
                if "reset" in rj.keys() and rj.jsonget("reset", root):
                    logger.warning(f"Resetting {self.ident}")
                    self.reset(client, rj, futures=[f_model, f_post, f_search])
                    break
            except Exception as e:
                logger.exception(e)
                flush_logger(logger)
                error_raised.append(k)
                # Give up only after 5 *consecutive* failing iterations.
                __n = 5
                if np.diff(error_raised[-__n:]).tolist() == [1] * (__n - 1):
                    logger.exception(e)
                    flush_logger(logger)
                    raise e
        return True

    def save(self) -> bool:
        """
        Save the sampler's state and current embedding to the database.
        """
        import cloudpickle

        # Use raw bytes (decode_responses=False) for the pickled payloads.
        rj2 = self.redis_client(decode_responses=False)
        out = cloudpickle.dumps(self)
        rj2.set(f"state-{self.ident}", out)
        try:
            out = cloudpickle.dumps(self.get_model())
        except NotImplementedError:
            # Subclass exposes no model; only the raw state is saved.
            pass
        else:
            rj2.set(f"model-{self.ident}", out)
        return True

    def reset(self, client, rj, futures=None):
        """
        Stop the algorithm. The algorithm will be deleted shortly after
        this function is called.
        """
        from rejson import Client as RedisClient
        from rejson import Path

        root = Path.rootPath()
        reset = rj.jsonget("reset", root)
        logger.warning("reset=%s for %s", reset, self.ident)
        if not reset:
            return False
        logger.warning(f"Deleting various keys for {self.ident}")
        rj2 = RedisClient(host="redis", port=6379, decode_responses=False)
        rj2.delete(f"state-{self.ident}")
        rj2.delete(f"model-{self.ident}")
        rj.jsondel(f"alg-perf-{self.ident}", root)
        rj.delete(f"alg-perf-{self.ident}")

        # Clear answers
        logger.warning(f"Clearing answers for {self.ident}")
        self.get_answers(rj, clear=True)

        # Clear queries (twice)
        logger.warning(f"Clearing queries for {self.ident}")
        key = f"alg-{self.ident}-queries"
        # Remove queries in progressively wider score windows (2^4 .. 2^17)
        # until the sorted set is empty.
        for k in range(4, 18):
            limit = 2 ** k
            rj.zremrangebyscore(key, -limit, limit)
            sleep(0.1)
            n_queries = rj.zcard(key)
            logger.warning(f"n_queries={n_queries}")
            if not n_queries:
                break
        logger.warning(f"Clearing queries again for {self.ident}")
        self.clear_queries(rj)

        if futures:
            for future in futures:
                if future:
                    client.cancel(future, force=True)
        logger.warning(f"Restarting Dask client for {self.ident}")
        f = client.restart(timeout="5s")
        try:
            client.sync(f)
        except:
            # Best-effort: a restart timeout is acceptable during shutdown.
            pass
        client.run(garbage_collect)
        logger.warning(f"Setting stopped-{self.ident}")
        # Tell the frontend this sampler has fully shut down.
        rj.jsonset(f"stopped-{self.ident}", Path("."), True)
        logger.warning(f"All done stopping {self.ident}")
        return True

    @property
    def clear(self):
        """
        Should the queries be cleared from the database?
        """
        return True

    def process_answers(self, answers: List[Answer]):
        """
        Process answers.

        Parameters
        ----------
        answers : List[Answers]
            Each answer is a dictionary. Each answer certainly has the keys
            "head", "left", "right" and "winner", and may have the key
            "puid" for participant UID.

        Returns
        -------
        data : dict
            An update to self.__dict__.
        """
        raise NotImplementedError

    def get_queries(self) -> Tuple[List[Query], List[float]]:
        """
        Get queries.

        Returns
        -------
        queries : List[Query]
            The list of queries
        scores : List[float]
            The scores for each query. Higher scores are sampled more
            often.
        meta : Dict[str, Any]
            Information about the search.

        Notes
        -----
        The scores have to be unique. The underlying implementation does
        not sample queries of the same score unbiased.
        """
        return [], [], {}

    def get_model(self) -> Dict[str, Any]:
        """
        Get the model underlying the algorithm.

        Returns
        -------
        state : Dict[str, Any]
            The state of the algorithm. This can be used for display on the
            dashboard or with an HTTP get request.
        """
        raise NotImplementedError

    def clear_queries(self, rj: "RedisClient") -> bool:
        """
        Clear all queries that this sampler has posted from the database.
        """
        rj.delete(f"alg-{self.ident}-queries")
        return True

    def post_queries(
        self,
        queries: List[Query],
        scores: List[float],
        rj: Optional["RedisClient"] = None,
        done=None,
    ) -> int:
        """
        Post scored queries to the database.

        Parameters
        ----------
        queries : List[Query]
            Queries to post to the database
        scores : List[float]
            The scores for each query
        rj : RedisClient, optional
            The database
        done : distributed.Event, optional
            When set, posting stops after the current chunk.

        Returns
        -------
        n_queries : int
            The number of queries posted to the database.
        """
        if rj is None:
            rj = self.redis_client()
        if not len(queries):
            return 0
        if isinstance(queries, np.ndarray) and isinstance(scores, np.ndarray):
            # Sort high-to-low so the best queries are posted first, and
            # drop any query whose score is NaN.
            idx = np.argsort(-1 * scores)
            assert (
                len(scores) == queries.shape[0]
            ), f"Different lengths {scores.shape}, {queries.shape}"
            scores = scores[idx]  # high to low scores
            queries = queries[idx]
            valid = ~np.isnan(scores)
            scores = scores[valid]
            queries = queries[valid]
            high = scores[0]
            low = scores[-1]
            assert low <= high, f"high={high} to low={low} scores"
        # Post in chunks so a ``done`` signal can interrupt early.
        chunk_size = 2000
        n_chunks = len(queries) // chunk_size
        split_queries = np.array_split(queries, max(n_chunks, 1))
        split_scores = np.array_split(scores, max(n_chunks, 1))
        n_queries = 0
        name = self.ident
        key = f"alg-{name}-queries"
        for _queries, _scores in zip(split_queries, split_scores):
            queries2 = {
                self.serialize_query(q): float(s)
                for q, s in zip(_queries, _scores)
                if not np.isnan(s)
            }
            if len(queries2):
                rj.zadd(key, queries2)
            n_queries += len(queries2)
            if done is not None and done.is_set():
                break
        return n_queries

    def serialize_query(self, q: Query) -> str:
        """
        Serialize a query (so it can go in the database).
        """
        # TODO: use ast.literal_eval or json.loads
        h, a, b = q
        return f"{h}-{a}-{b}"

    def get_answers(self, rj: "RedisClient", clear: bool = True) -> List[Answer]:
        """
        Get all answers the frontend has received, atomically clearing them.
        """
        from rejson import Path

        root = Path.rootPath()
        if not clear:
            raise NotImplementedError
        key = f"alg-{self.ident}-answers"
        if key in rj.keys():
            # Pipeline makes the read-then-clear atomic.
            pipe = rj.pipeline()
            pipe.jsonget(key, Path("."))
            pipe.jsonset(key, Path("."), [])
            answers, success = pipe.execute()
            return answers
        return []
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contain the class Coupler. """
from __future__ import print_function, division
from c3po.PhysicsDriver import PhysicsDriver
class NormChoice(object):
    """! Enumeration of the available norm choices.

    Possible values:
    - normMax : infinite norm (maximum of absolute values).
    - norm2 : Euclidean norm.
    """

    normMax = 0
    norm2 = 1
class Coupler(PhysicsDriver):
    """! Coupler is the base class for the definition of a coupling.

    A coupling is defined using PhysicsDriver, DataManager and Exchanger objects.
    A user needs to define his own class inheriting from Coupler and to define its solveTimeStep() method.
    It may also be necessary to overload the I/O methods (for fields and/or of scalars) inherited from PhysicsDriver.

    @note Coupler inherits from PhysicsDriver, it is therefore possible to couple Coupler objects!
    """

    def __init__(self, physics, exchangers, dataManagers=None):
        """! Build a Coupler object.

        @param physics a list (or dictionary) of PhysicsDriver objects to be coupled.
        @param exchangers a list (or dictionary) of Exchanger for the coupling.
        @param dataManagers a list (or dictionary) of DataManager used in the coupling (optional).
        """
        PhysicsDriver.__init__(self)
        self._physicsDrivers = physics
        # Keep a plain list view so the iteration code below works whether
        # physics was provided as a list or as a dictionary.
        self._physicsDriversList = physics if not isinstance(physics, dict) else list(physics.values())
        self._exchangers = exchangers
        # None sentinel instead of a mutable [] default argument, which would
        # be shared between every Coupler instance.
        self._dataManagers = [] if dataManagers is None else dataManagers
        self._norm = NormChoice.normMax
        self._dt = 1.e30

    def getMEDCouplingMajorVersion(self):
        """! See PhysicsDriver.getMEDCouplingMajorVersion().

        @throws Exception if the coupled codes do not agree on a unique version.
        """
        version = 0
        for physics in self._physicsDriversList:
            localVersion = version
            try:
                localVersion = physics.getMEDCouplingMajorVersion()
            except NotImplementedError:
                localVersion = version
            if localVersion != version:
                if version == 0:
                    version = localVersion
                else:
                    raise Exception("Coupler.getMEDCouplingMajorVersion Not a unique version.")
        if version == 0:
            # No coupled code provided a version.
            raise NotImplementedError
        return version

    def isMEDCoupling64Bits(self):
        """! See PhysicsDriver.isMEDCoupling64Bits().

        @throws Exception if the coupled codes do not give a unique answer.
        """
        resu = None
        for physics in self._physicsDriversList:
            localResu = resu
            try:
                localResu = physics.isMEDCoupling64Bits()
            except NotImplementedError:
                pass
            if localResu != resu:
                if resu is None:
                    resu = localResu
                else:
                    raise Exception("Coupler.isMEDCoupling64Bits Not a unique answer.")
        if resu is None:
            # No coupled code provided an answer.
            raise NotImplementedError
        return resu

    def initialize(self):
        """! See PhysicsDriver.initialize(). """
        # First initialize every code, then collect all the statuses: every
        # code gets a chance to initialize even if a previous one failed.
        for physics in self._physicsDriversList:
            physics.init()
        resu = True
        for physics in self._physicsDriversList:
            resu = (resu and physics.getInitStatus())
        return resu

    def terminate(self):
        """! See PhysicsDriver.terminate(). """
        for physics in self._physicsDriversList:
            physics.term()

    def presentTime(self):
        """! See PhysicsDriver.presentTime().

        All coupled codes are assumed to share the same present time: the one
        of the first code is returned (0. if there is no code).
        """
        if len(self._physicsDriversList) > 0:
            return self._physicsDriversList[0].presentTime()
        return 0.

    def computeTimeStep(self):
        """! See PhysicsDriver.computeTimeStep().

        @return the minimum of the time steps of the coupled codes, and whether
        at least one of them wants to stop.
        """
        (dt, stop) = (1.e30, False)
        for physics in self._physicsDriversList:
            (dtPhysics, stopPhysics) = physics.computeTimeStep()
            if dtPhysics < dt:
                dt = dtPhysics
            stop = (stop or stopPhysics)
        return (dt, stop)

    def initTimeStep(self, dt):
        """! See PhysicsDriver.initTimeStep(). """
        self._dt = dt
        resu = True
        for physics in self._physicsDriversList:
            resu = (physics.initTimeStep(dt) and resu)
        return resu

    def getSolveStatus(self):
        """! See PhysicsDriver.getSolveStatus(). """
        resu = True
        for physics in self._physicsDriversList:
            resu = resu and physics.getSolveStatus()
        return resu

    def validateTimeStep(self):
        """! See PhysicsDriver.validateTimeStep(). """
        for physics in self._physicsDriversList:
            physics.validateTimeStep()

    def setStationaryMode(self, stationaryMode):
        """! See PhysicsDriver.setStationaryMode(). """
        for physics in self._physicsDriversList:
            physics.setStationaryMode(stationaryMode)

    def getStationaryMode(self):
        """! See PhysicsDriver.getStationaryMode().

        @throws Exception if the coupled codes are not all in the same stationary mode.
        """
        resu = False
        if len(self._physicsDriversList) > 0:
            resu = self._physicsDriversList[0].getStationaryMode()
        for physics in self._physicsDriversList[1:]:
            if physics.getStationaryMode() != resu:
                raise Exception("Coupler.getStationaryMode Not a unique stationary mode.")
        return resu

    def abortTimeStep(self):
        """! See PhysicsDriver.abortTimeStep(). """
        for physics in self._physicsDriversList:
            physics.abortTimeStep()

    def isStationary(self):
        """! See PhysicsDriver.isStationary(). """
        resu = True
        for physics in self._physicsDriversList:
            resu = (resu and physics.isStationary())
        return resu

    def resetTime(self, time_):
        """! See PhysicsDriver.resetTime(). """
        for physics in self._physicsDriversList:
            physics.resetTime(time_)

    def getIterateStatus(self):
        """! See PhysicsDriver.getIterateStatus(). """
        succeed = True
        converged = True
        for physics in self._physicsDriversList:
            (physicsSucceed, physicsConverged) = physics.getIterateStatus()
            succeed = succeed and physicsSucceed
            converged = converged and physicsConverged
        return (succeed, converged)

    def setNormChoice(self, choice):
        """! Choose a norm for future use.

        @param choice to be chosen between:
        - NormChoice.normMax : infinite norm. This is the default choice.
        - NormChoice.norm2 : norm 2 ( sqrt(sum_i(val[i] * val[i])) ).
        """
        self._norm = choice

    def getNorm(self, data):
        """! Return the norm chosen by setNormChoice of data (a DataManager).

        @param data a DataManager object.
        @return the asked norm of data.
        @throws Exception if the required norm is unknown.
        """
        if self._norm == NormChoice.normMax:
            return data.normMax()
        if self._norm == NormChoice.norm2:
            return data.norm2()
        raise Exception("Coupler.getNorm The required norm is unknown.")

    def readNormData(self):
        """! Return a list of the norms of the DataManager objects held by self.

        @return list of the norms of the DataManager objects.
        """
        return [self.getNorm(data) for data in self._dataManagers]

    def normalizeData(self, norms):
        """! Divide the DataManager objects by the scalar values provided.

        @param norms list of scalar values. The DataManager are divided by these scalars.
        """
        for i, norm in enumerate(norms):
            if norm > 0.:
                self._dataManagers[i] *= 1. / norm

    def denormalizeData(self, norms):
        """! Multiply the DataManager objects by the scalar values provided.

        @param norms list of scalar values. The DataManager are multiplied by these scalars.
        """
        for i, norm in enumerate(norms):
            if norm > 0.:
                self._dataManagers[i] *= norm
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contain the class LocalDataManager. """
from __future__ import print_function, division
import math
import numpy
from c3po.DataManager import DataManager
from c3po.DataAccessor import DataAccessor
class LocalDataManager(DataManager, DataAccessor):
    """! LocalDataManager is the implementation of DataManager for local data. It also implements DataAccessor.

    Data can be double, int, string, fields of double or fields of int.
    Only double and fields of double are affected by the methods inherited from DataManager.
    Other data are just (shallow) copied in new objects created by these methods.
    """

    def __init__(self):
        """! Default constructor """
        self.valuesDouble = {}
        self.valuesInt = {}
        self.valuesString = {}
        self.fieldsDouble = {}
        self.fieldsInt = {}
        self.fieldsDoubleTemplates = {}

    def clone(self):
        """! Return a clone of self.

        @return A clone of self. Data are copied.
        """
        return self * 1.

    def cloneEmpty(self):
        """! Return a clone of self without copying the data.

        @return An empty clone of self.
        @note Non-double data (int values, string values, int fields and field templates)
        are shared with self, not copied: they are never modified by DataManager operations.
        """
        output = LocalDataManager()
        output.valuesInt = self.valuesInt
        output.valuesString = self.valuesString
        output.fieldsInt = self.fieldsInt
        output.fieldsDoubleTemplates = self.fieldsDoubleTemplates
        return output

    def copy(self, other):
        """! Copy data of other in self.

        @param other a LocalDataManager with the same list of data than self.
        @throw Exception if self and other are not consistent.
        """
        self.checkBeforeOperator(other)
        for name in self.valuesDouble:
            self.valuesDouble[name] = other.valuesDouble[name]
        for name in self.fieldsDouble:  # pylint: disable=consider-using-dict-items
            otherArray = other.fieldsDouble[name].getArray()
            # Copy through the dataArray to avoid the mesh identity checks done by the field operators.
            self.fieldsDouble[name].getArray().setPartOfValues1(otherArray, 0, otherArray.getNumberOfTuples(), 1, 0, otherArray.getNumberOfComponents(), 1)

    def normMax(self):
        """! Return the infinite norm.

        @return The max of the absolute values of the scalars and of the infinite norms of the MED fields.
        """
        norm = 0.
        for scalar in self.valuesDouble.values():
            if abs(scalar) > norm:
                norm = abs(scalar)
        for med in self.fieldsDouble.values():
            normMED = med.normMax()
            # normMax() returns one value per component for multi-component fields
            # and a plain float otherwise; in the latter case max() raises TypeError.
            try:
                normMED = max(normMED)
            except TypeError:
                pass
            if normMED > norm:
                norm = normMED
        return norm

    def norm2(self):
        """! Return the norm 2.

        @return sqrt(sum_i(val[i] * val[i])) where val[i] stands for each scalar and each component of the MED fields.
        """
        norm = 0.
        for scalar in self.valuesDouble.values():
            norm += scalar * scalar
        for med in self.fieldsDouble.values():
            localNorm = med.norm2()
            norm += localNorm * localNorm
        return math.sqrt(norm)

    def checkBeforeOperator(self, other):
        """! INTERNAL Make basic checks before the call of an operator: same data names between self and other. """
        if len(self.valuesDouble) != len(other.valuesDouble) or len(self.fieldsDouble) != len(other.fieldsDouble):
            raise Exception("LocalDataManager.checkBeforeOperator : we cannot call an operator between two LocalDataManager with different number of stored data.")
        for name in self.valuesDouble:
            if name not in other.valuesDouble:
                raise Exception("LocalDataManager.checkBeforeOperator : we cannot call an operator between two LocalDataManager with different data.")
        for name in self.fieldsDouble:
            if name not in other.fieldsDouble:
                raise Exception("LocalDataManager.checkBeforeOperator : we cannot call an operator between two LocalDataManager with different data.")

    def __add__(self, other):
        """! Return self + other.

        Use "+" to call it. For example a = b + c.

        @param other a LocalDataManager with the same list of data than self.
        @return a new (consistent with self) LocalDataManager where the data are added.
        @throw Exception if self and other are not consistent.
        """
        self.checkBeforeOperator(other)
        newData = self.cloneEmpty()
        for name in self.valuesDouble:  # pylint: disable=consider-using-dict-items
            newData.valuesDouble[name] = self.valuesDouble[name] + other.valuesDouble[name]
        for name in self.fieldsDouble:  # pylint: disable=consider-using-dict-items
            newData.fieldsDouble[name] = 1. * self.fieldsDouble[name]
            # Work on the dataArray to avoid the mesh identity checks done by the field operators.
            newData.fieldsDouble[name].getArray().addEqual(other.fieldsDouble[name].getArray())
        return newData

    def __iadd__(self, other):
        """! Add other in self (in place addition).

        Use "+=" to call it. For example a += b.

        @param other a LocalDataManager with the same list of data than self.
        @return self.
        @throw Exception if self and other are not consistent.
        """
        self.checkBeforeOperator(other)
        for name in self.valuesDouble:
            self.valuesDouble[name] += other.valuesDouble[name]
        for name in self.fieldsDouble:  # pylint: disable=consider-using-dict-items
            # Work on the dataArray to avoid the mesh identity checks done by the field operators.
            self.fieldsDouble[name].getArray().addEqual(other.fieldsDouble[name].getArray())
        return self

    def __sub__(self, other):
        """! Return self - other.

        Use "-" to call it. For example a = b - c.

        @param other a LocalDataManager with the same list of data than self.
        @return a new (consistent with self) LocalDataManager where the data are substracted.
        @throw Exception if self and other are not consistent.
        """
        self.checkBeforeOperator(other)
        newData = self.cloneEmpty()
        for name in self.valuesDouble:  # pylint: disable=consider-using-dict-items
            newData.valuesDouble[name] = self.valuesDouble[name] - other.valuesDouble[name]
        for name in self.fieldsDouble:  # pylint: disable=consider-using-dict-items
            newData.fieldsDouble[name] = 1. * self.fieldsDouble[name]
            # Work on the dataArray to avoid the mesh identity checks done by the field operators.
            newData.fieldsDouble[name].getArray().substractEqual(other.fieldsDouble[name].getArray())
        return newData

    def __isub__(self, other):
        """! Substract other to self (in place subtraction).

        Use "-=" to call it. For example a -= b.

        @param other a LocalDataManager with the same list of data than self.
        @return self.
        @throw Exception if self and other are not consistent.
        """
        self.checkBeforeOperator(other)
        for name in self.valuesDouble:
            self.valuesDouble[name] -= other.valuesDouble[name]
        for name in self.fieldsDouble:  # pylint: disable=consider-using-dict-items
            # Work on the dataArray to avoid the mesh identity checks done by the field operators.
            self.fieldsDouble[name].getArray().substractEqual(other.fieldsDouble[name].getArray())
        return self

    def __mul__(self, scalar):
        """! Return scalar * self.

        Use "*" to call it. For example a = b * c. The scalar first.

        @param scalar a scalar value.
        @return a new (consistent with self) LocalDataManager where the data are multiplied by scalar.
        """
        newData = self.cloneEmpty()
        for name in self.valuesDouble:  # pylint: disable=consider-using-dict-items
            newData.valuesDouble[name] = scalar * self.valuesDouble[name]
        for name in self.fieldsDouble:  # pylint: disable=consider-using-dict-items
            newData.fieldsDouble[name] = scalar * self.fieldsDouble[name]
        return newData

    def __imul__(self, scalar):
        """! Multiply self by scalar (in place multiplication).

        Use "*=" to call it. For example a *= b.

        @param scalar a scalar value.
        @return self.
        """
        for name in self.valuesDouble:
            self.valuesDouble[name] *= scalar
        for name in self.fieldsDouble:
            self.fieldsDouble[name] *= scalar
        return self

    def imuladd(self, scalar, other):
        """! Add in self scalar * other (in place operation).

        In order to do so, other *= scalar and other *= 1./scalar are done.
        For example a.imuladd(b, c).

        @param scalar a scalar value.
        @param other a LocalDataManager with the same list of data than self.
        @return self.
        @throw Exception if self and other are not consistent.
        """
        if scalar == 0:
            return self
        self.checkBeforeOperator(other)
        # other is scaled, accumulated into self, then scaled back: it is
        # restored (up to round-off) rather than copied, to avoid a temporary.
        other *= scalar
        self += other
        other *= 1. / scalar
        return self

    def dot(self, other):
        """! Return the scalar product of self with other.

        @param other a LocalDataManager with the same list of data than self.
        @return the scalar product of self with other.
        @throw Exception if self and other are not consistent.
        """
        self.checkBeforeOperator(other)
        result = 0.
        for name in self.valuesDouble:  # pylint: disable=consider-using-dict-items
            result += self.valuesDouble[name] * other.valuesDouble[name]
        for name in self.fieldsDouble:  # pylint: disable=consider-using-dict-items
            nparr1 = self.fieldsDouble[name].getArray().toNumPyArray()
            nparr2 = other.fieldsDouble[name].getArray().toNumPyArray()
            # Contract over all axes: 1 axis for single-component fields,
            # 2 (tuples x components) otherwise.
            dim = 1
            if self.fieldsDouble[name].getArray().getNumberOfComponents() > 1:
                dim = 2
            result += numpy.tensordot(nparr1, nparr2, dim)
        return result

    def setInputMEDDoubleField(self, name, field):
        """! Store the MED field field under the name name.

        @param name the name given to the field to store.
        @param field a field to store.
        """
        self.fieldsDouble[name] = field

    def getOutputMEDDoubleField(self, name):
        """! Return the MED field of name name previously stored.

        @param name the name of the field to return.
        @return the MED field of name name previously stored.
        @throw Exception If there is no stored name Double field.
        """
        if name not in self.fieldsDouble:
            raise Exception("LocalDataManager.getOutputMEDDoubleField unknown field " + name)
        return self.fieldsDouble[name]

    def setInputMEDIntField(self, name, field):
        """! Similar to setInputMEDDoubleField() but for MEDIntField. """
        self.fieldsInt[name] = field

    def getOutputMEDIntField(self, name):
        """! Similar to getOutputMEDDoubleField() but for MEDIntField. """
        if name not in self.fieldsInt:
            raise Exception("LocalDataManager.getOutputMEDIntField unknown field " + name)
        return self.fieldsInt[name]

    def getFieldType(self, name):
        """! Return the type of a previously stored field.

        @throw Exception if no field was stored under this name.
        """
        if name in self.fieldsDouble:
            return DataAccessor.ValueType.Double
        if name in self.fieldsInt:
            return DataAccessor.ValueType.Int
        raise Exception("LocalDataManager.getFieldType unknown field " + name)

    def setInputDoubleValue(self, name, value):
        """! Store the scalar value under the name name.

        @param name the name given to the scalar to store.
        @param value a scalar value to store.
        """
        self.valuesDouble[name] = value

    def getOutputDoubleValue(self, name):
        """! Return the scalar of name name previously stored.

        @param name the name of the value to return.
        @return the value of name name previously stored.
        @throw Exception If there is no stored name Double value.
        """
        if name not in self.valuesDouble:
            raise Exception("LocalDataManager.getOutputDoubleValue unknown value " + name)
        return self.valuesDouble[name]

    def setInputIntValue(self, name, value):
        """! Similar to setInputDoubleValue() but for Int. """
        self.valuesInt[name] = value

    def getOutputIntValue(self, name):
        """! Similar to getOutputDoubleValue() but for Int. """
        if name not in self.valuesInt:
            raise Exception("LocalDataManager.getOutputIntValue unknown value " + name)
        return self.valuesInt[name]

    def setInputStringValue(self, name, value):
        """! Similar to setInputDoubleValue() but for String. """
        self.valuesString[name] = value

    def getOutputStringValue(self, name):
        """! Similar to getOutputDoubleValue() but for String. """
        if name not in self.valuesString:
            raise Exception("LocalDataManager.getOutputStringValue unknown value " + name)
        return self.valuesString[name]

    def getValueType(self, name):
        """! Return the type of a previously stored scalar value.

        @throw Exception if no value was stored under this name.
        """
        if name in self.valuesDouble:
            return DataAccessor.ValueType.Double
        if name in self.valuesInt:
            return DataAccessor.ValueType.Int
        if name in self.valuesString:
            return DataAccessor.ValueType.String
        raise Exception("LocalDataManager.getValueType unknown scalar " + name)

    def setInputMEDDoubleFieldTemplate(self, name, field):
        """! Store the MED field field as a MEDFieldTemplate under the name name.

        @param name the name given to the field to store.
        @param field a field to store.
        @note These fields are not be part of data, and will therefore not be taken into account in data manipulations (operators, norms etc.).
        """
        self.fieldsDoubleTemplates[name] = field

    def getInputMEDDoubleFieldTemplate(self, name):
        """! Return the MED field previously stored as a MEDDoubleFieldTemplate under the name name. If there is not, returns 0.

        @param name the name of the field to return.
        @return the MED field of name name previously stored, or 0.
        """
        if name not in self.fieldsDoubleTemplates:
            return 0
        return self.fieldsDoubleTemplates[name]
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contain the class PhysicsDriver. """
from __future__ import print_function, division
from c3po.DataAccessor import DataAccessor
from c3po.services.TransientLogger import Timekeeper, TransientPrinter
class PhysicsDriver(DataAccessor):
"""! PhysicsDriver is an abstract class which standardizes the functionalities expected by computer codes.
It follows the ICoCo (Interface for Code Coupling) V2 standard.
The ICoCo V2 I/O (in/out) methods are defined in the mother class DataAccessor.
ICoCo V2 is originally defined for C++ here: https://github.com/cea-trust-platform/icoco-coupling.
PhysicsDriver (together with DataAccessor) can be seen as the translation in Python of ICoCo V2.
In order to use a code with C3PO it is necessary to define a class that inherits from PhysicsDriver and to overload
needed methods (including the ones of DataAccessor).
Note that not all the methods need to be implemented! Mandatory methods are marked as such.
Some of the methods may not be called when some conditions are not met (i.e. when not in the correct context). Thus
in this documentation we define the "TIME_STEP_DEFINED context" as the context that the code finds itself, when the method
initTimeStep() has been called, and the method validateTimeStep() (or abortTimeStep()) has not yet been called.
This is the status in which the current computation time step is well defined.
Within the computation of a time step (so within TIME_STEP_DEFINED), the temporal semantic of the fields (and
scalar values) is not imposed by the norm. Said differently, it does not require the fields to be defined at the
start/middle/end of the current time step, this semantic must be agreed on between the codes being coupled.
Fields and scalar values that are set within the TIME_STEP_DEFINED context are invalidated (undefined behavior)
after a call to validateTimeStep() (or abortTimeStep()). They need to be set at each time step. However, fields and scalar
values that are set outside of this context (before the first time step for example, or after the resolution of the last
time step) are permanent (unless modified afterward within the TIME_STEP_DEFINED context).
"""
def __init__(self):
"""! Default constructor.
Internal set up and initialization of the code should not be done here, but rather in initialize().
"""
self._initStatus = True
self._solveStatus = True
self._iterateStatus = (True, True)
self._initNb = 0
self._transientPrinter = TransientPrinter(Timekeeper())
@staticmethod
def GetICoCoMajorVersion(): # pylint: disable=invalid-name
"""! (Mandatory) Return ICoCo interface major version number.
@return (int) ICoCo interface major version number (2 at present)
"""
return 2
def getMEDCouplingMajorVersion(self):
"""! (Optional) Get MEDCoupling major version, if the code was built with MEDCoupling support.
Mandatory if the code is built with MEDCoupling support.
This can be used to assess compatibility between codes when coupling them.
@return (int) the MEDCoupling major version number (typically 7, 8, 9, ...).
"""
raise NotImplementedError
def isMEDCoupling64Bits(self):
"""! (Optional) Indicate whether the code was built with a 64-bits version of MEDCoupling.
Mandatory if the code is built with MEDCoupling support.
This can be used to assess compatibility between codes when coupling them.
@return (bool) True if the code was built with a 64-bits version of MEDCoupling.
"""
raise NotImplementedError
def setDataFile(self, datafile):
"""! (Optional) Provide the relative path of a data file to be used by the code.
This method must be called before initialize().
@param datafile (string) relative path to the data file.
@throws AssertionError if called multiple times or after initialize().
@throws ValueError if an invalid path is provided.
"""
raise NotImplementedError
def setMPIComm(self, mpicomm):
"""! (Optional) Provide the MPI communicator to be used by the code for parallel computations.
This method must be called before initialize(). The communicator should include all the processes
to be used by the code. For a sequential code, the call to setMPIComm is optional or mpicomm should be None.
@param mpicomm (mpi4py.Comm) mpi4py communicator.
@throws AssertionError if called multiple times or after initialize().
"""
raise NotImplementedError
def init(self):
"""! This is a recommanded wrapper for initialize().
It works with term() in order to guarantee that initialize() and terminate() are called only once:
- initialize() is called at the first call of init().
- terminate() is called when the number of calls to term() is equal to the number of calls to init().
init() also stores the return value of initialize() instead of returning it. The output is accessible with getInitStatus().
@warning This method, in association with getInitStatus(), should always be used inside C3PO instead of initialize()
which is not adapted to MPI Master-Workers paradigm.
@warning This method should never be redefined: define initialize() instead!
"""
if self._initNb == 0:
self._initStatus = self.initialize()
self._initNb += 1
def getInitStatus(self):
"""! Return the output status of the last call to initialize() made through init().
@return (bool) True if all OK, otherwise False.
@warning This method, in association with init(), should always be used inside C3PO instead of initialize() which
is not adapted to MPI Master-Workers paradigm.
@warning This method should never be redefined: define initialize() instead!
"""
return self._initStatus
def initialize(self):
"""! (Mandatory) Initialize the current problem instance.
In this method the code should allocate all its internal structures and be ready to execute. File reads, memory
allocations, and other operations likely to fail should be performed here, and not in the constructor (and not in
the setDataFile() or in the setMPIComm() methods either).
This method must be called only once (after a potential call to setMPIComm() and/or setDataFile()) and cannot be
called again before terminate() has been performed.
@return (bool) True if all OK, otherwise False.
@throws AssertionError if called multiple times.
@warning This method is not adapted to MPI Master-Workers paradigm. init() and getInitStatus() methods should
be used in C3PO instead.
"""
raise NotImplementedError
def term(self):
"""! This is a recommanded wrapper for terminate().
It works with init() in order to guarantee that initialize() and terminate() are called only once:
- initialize() is called at the first call of init().
- terminate() is called when the number of calls to term() is equal to the number of calls to init().
@warning This method should be used inside C3PO instead of terminate().
@warning This method should never be redefined: define terminate() instead!
"""
self._initNb = self._initNb - 1 if self._initNb > 0 else 0
if self._initNb <= 0:
self.terminate()
def terminate(self):
"""! (Mandatory) Terminate the current problem instance and release all allocated resources.
Terminate the computation, free the memory and save whatever needs to be saved. This method is called once
at the end of the computation or after a non-recoverable error.
No other ICoCo method except setDataFile(), setMPIComm() and initialize() may be called after this.
@throws AssertionError if called before initialize() or after terminate().
@throws AssertionError if called inside the TIME_STEP_DEFINED context (see PhysicsDriver documentation).
"""
raise NotImplementedError
def getInitNb(self):
"""! Return the number of times init() has been called but not term().
This method is made to work with the wrappers init() and term(). It indicates the number of term() that are
still needed to trigger terminate().
@return (int) The number of times init() has been called but not term().
"""
return self._initNb
def presentTime(self):
"""! (Mandatory) Return the current time of the simulation.
Can be called any time between initialize() and terminate().
The current time can only change during a call to validateTimeStep() or to resetTime().
@return (float) the current (physical) time of the simulation.
@throws AssertionError if called before initialize() or after terminate().
"""
raise NotImplementedError
def computeTimeStep(self):
"""! (Mandatory) Return the next preferred time step (time increment) for this code, and whether the code
wants to stop.
Both data are only indicative, the supervisor is not required to take them into account. This method is
however marked as mandatory, since most of the coupling schemes expect the code to provide this
information (those schemes then typically compute the minimum of the time steps of all the codes being coupled).
Hence a possible implementation is to return a huge value, if a precise figure can not be computed.
Can be called whenever the code is outside the TIME_STEP_DEFINED context (see PhysicsDriver documentation).
@return (float, bool) a tuple (dt, stop).
dt is the preferred time step for this code (only valid if stop is False).
stop is True if the code wants to stop. It can be used for example to indicate that, according to
a certain criterion, the end of the transient computation is reached from the code point of view.
@throws AssertionError if called inside the TIME_STEP_DEFINED context (see PhysicsDriver documentation).
@throws AssertionError if called before initialize() or after terminate().
"""
raise NotImplementedError
def initTimeStep(self, dt):
"""! (Mandatory) Provide the next time step (time increment) to be used by the code.
After this call (if successful), the computation time step is defined to ]t, t + dt] where t is the value
returned by presentTime(). The code enters the TIME_STEP_DEFINED context.
A time step = 0. may be used when the stationaryMode is set to True for codes solving directly for
the steady-state.
@param dt (float) the time step to be used by the code.
@return (bool) False means that given time step is not compatible with the code time scheme.
@throws AssertionError if called inside the TIME_STEP_DEFINED context (see PhysicsDriver documentation).
@throws AssertionError if called before initialize() or after terminate().
@throws ValueError if dt is invalid (dt < 0.0).
"""
raise NotImplementedError
def solve(self):
    """! Run solveTimeStep() and keep its result instead of returning it.
    The stored result can be retrieved afterwards with getSolveStatus().
    @warning Always use this method together with getSolveStatus() inside C3PO rather than calling
    solveTimeStep() directly: the pair behaves better with MPI.
    @warning Never redefine this method: redefine solveTimeStep() instead!
    """
    status = self.solveTimeStep()
    self._solveStatus = status
def getSolveStatus(self):
    """! Give access to the result of the last solveTimeStep() call made through solve().
    @return (bool) True if the computation succeeded, False otherwise.
    @warning Always use this method together with solve() inside C3PO rather than calling
    solveTimeStep() directly: the pair behaves better with MPI.
    @warning Never redefine this method: redefine solveTimeStep() instead!
    """
    return self._solveStatus
def solveTimeStep(self):
    """! (Mandatory) Perform the computation on the current time interval.
    Can be called whenever the code is inside the TIME_STEP_DEFINED context (see PhysicsDriver documentation).
    @return (bool) True if computation was successful, False otherwise.
    @throws AssertionError if called outside the TIME_STEP_DEFINED context (see PhysicsDriver documentation).
    @throws AssertionError if called several times without a call to validateTimeStep() or to abortTimeStep().
    @warning This method is not adapted to MPI Master-Workers paradigm. solve() and getSolveStatus() methods should be
    used with C3PO instead.
    """
    # Abstract ICoCo method: concrete PhysicsDriver implementations must override it.
    raise NotImplementedError
def validateTimeStep(self):
    """! (Mandatory) Validate the computation performed by solveTimeStep().
    Can be called whenever the code is inside the TIME_STEP_DEFINED context (see PhysicsDriver documentation).
    After this call:
    - the present time has been advanced to the end of the computation time step
    - the computation time step is undefined (the code leaves the TIME_STEP_DEFINED context).
    @throws AssertionError if called outside the TIME_STEP_DEFINED context (see PhysicsDriver documentation).
    @throws AssertionError if called before the solveTimeStep() method.
    """
    # Abstract ICoCo method: concrete PhysicsDriver implementations must override it.
    raise NotImplementedError
def setStationaryMode(self, stationaryMode):
    """! (Mandatory) Set whether the code should compute a stationary solution or a transient one.
    New in version 2 of ICoCo. By default the code is assumed to be in stationary mode False (i.e. set up
    for a transient computation).
    If set to True, solveTimeStep() can be used either to solve a time step in view of an asymptotic solution,
    or to solve directly for the steady-state. In this last case, a time step = 0. can be used with initTimeStep()
    (whose call is always needed).
    The stationary mode status of the code can only be modified by this method (or by a call to terminate()
    followed by initialize()).
    Can be called whenever the code is outside the TIME_STEP_DEFINED context (see PhysicsDriver documentation).
    @param stationaryMode (bool) True if the code should compute a stationary solution.
    @throws AssertionError if called inside the TIME_STEP_DEFINED context (see PhysicsDriver documentation).
    @throws AssertionError if called before initialize() or after terminate().
    """
    # Abstract ICoCo method: concrete PhysicsDriver implementations must override it.
    raise NotImplementedError
def getStationaryMode(self):
    """! (Mandatory) Indicate whether the code should compute a stationary solution or a transient one.
    See also setStationaryMode().
    Can be called whenever the code is outside the TIME_STEP_DEFINED context (see PhysicsDriver documentation).
    @return (bool) True if the code has been set to compute a stationary solution.
    @throws AssertionError if called inside the TIME_STEP_DEFINED context (see PhysicsDriver documentation).
    @throws AssertionError if called before initialize() or after terminate().
    """
    # Abstract ICoCo method: concrete PhysicsDriver implementations must override it.
    raise NotImplementedError
def isStationary(self):
    """! (Optional) Return whether the solution is constant on the computation time step.
    Used to know if the steady-state has been reached. This method can be called whenever the computation time step
    is not defined.
    @return (bool) True if the solution is constant on the computation time step.
    @throws AssertionError if called inside the TIME_STEP_DEFINED context (see PhysicsDriver documentation),
    meaning we shouldn't request this information while the computation of a new time step is in progress.
    @throws AssertionError if called before initialize() or after terminate().
    """
    # Abstract ICoCo method: codes that can detect steady-state should override it.
    raise NotImplementedError
def abortTimeStep(self):
    """! (Optional) Abort the computation on the current time step.
    Can be called whenever the computation time step is defined, instead of validateTimeStep().
    After this call, the present time is left unchanged, and the computation time step is undefined
    (the code leaves the TIME_STEP_DEFINED context).
    @throws AssertionError if called outside the TIME_STEP_DEFINED context (see PhysicsDriver documentation).
    """
    # Abstract ICoCo method: codes supporting time-step abortion should override it.
    raise NotImplementedError
def resetTime(self, time_):
    """! (Optional) Reset the current time of the PhysicsDriver to a given value.
    New in version 2 of ICoCo.
    Particularly useful for the initialization of complex transients: the starting point of the transient
    of interest is computed first, the time is reset to 0, and then the actual transient of interest starts with proper
    initial conditions, and global time 0.
    Can be called outside the TIME_STEP_DEFINED context (see PhysicsDriver documentation).
    @param time_ (float) the new current time.
    @throws AssertionError if called inside the TIME_STEP_DEFINED context (see PhysicsDriver documentation).
    @throws AssertionError if called before initialize() or after terminate().
    """
    # Abstract ICoCo method: codes supporting time reset should override it.
    raise NotImplementedError
def iterate(self):
    """! Run iterateTimeStep() and keep its result instead of returning it.
    The stored result can be retrieved afterwards with getIterateStatus().
    @warning Always use this method together with getIterateStatus() inside C3PO rather than calling
    iterateTimeStep() directly: the pair behaves better with MPI.
    @warning Never redefine this method: redefine iterateTimeStep() instead!
    """
    status = self.iterateTimeStep()
    self._iterateStatus = status
def getIterateStatus(self):
    """! Give access to the result of the last iterateTimeStep() call made through iterate().
    @return (bool, bool) a tuple (succeed, converged):
        succeed = False if the computation failed;
        converged = True if the solution is not evolving any more.
    @warning Always use this method together with iterate() inside C3PO rather than calling
    iterateTimeStep() directly: the pair behaves better with MPI.
    @warning Never redefine this method: redefine iterateTimeStep() instead!
    """
    return self._iterateStatus
def iterateTimeStep(self):
    """! (Optional) Perform a single iteration of computation inside the time step.
    This method is relevant for codes having inner iterations for the computation of a single time step.
    Calling iterateTimeStep() until converged is True is equivalent to calling solveTimeStep(), within the code's
    convergence threshold.
    Can be called (potentially several times) inside the TIME_STEP_DEFINED context (see PhysicsDriver documentation).
    @return (bool, bool) a tuple (succeed, converged).
    succeed = False if the computation fails.
    converged = True if the solution is not evolving any more.
    @throws AssertionError if called outside the TIME_STEP_DEFINED context (see PhysicsDriver documentation).
    @warning This method is not adapted to MPI Master-Workers paradigm.
    iterate() and getIterateStatus() methods should be used with C3PO instead.
    """
    # Abstract ICoCo method: codes exposing inner iterations should override it.
    raise NotImplementedError
def save(self, label, method):
    """! (Optional) Save the state of the code.
    The saved state is identified by the combination of label and method arguments.
    If save() has already been called with the same two arguments, the saved state is overwritten.
    @param label (int) a user- (or code-) defined value identifying the state.
    @param method (string) a string specifying which method is used to save the state of the code. A code can provide
    different methods (for example in memory, on disk, etc.).
    @throws AssertionError if called inside the TIME_STEP_DEFINED context (see PhysicsDriver documentation),
    meaning we shouldn't save a previous time step while the computation of a new time step is in progress.
    @throws AssertionError if called before initialize() or after terminate().
    @throws ValueError if the method or label argument is invalid.
    """
    # Abstract ICoCo method: codes supporting save/restore should override it.
    raise NotImplementedError
def restore(self, label, method):
    """! (Optional) Restore the state of the code.
    After restore, the code should behave exactly like after the corresponding call to save (except of course for
    save/restore methods, since the list of saved states may have changed).
    The state to be restored is identified by the combination of label and method arguments.
    The save() method must have been called at some point or in some previous run with this combination.
    @param label (int) a user- (or code-) defined value identifying the state.
    @param method (string) a string specifying which method was used to save the state of the code. A code can provide
    different methods (for example in memory, on disk, etc.).
    @throws AssertionError if called inside the TIME_STEP_DEFINED context (see PhysicsDriver documentation),
    meaning we shouldn't restore a previous time step while the computation of a new time step is in progress.
    @throws AssertionError if called before initialize() or after terminate().
    @throws ValueError if the method or label argument is invalid.
    """
    # Abstract ICoCo method: codes supporting save/restore should override it.
    raise NotImplementedError
def forget(self, label, method):
    """! (Optional) Discard a previously saved state of the code.
    After this call, the save-point cannot be restored anymore. This method can be used to free the space occupied by
    unused saved states.
    @param label (int) a user- (or code-) defined value identifying the state.
    @param method (string) a string specifying which method was used to save the state of the code. A code can provide
    different methods (for example in memory, on disk, etc.).
    @throws AssertionError if called before initialize() or after terminate().
    @throws ValueError if the method or label argument is invalid.
    """
    # Abstract ICoCo method: codes supporting save/restore should override it.
    raise NotImplementedError
def setTransientLogger(self, transientLogger):
    """! Set the logger used by the solveTransient() method.
    @param transientLogger (c3po.services.TransientLogger.TransientLogger) the logger instance to use.
    """
    printer = self._transientPrinter
    printer.setLogger(transientLogger)
def setTransientPrintLevel(self, level):
    """! Choose the verbosity of the solveTransient() method.
    @param level (int) integer in range [0;2]: 0 prints nothing, 1 keeps only the first and last lines,
        2 keeps everything. Default = 0.
    """
    printer = self._transientPrinter.getPrinter()
    printer.setPrintLevel(level)
def solveTransient(self, tmax, finishAtTmax=False, stopIfStationary=False):
    """! Advance the PhysicsDriver in time until the time tmax is reached or the code asks to stop.
    The code can ask to stop either through computeTimeStep() (always taken into account) or through
    isStationary() (only taken into account if stopIfStationary is set to True).
    @param tmax (float) maximum time to be reached (compared with presentTime()).
    @param finishAtTmax (bool) if set to True, the method ends with time = tmax (instead of time >= tmax).
        In case the PhysicsDriver asks to stop before tmax is reached, resetTime(tmax) is called.
    @param stopIfStationary (bool) if set to True, the method stops also if isStationary() returns True.
    """
    currentTime = self.presentTime()
    self._transientPrinter.initTransient(self, tmax, finishAtTmax, stopIfStationary, currentTime)
    (dt, stop) = self.computeTimeStep()
    while currentTime < tmax - 1.E-8 * min(tmax, dt) and not stop:
        if finishAtTmax and currentTime + 1.5 * dt >= tmax:
            # Adjust the size of the last step(s) so that the transient ends exactly at tmax.
            if currentTime + dt >= tmax - dt * 1.E-4:
                dt = tmax - currentTime
            else:
                dt = 0.5 * (tmax - currentTime)
        self.initTimeStep(dt)
        self.solve()
        if self.getSolveStatus():
            self.validateTimeStep()
            currentTime = self.presentTime()
            self._transientPrinter.logValidate(dt, currentTime)
            (dt, stop) = self.computeTimeStep()
            if stopIfStationary:
                stop = stop or self.isStationary()
        else:
            self.abortTimeStep()
            currentTime = self.presentTime()
            self._transientPrinter.logAbort(dt, currentTime)
            (newDt, stop) = self.computeTimeStep()
            if dt == newDt:
                # Retrying the very same step would loop forever: fail loudly instead.
                raise Exception("PhysicsDriver.solveTransient : we are about to repeat a failed time-step calculation !")
            dt = newDt
    if stop and finishAtTmax:
        self.resetTime(tmax)
        currentTime = self.presentTime()
    self._transientPrinter.terminateTransient(currentTime, stop, stopIfStationary and self.isStationary())
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contain the class CollaborativeDataManager. """
from __future__ import print_function, division
import math
from c3po.DataManager import DataManager
from c3po.CollaborativeObject import CollaborativeObject
class CollaborativeDataManager(DataManager, CollaborativeObject):
    """! CollaborativeDataManager is a DataManager that handles a set of DataManager as a single one. """

    def __init__(self, dataManagers):
        """! Build a CollaborativeDataManager object.
        @param dataManagers a list of DataManager.
        """
        self.dataManagers = dataManagers
        # Indices (in self.dataManagers) of the DataManager to skip in the "constant" operators
        # normMax(), norm2() and dot().
        self._indexToIgnore = []
        CollaborativeObject.__init__(self, self.dataManagers)

    def ignoreForConstOperators(self, indexToIgnore):
        """! INTERNAL Set the indices of the DataManager to ignore in normMax(), norm2() and dot(). """
        # In-place slice assignment keeps the same list object (it may be shared with clones).
        self._indexToIgnore[:] = indexToIgnore[:]

    def clone(self):
        """! Return a clone of self.
        @return A clone of self. Data are copied.
        """
        return self * 1.

    def cloneEmpty(self):
        """! Return a clone of self without copying the data.
        @return An empty clone of self.
        """
        dataClone = [data.cloneEmpty() for data in self.dataManagers]
        output = CollaborativeDataManager(dataClone)
        output.ignoreForConstOperators(self._indexToIgnore)
        return output

    def copy(self, other):
        """! Copy data of other in self.
        @param other a CollaborativeDataManager with the same list of data than self.
        @throw Exception if self and other are not consistent.
        """
        self.checkBeforeOperator(other)
        for mine, theirs in zip(self.dataManagers, other.dataManagers):
            mine.copy(theirs)

    def normMax(self):
        """! Return the infinite norm.
        @return The max of the absolute values of the scalars and of the infinite norms of the MED fields.
        """
        norm = 0.
        for idata, data in enumerate(self.dataManagers):
            if idata not in self._indexToIgnore:
                localNorm = data.normMax()
                if localNorm > norm:
                    norm = localNorm
        return norm

    def norm2(self):
        """! Return the norm 2.
        @return sqrt(sum_i(val[i] * val[i])) where val[i] stands for each scalar and each component of the MED fields.
        """
        squareSum = 0.
        for idata, data in enumerate(self.dataManagers):
            if idata not in self._indexToIgnore:
                localNorm = data.norm2()
                squareSum += localNorm * localNorm
        return math.sqrt(squareSum)

    def checkBeforeOperator(self, other):
        """! INTERNAL Make basic checks before the call of an operator. """
        if len(self.dataManagers) != len(other.dataManagers):
            raise Exception("CollaborativeDataManager.checkBeforeOperator : we cannot call an operator between two CollaborativeDataManager with different number of DataManager.")

    def __add__(self, other):
        """! Return self + other.
        Use "+" to call it. For example a = b + c.
        @param other a CollaborativeDataManager with the same list of data than self.
        @return a new (consistent with self) CollaborativeDataManager where the data are added.
        @throw Exception if self and other are not consistent.
        """
        self.checkBeforeOperator(other)
        newData = self.cloneEmpty()
        # In-place slice assignment: the list object may be referenced by the CollaborativeObject base.
        newData.dataManagers[:] = [mine + theirs for mine, theirs in zip(self.dataManagers, other.dataManagers)]
        return newData

    def __iadd__(self, other):
        """! Add other in self (in place addition).
        Use "+=" to call it. For example a += b.
        @param other a CollaborativeDataManager with the same list of data than self.
        @return self.
        @throw Exception if self and other are not consistent.
        """
        self.checkBeforeOperator(other)
        # The augmented assignment is done through the subscript so that the result is stored back
        # even if the held DataManager does not implement a true in-place __iadd__.
        for idata, theirs in enumerate(other.dataManagers):
            self.dataManagers[idata] += theirs
        return self

    def __sub__(self, other):
        """! Return self - other.
        Use "-" to call it. For example a = b - c.
        @param other a CollaborativeDataManager with the same list of data than self.
        @return a new (consistent with self) CollaborativeDataManager where the data are subtracted.
        @throw Exception if self and other are not consistent.
        """
        self.checkBeforeOperator(other)
        newData = self.cloneEmpty()
        newData.dataManagers[:] = [mine - theirs for mine, theirs in zip(self.dataManagers, other.dataManagers)]
        return newData

    def __isub__(self, other):
        """! Subtract other to self (in place subtraction).
        Use "-=" to call it. For example a -= b.
        @param other a CollaborativeDataManager with the same list of data than self.
        @return self.
        @throw Exception if self and other are not consistent.
        """
        self.checkBeforeOperator(other)
        for idata, theirs in enumerate(other.dataManagers):
            self.dataManagers[idata] -= theirs
        return self

    def __mul__(self, scalar):
        """! Return scalar * self.
        Use "*" to call it. For example a = b * c. The scalar first.
        @param scalar a scalar value.
        @return a new (consistent with self) CollaborativeDataManager where the data are multiplied by scalar.
        """
        newData = self.cloneEmpty()
        newData.dataManagers[:] = [data * scalar for data in self.dataManagers]
        return newData

    def __imul__(self, scalar):
        """! Multiply self by scalar (in place multiplication).
        Use "*=" to call it. For example a *= b.
        @param scalar a scalar value.
        @return self.
        """
        for idata in range(len(self.dataManagers)):
            self.dataManagers[idata] *= scalar
        return self

    def imuladd(self, scalar, other):
        """! Add in self scalar * other (in place operation).
        In order to do so, other *= scalar and other *= 1./scalar are done.
        For example a.imuladd(b, c).
        @param scalar a scalar value.
        @param other a CollaborativeDataManager with the same list of data than self.
        @return self.
        @throw Exception if self and other are not consistent.
        @warning other is temporarily scaled and then scaled back: floating-point round-off may slightly modify it.
        """
        if scalar == 0:
            return self
        self.checkBeforeOperator(other)
        other *= scalar
        self += other
        other *= 1. / scalar
        return self

    def dot(self, other):
        """! Return the scalar product of self with other.
        @param other a CollaborativeDataManager with the same list of data than self.
        @return the scalar product of self with other.
        @throw Exception if self and other are not consistent.
        """
        self.checkBeforeOperator(other)
        result = 0.
        for idata, (mine, theirs) in enumerate(zip(self.dataManagers, other.dataManagers)):
            if idata not in self._indexToIgnore:
                result += mine.dot(theirs)
        return result
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contain the class TimeAccumulator. """
from __future__ import print_function, division
from c3po.PhysicsDriver import PhysicsDriver
from c3po.services.Printer import warning
class SaveAtInitTimeStep(object):
    """! Enumeration of the saving modes available for TimeAccumulator.
    Values:
        - never: TimeAccumulator never calls the save() / restore() methods. abortTimeStep() is then
          not available outside of StationaryMode.
        - always: a save is made at every initTimeStep() call and a restore at every abortTimeStep(),
          even in StationaryMode.
        - transient: a save is made at every initTimeStep() call and a restore at every abortTimeStep(),
          as long as the code is not in StationaryMode.
        - transientExceptAfterAbort: when not in StationaryMode, a save is made at every initTimeStep()
          call that does not follow an abortTimeStep() (i.e. on the first attempt to compute a time-step),
          and a restore is made at every abortTimeStep().
    """

    never = 0  # no save/restore at all
    always = 1  # save/restore in every mode
    transient = 2  # save/restore only outside StationaryMode
    transientExceptAfterAbort = 3  # like transient, but no save right after an abort
class TimeAccumulator(PhysicsDriver):
    """! TimeAccumulator wraps a PhysicsDriver into a macro time step procedure (for transients or stationaries (through stabilized transients)).
    In transient calculations, the TimeAccumulator object is driven like any PhysicsDriver, but it will use macro time steps (chosen with
    initTimeStep()) whereas the wrapped PhysicsDriver may use smaller internal time steps (given by its own computeTimeStep() method).
    In stationary calculations, if the stabilizedTransient mode is activated, when a steady-state is asked to the TimeAccumulator object
    (initTimeStep(0) in stationaryMode), a time loop over the wrapped PhysicsDriver object is run until steady-state is reached (isStationary() returns True).
    If the stabilizedTransient mode is not activated, steady-state calculations (initTimeStep(0) in stationaryMode) are directly asked to the wrapped PhysicsDriver.
    """

    def __init__(self, physics, saveParameters=None, stabilizedTransient=(False, 100.)):
        """! Build a TimeAccumulator object.
        @param physics the PhysicsDriver to wrap.
        @param saveParameters the tuple (label, method) that can be used to save / restore results in order to provide abortTimeStep()
            capabilities in transient. The method setSavingMode() allows to choose when saving is done.
        @param stabilizedTransient a tuple (activated, tMax). If activated is set to True, it computes steady states (dt = 0) as stabilized
            transients (until physics.isStationary() returns True or the current time reaches tInit + tMax) and then uses resetTime(tInit)
            in order to keep time consistency (tInit is the returned value of physics.presentTime() before solving). If activated is set to
            False (default value), steady-states (dt = 0) are directly asked to physics.
            The method setStabilizedTransient() allows to modify these data.
        """
        PhysicsDriver.__init__(self)
        self._physics = physics
        self._dt = None  # current macro time-step size (None outside the TIME_STEP_DEFINED context)
        self._timeDifference = 0.  # time already advanced by physics inside the current (not yet validated) macro step
        self._macrodt = None  # if not None, forced value returned by computeTimeStep()
        self._saveParameters = saveParameters
        self._savingMode = SaveAtInitTimeStep.transient
        self._stabilizedTransient = stabilizedTransient
        self._afterAbort = False  # True when the previous macro time step ended with abortTimeStep()

    def setSavingMode(self, savingMode):
        """! Set a saving mode.
        @param savingMode see SaveAtInitTimeStep documentation for available options. Default value is SaveAtInitTimeStep.transient.
        """
        self._savingMode = savingMode

    def setStabilizedTransient(self, stabilizedTransient):
        """! Set stabilized transient data.
        @param stabilizedTransient see parameter stabilizedTransient of the <tt>__init__()</tt> method.
        """
        self._stabilizedTransient = stabilizedTransient

    def setComputedTimeStep(self, dt):
        """! Set time-step size returned by computeTimeStep().
        @param dt (float) time-step size returned by computeTimeStep(). None can be set to use the time step recommended by the hold PhysicsDriver. Default: None.
        """
        self._macrodt = dt

    def getPhysicsDriver(self):
        """! Return the wrapped PhysicsDriver.
        @return the wrapped PhysicsDriver.
        """
        return self._physics

    def getMEDCouplingMajorVersion(self):
        """! See PhysicsDriver.getMEDCouplingMajorVersion(). """
        return self._physics.getMEDCouplingMajorVersion()

    def isMEDCoupling64Bits(self):
        """! See PhysicsDriver.isMEDCoupling64Bits(). """
        return self._physics.isMEDCoupling64Bits()

    def setDataFile(self, datafile):
        """! See PhysicsDriver.setDataFile(). """
        self._physics.setDataFile(datafile)

    def setMPIComm(self, mpicomm):
        """! See PhysicsDriver.setMPIComm(). """
        self._physics.setMPIComm(mpicomm)

    def initialize(self):
        """! See PhysicsDriver.initialize(). """
        self._timeDifference = 0.
        self._afterAbort = False
        self._physics.init()
        return self._physics.getInitStatus()

    def terminate(self):
        """! See PhysicsDriver.terminate(). """
        if self._saveParameters is not None:
            # Best effort: the save-point may not exist (or forget() may be unsupported);
            # never let that prevent the wrapped code from terminating.
            try:
                self._physics.forget(*self._saveParameters)
            except Exception:
                pass
        self._physics.term()

    def presentTime(self):
        """! See PhysicsDriver.presentTime(). """
        return self._physics.presentTime() - self._timeDifference

    def computeTimeStep(self):
        """! See PhysicsDriver.computeTimeStep().
        Return the asked macro time step if set (by setComputedTimeStep(dt)), the preferred time step of the PhysicsDriver otherwise.
        """
        (dtPhysics, stop) = self._physics.computeTimeStep()
        if self._macrodt is not None:
            dtPhysics = self._macrodt
        return (dtPhysics, stop)

    def initTimeStep(self, dt):
        """! See PhysicsDriver.initTimeStep(). """
        self._dt = dt
        if self._dt <= 0 and not self._stabilizedTransient[0]:
            # Steady-state request without stabilized transient: delegate directly.
            return self._physics.initTimeStep(dt)
        if self._dt == 0 and self._stabilizedTransient[0] and not self.getStationaryMode():
            raise AssertionError("TimeAccumulator.initTimeStep : Stationary mode must be activated (setStationaryMode(True)) in order to use a stabilized transient to reach a steady state solution.")
        if self._saveParameters is not None:
            # Save the current state if the saving mode requires it, so that abortTimeStep() can restore it.
            if self._savingMode == SaveAtInitTimeStep.always or (not self.getStationaryMode() and
                    (self._savingMode == SaveAtInitTimeStep.transient or (not self._afterAbort and self._savingMode == SaveAtInitTimeStep.transientExceptAfterAbort))):
                self._physics.save(*self._saveParameters)
        return True

    def solveTimeStep(self):
        """! Make the PhysicsDriver to reach the end of the macro time step asked to TimeAccumulator
        using its own time advance procedure.
        """
        timeInit = self._physics.presentTime()
        if self._dt > 0.:
            self._physics.solveTransient(timeInit + self._dt, finishAtTmax=True)
            self._timeDifference += self._physics.presentTime() - timeInit
        elif self._stabilizedTransient[0]:
            # Steady-state computed as a stabilized transient: run until stationary (or tMax),
            # then rewind the clock so that the macro step looks instantaneous.
            self._physics.solveTransient(timeInit + self._stabilizedTransient[1], stopIfStationary=True)
            self.resetTime(timeInit)
            return self.isStationary()
        else:
            self._physics.solve()
        return self._physics.getSolveStatus()

    def validateTimeStep(self):
        """! See PhysicsDriver.validateTimeStep(). """
        if self._dt <= 0 and not self._stabilizedTransient[0]:
            self._physics.validateTimeStep()
        self._dt = None
        self._timeDifference = 0.
        self._afterAbort = False

    def setStationaryMode(self, stationaryMode):
        """! See PhysicsDriver.setStationaryMode(). """
        self._physics.setStationaryMode(stationaryMode)

    def getStationaryMode(self):
        """! See PhysicsDriver.getStationaryMode(). """
        return self._physics.getStationaryMode()

    def abortTimeStep(self):
        """! See PhysicsDriver.abortTimeStep(). """
        if not self.getStationaryMode():
            if self._saveParameters is not None and self._savingMode != SaveAtInitTimeStep.never:
                self._physics.restore(*self._saveParameters)
            else:
                raise Exception("TimeAccumulator.abortTimeStep : not available in transient mode without saveParameters.")
        elif self._saveParameters is not None and self._savingMode == SaveAtInitTimeStep.always:
            self._physics.restore(*self._saveParameters)
        else:
            self._physics.abortTimeStep()
        self._dt = None
        self._timeDifference = 0.
        self._afterAbort = True

    def isStationary(self):
        """! See PhysicsDriver.isStationary(). """
        return self._physics.isStationary()

    def resetTime(self, time_):
        """! See PhysicsDriver.resetTime(). """
        self._physics.resetTime(time_)

    def getInputFieldsNames(self):
        """! See c3po.DataAccessor.DataAccessor.getInputFieldsNames(). """
        return self._physics.getInputFieldsNames()

    def getOutputFieldsNames(self):
        """! See c3po.DataAccessor.DataAccessor.getOutputFieldsNames(). """
        return self._physics.getOutputFieldsNames()

    def getFieldType(self, name):
        """! See c3po.DataAccessor.DataAccessor.getFieldType(). """
        return self._physics.getFieldType(name)

    def getMeshUnit(self):
        """! See c3po.DataAccessor.DataAccessor.getMeshUnit(). """
        return self._physics.getMeshUnit()

    def getFieldUnit(self, name):
        """! See c3po.DataAccessor.DataAccessor.getFieldUnit(). """
        return self._physics.getFieldUnit(name)

    def getInputMEDDoubleFieldTemplate(self, name):
        """! See c3po.DataAccessor.DataAccessor.getInputMEDDoubleFieldTemplate(). """
        return self._physics.getInputMEDDoubleFieldTemplate(name)

    def setInputMEDDoubleField(self, name, field):
        """! See c3po.DataAccessor.DataAccessor.setInputMEDDoubleField(). """
        self._physics.setInputMEDDoubleField(name, field)

    def getOutputMEDDoubleField(self, name):
        """! See c3po.DataAccessor.DataAccessor.getOutputMEDDoubleField(). """
        return self._physics.getOutputMEDDoubleField(name)

    def updateOutputMEDDoubleField(self, name, field):
        """! See c3po.DataAccessor.DataAccessor.updateOutputMEDDoubleField(). """
        return self._physics.updateOutputMEDDoubleField(name, field)

    def getInputMEDIntFieldTemplate(self, name):
        """! See c3po.DataAccessor.DataAccessor.getInputMEDIntFieldTemplate(). """
        return self._physics.getInputMEDIntFieldTemplate(name)

    def setInputMEDIntField(self, name, field):
        """! See c3po.DataAccessor.DataAccessor.setInputMEDIntField(). """
        self._physics.setInputMEDIntField(name, field)

    def getOutputMEDIntField(self, name):
        """! See c3po.DataAccessor.DataAccessor.getOutputMEDIntField(). """
        return self._physics.getOutputMEDIntField(name)

    def updateOutputMEDIntField(self, name, field):
        """! See c3po.DataAccessor.DataAccessor.updateOutputMEDIntField(). """
        return self._physics.updateOutputMEDIntField(name, field)

    def getInputValuesNames(self):
        """! See c3po.DataAccessor.DataAccessor.getInputValuesNames(). """
        return self._physics.getInputValuesNames() + ["macrodt"]

    def getOutputValuesNames(self):
        """! See c3po.DataAccessor.DataAccessor.getOutputValuesNames(). """
        return self._physics.getOutputValuesNames()

    def getValueType(self, name):
        """! See c3po.DataAccessor.DataAccessor.getValueType(). """
        if name == "macrodt":
            return "Double"
        return self._physics.getValueType(name)

    def getValueUnit(self, name):
        """! See c3po.DataAccessor.DataAccessor.getValueUnit(). """
        if name == "macrodt":
            return "s"
        return self._physics.getValueUnit(name)

    def setInputDoubleValue(self, name, value):
        """! See c3po.DataAccessor.DataAccessor.setInputDoubleValue().
        The value associated with the name "macrodt" can be used to set the time-step size returned by computeTimeStep().
        """
        if name == "macrodt":
            warning('setInputDoubleValue("macrodt", value) is deprecated and will soon by deleted. '
                    + "Please use setComputedTimeStep(dt).")
            self._macrodt = value
        else:
            self._physics.setInputDoubleValue(name, value)

    def getOutputDoubleValue(self, name):
        """! See c3po.DataAccessor.DataAccessor.getOutputDoubleValue(). """
        return self._physics.getOutputDoubleValue(name)

    def setInputIntValue(self, name, value):
        """! See c3po.DataAccessor.DataAccessor.setInputIntValue(). """
        self._physics.setInputIntValue(name, value)

    def getOutputIntValue(self, name):
        """! See c3po.DataAccessor.DataAccessor.getOutputIntValue(). """
        return self._physics.getOutputIntValue(name)

    def setInputStringValue(self, name, value):
        """! See c3po.DataAccessor.DataAccessor.setInputStringValue(). """
        self._physics.setInputStringValue(name, value)

    def getOutputStringValue(self, name):
        """! See c3po.DataAccessor.DataAccessor.getOutputStringValue(). """
        return self._physics.getOutputStringValue(name)
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contain the class DataManager. """
from __future__ import print_function, division
class DataManager(object):
    """! DataManager is a class interface (to be implemented) which standardizes methods to handle data outside of codes.

    The methods are mainly the mathematical operators needed by some coupling algorithms.
    """

    def clone(self):
        """! Return a clone of self.

        @return a clone of self. Data are copied.
        """
        raise NotImplementedError

    def cloneEmpty(self):
        """! Return a clone of self without copying the data.

        @return an empty clone of self.
        """
        raise NotImplementedError

    def copy(self, other):
        """! Copy the data of other into self.

        @param other a DataManager holding the same list of data as self.
        @throw Exception if self and other are not consistent.
        """
        raise NotImplementedError

    def normMax(self):
        """! Return the infinite norm.

        @return the infinite norm over all data.
        """
        raise NotImplementedError

    def norm2(self):
        """! Return the norm 2.

        @return sqrt(sum_i(val[i] * val[i])) where val[i] stands for each scalar and each component of the stored data.
        """
        raise NotImplementedError

    def __add__(self, other):
        """! Return self + other.

        Use "+" to call it. For example a = b + c.

        @param other a DataManager holding the same list of data as self.
        @return a new DataManager (consistent with self) holding the sum of the data.
        @throw Exception if self and other are not consistent.
        """
        raise NotImplementedError

    def __iadd__(self, other):
        """! Add other into self (in-place addition).

        Use "+=" to call it. For example a += b.

        @param other a DataManager holding the same list of data as self.
        @return self.
        @throw Exception if self and other are not consistent.
        """
        raise NotImplementedError

    def __sub__(self, other):
        """! Return self - other.

        Use "-" to call it. For example a = b - c.

        @param other a DataManager holding the same list of data as self.
        @return a new DataManager (consistent with self) holding the difference of the data.
        @throw Exception if self and other are not consistent.
        """
        raise NotImplementedError

    def __isub__(self, other):
        """! Substract other from self (in-place subtraction).

        Use "-=" to call it. For example a -= b.

        @param other a DataManager holding the same list of data as self.
        @return self.
        @throw Exception if self and other are not consistent.
        """
        raise NotImplementedError

    def __mul__(self, scalar):
        """! Return scalar * self.

        Use "*" to call it, with the scalar first. For example a = b * c.

        @param scalar a scalar value.
        @return a new DataManager (consistent with self) holding the data multiplied by scalar.
        """
        raise NotImplementedError

    def __imul__(self, scalar):
        """! Multiply self by scalar (in-place multiplication).

        Use "*=" to call it. For example a *= b.

        @param scalar a scalar value.
        @return self.
        """
        raise NotImplementedError

    def imuladd(self, scalar, other):
        """! Add scalar * other into self (in-place operation).

        In order to do so, other *= scalar and other *= 1./scalar are done. For example a.imuladd(b, c).

        @param scalar a scalar value.
        @param other a DataManager holding the same list of data as self.
        @return self.
        @throw Exception if self and other are not consistent.
        """
        raise NotImplementedError

    def dot(self, other):
        """! Return the scalar product of self with other.

        @param other a DataManager holding the same list of data as self.
        @return the scalar product of self with other.
        @throw Exception if self and other are not consistent.
        """
        raise NotImplementedError
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contain the class DataAccessor. """
from __future__ import print_function, division
class DataAccessor(object):
    """! DataAccessor is an abstract class which standardizes I/O (in/out) methods.

    It follows the ICOCO V2 standard. See also PhysicsDriver.
    """

    class ValueType:
        """! The possible types for fields or scalar values. """
        Double = "Double"
        Int = "Int"
        String = "String"

    def getMPIComm(self):
        """! (Optional) Return the MPI communicator used by the code for parallel computations.

        @return (mpi4py.Comm) mpi4py communicator.
        """
        raise NotImplementedError

    def getInputFieldsNames(self):
        """! (Optional) Get the list of input fields accepted by the code.

        @return (list) the names of the fields that the code accepts as inputs.
        @throws AssertionError if implemented in a PhysicsDriver and called before initialize() or after terminate().
        """
        raise NotImplementedError

    def getOutputFieldsNames(self):
        """! (Optional) Get the list of output fields that the code can provide.

        @return (list) the names of the fields that the code can produce.
        @throws AssertionError if implemented in a PhysicsDriver and called before initialize() or after terminate().
        """
        raise NotImplementedError

    def getFieldType(self, name):
        """! (Optional) Get the type of a field managed by the code.

        (New in version 2) The three possible types are 'Double', 'Int' and 'String', as defined by ValueType.

        @param name (string) field name.
        @return (string) 'Double', 'Int' or 'String', as defined by ValueType.
        @throws AssertionError if implemented in a PhysicsDriver and called before initialize() or after terminate().
        @throws ValueError if the field name is invalid.
        """
        raise NotImplementedError

    def getMeshUnit(self):
        """! (Optional) Get the (length) unit used to define the meshes supporting the fields.

        (New in version 2)

        @return (string) length unit in which the mesh coordinates should be understood (e.g. 'm', 'cm', ...).
        @throws AssertionError if implemented in a PhysicsDriver and called before initialize() or after terminate().
        """
        raise NotImplementedError

    def getFieldUnit(self, name):
        """! (Optional) Get the physical unit used for a given field.

        (New in version 2)

        @param name (string) field name.
        @return (string) unit in which the field values should be understood (e.g. 'W', 'J', 'Pa', ...).
        @throws AssertionError if implemented in a PhysicsDriver and called before initialize() or after terminate().
        @throws ValueError if the field name is invalid.
        """
        raise NotImplementedError

    def getInputMEDDoubleFieldTemplate(self, name):
        """! (Optional) Retrieve an empty shell for an input field, to be filled by the caller and then
        given back to the code via setInputMEDDoubleField().

        The code returns a field carrying all the data that represents the context of the field (i.e. its
        support mesh, its discretization -- on nodes, on elements, ...). The remaining job for the caller
        is to fill the actual values of the field itself; when this is done, the field can be sent back to
        the code through setInputMEDDoubleField().
        This method is not mandatory but is useful to know the mesh, discretization... on which an input
        field is expected. It is required by C3PO remapping functionalities.
        See PhysicsDriver documentation for more details on the time semantic of a field.

        @param name (string) name of the field for which we would like the empty shell.
        @return (medcoupling.MEDCouplingFieldDouble) field with all the contextual information.
        @throws AssertionError if implemented in a PhysicsDriver and called before initialize() or after terminate().
        @throws ValueError if the field name is invalid.
        """
        raise NotImplementedError

    def setInputMEDDoubleField(self, name, field):
        """! (Optional) Provide the code with input data in the form of a MEDCouplingFieldDouble.

        The method getInputMEDDoubleFieldTemplate(), if implemented, may be used first to prepare an empty
        shell of the field to pass to the code.
        See PhysicsDriver documentation for more details on the time semantic of a field.

        @param name (string) name of the field that is given to the code.
        @param field (medcoupling.MEDCouplingFieldDouble) field containing the input data to be read by the
        code. The name of the field set on this instance (with the Field::setName() method) should not be
        checked. However its time value should be, to ensure it is within the proper time interval ]t, t+dt].
        @throws AssertionError if implemented in a PhysicsDriver and called before initialize() or after terminate().
        @throws ValueError if the field name ('name' parameter) is invalid.
        @throws ValueError if the time property of 'field' does not belong to the currently computed time step ]t, t + dt].
        """
        raise NotImplementedError

    def getOutputMEDDoubleField(self, name):
        """! (Optional) Return output data from the code in the form of a MEDCouplingFieldDouble.

        See PhysicsDriver documentation for more details on the time semantic of a field.

        @param name (string) name of the field that the caller requests from the code.
        @return (medcoupling.MEDCouplingFieldDouble) field with the data read by the code. Its name and
        time properties should be set in accordance with the 'name' parameter and with the current time
        step being computed.
        @throws AssertionError if implemented in a PhysicsDriver and called before initialize() or after terminate().
        @throws ValueError if the field name is invalid.
        """
        raise NotImplementedError

    def updateOutputMEDDoubleField(self, name, field):
        """! (Optional) Update a previously retrieved output field.

        (New in version 2) This method allows the code to implement a more efficient update of a given
        output field, thus avoiding the caller to invoke getOutputMEDDoubleField() each time.
        A previous call to getOutputMEDDoubleField() with the same name must have been done prior to this
        call. The code should check the consistency of the field object with the requested data (same
        support mesh, discretization -- on nodes, on elements, etc.).
        See PhysicsDriver documentation for more details on the time semantic of a field.

        @param name (string) name of the field that the caller requests from the code.
        @param field (medcoupling.MEDCouplingFieldDouble) object updated with the data read from the code.
        Notably the time indicated in the field should be updated to be within the current time step being
        computed.
        @throws AssertionError if implemented in a PhysicsDriver and called before initialize() or after terminate().
        @throws ValueError if the field name ('name' parameter) is invalid.
        @throws ValueError if the field object is inconsistent with the field being requested.
        """
        raise NotImplementedError

    def getInputMEDIntFieldTemplate(self, name):
        """! Similar to getInputMEDDoubleFieldTemplate() but for MEDCouplingFieldInt. """
        raise NotImplementedError

    def setInputMEDIntField(self, name, field):
        """! Similar to setInputMEDDoubleField() but for MEDCouplingFieldInt. """
        raise NotImplementedError

    def getOutputMEDIntField(self, name):
        """! Similar to getOutputMEDDoubleField() but for MEDCouplingFieldInt. """
        raise NotImplementedError

    def updateOutputMEDIntField(self, name, field):
        """! Similar to updateOutputMEDDoubleField() but for MEDCouplingFieldInt. """
        raise NotImplementedError

    def getInputMEDStringFieldTemplate(self, name):
        """! Similar to getInputMEDDoubleFieldTemplate() but for MEDCouplingFieldString.

        @warning at the time of writing, MEDCouplingFieldString are not yet implemented anywhere.
        """
        raise NotImplementedError

    def setInputMEDStringField(self, name, field):
        """! Similar to setInputMEDDoubleField() but for MEDCouplingFieldString.

        @warning at the time of writing, MEDCouplingFieldString are not yet implemented anywhere.
        """
        raise NotImplementedError

    def getOutputMEDStringField(self, name):
        """! Similar to getOutputMEDDoubleField() but for MEDCouplingFieldString.

        @warning at the time of writing, MEDCouplingFieldString are not yet implemented anywhere.
        """
        raise NotImplementedError

    def updateOutputMEDStringField(self, name, field):
        """! Similar to updateOutputMEDDoubleField() but for MEDCouplingFieldString.

        @warning at the time of writing, MEDCouplingFieldString are not yet implemented anywhere.
        """
        raise NotImplementedError

    def getInputValuesNames(self):
        """! (Optional) Get the list of input scalars accepted by the code.

        @return (list) the names of the scalars that the code accepts as inputs.
        @throws AssertionError if implemented in a PhysicsDriver and called before initialize() or after terminate().
        """
        raise NotImplementedError

    def getOutputValuesNames(self):
        """! (Optional) Get the list of output scalars that the code can provide.

        @return (list) the names of the scalars that the code can return.
        @throws AssertionError if implemented in a PhysicsDriver and called before initialize() or after terminate().
        """
        raise NotImplementedError

    def getValueType(self, name):
        """! (Optional) Get the type of a scalar managed by the code.

        (New in version 2) The three possible types are 'Double', 'Int' and 'String', as defined by ValueType.

        @param name (string) scalar value name.
        @return (string) 'Double', 'Int' or 'String', as defined by ValueType.
        @throws AssertionError if implemented in a PhysicsDriver and called before initialize() or after terminate().
        @throws ValueError if the scalar name is invalid.
        """
        raise NotImplementedError

    def getValueUnit(self, name):
        """! (Optional) Get the physical unit used for a given value.

        (New in version 2)

        @param name (string) scalar value name.
        @return (string) unit in which the scalar value should be understood (e.g. 'W', 'J', 'Pa', ...).
        @throws AssertionError if implemented in a PhysicsDriver and called before initialize() or after terminate().
        @throws ValueError if the value name is invalid.
        """
        raise NotImplementedError

    def setInputDoubleValue(self, name, value):
        """! (Optional) Provide the code with a scalar float data.

        See PhysicsDriver documentation for more details on the time semantic of a scalar value.

        @param name (string) name of the scalar value that is given to the code.
        @param value (float) value passed to the code.
        @throws AssertionError if implemented in a PhysicsDriver and called before initialize() or after terminate().
        @throws ValueError if the scalar name is invalid.
        """
        raise NotImplementedError

    def getOutputDoubleValue(self, name):
        """! (Optional) Retrieve a scalar float value from the code.

        See PhysicsDriver documentation for more details on the time semantic of a scalar value.

        @param name (string) name of the scalar value to be read from the code.
        @return (float) the value read from the code.
        @throws AssertionError if implemented in a PhysicsDriver and called before initialize() or after terminate().
        @throws ValueError if the scalar name is invalid.
        """
        raise NotImplementedError

    def setInputIntValue(self, name, value):
        """! (Optional) Similar to setInputDoubleValue() but for an Int value. """
        raise NotImplementedError

    def getOutputIntValue(self, name):
        """! (Optional) Similar to getOutputDoubleValue() but for an Int value. """
        raise NotImplementedError

    def setInputStringValue(self, name, value):
        """! (Optional) Similar to setInputDoubleValue() but for a String value. """
        raise NotImplementedError

    def getOutputStringValue(self, name):
        """! (Optional) Similar to getOutputDoubleValue() but for a String value. """
        raise NotImplementedError
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contain the class SharedRemappingMulti1D3D. """
from __future__ import print_function, division
import c3po.medcouplingCompat as mc
from c3po.exchangeMethods.SharedRemapping import Remapper, SharedRemapping
def shift1DFields(shiftMap, shiftedFieldPositions, indexTable):
    """! INTERNAL Compute the new positions of the 1D fields after applying a shift map.

    @param shiftMap for each current position, the new position of the associated field (-1 discards the field).
    @param shiftedFieldPositions current position (field index) held by each slot.
    @param indexTable for each field position, the list of associated 2D-grid positions.
    @return (availableFields, newFieldPositions, newIndexTable): the field indexes no longer used, the
    updated field positions and the index table reordered accordingly.
    @throw Exception if shiftMap is inconsistent with shiftedFieldPositions.
    """
    newFieldPositions = [-1] * len(shiftedFieldPositions)
    availableFields = []
    if len(shiftMap) != len(shiftedFieldPositions):
        raise Exception("shift1DFields: the provided shiftMap must contain as many values ({} provided) as the number of 1D fields ({}).".format(len(shiftMap), len(shiftedFieldPositions)))
    # Guard against max() on an empty sequence when there is nothing to shift.
    if shiftMap and max(shiftMap) > len(shiftedFieldPositions) - 1:
        raise Exception("shift1DFields: the provided shiftMap contains values ({}) greater than the number of 1D fields - 1 ({}).".format(max(shiftMap), len(shiftedFieldPositions) - 1))
    # First pass: place explicitly shifted fields; collect discarded ones (shift value < 0).
    for ipos, ifield in enumerate(shiftedFieldPositions):
        if shiftMap[ipos] >= 0:
            if newFieldPositions[shiftMap[ipos]] >= 0:
                raise Exception("shift1DFields: the provided shiftMap contains twice the positive value ({}).".format(shiftMap[ipos]))
            newFieldPositions[shiftMap[ipos]] = ifield
        else:
            availableFields.append(ifield)
    # Second pass: discarded fields fill the remaining free positions, in order.
    count = 0
    for ipos, ifield in enumerate(newFieldPositions):
        if ifield < 0:
            newFieldPositions[ipos] = availableFields[count]
            count += 1
    # Rebuild the index table so that entry i describes the field now at position i.
    newIndexTable = [[] for _ in range(len(indexTable))]
    for ipos, ifield in enumerate(newFieldPositions):
        newIndexTable[ifield] = indexTable[shiftedFieldPositions[ipos]]
    return availableFields, newFieldPositions, newIndexTable
class Multi1D3DRemapper(Remapper):
    """! Allow to share the mesh projection for different SharedRemappingMulti1D3D objects by building them with the same instance of this class. """
    def __init__(self, xCoordinates, yCoordinates, indexTable, weights, meshAlignment=False, offset=[0., 0., 0.], rescaling=1., rotation=0., outsideCellsScreening=False, reverseTransformations=True):
        """! Build a Multi1D3DRemapper object.
        An intermediate inner 3D mesh is built from a 2D grid defined by the parameters.
        The axial coordinates will be read from the 1D fields passed to the remapper.
        Each cell of this 2D grid is associated to a 1D field.
        @param xCoordinates x coordinates of the inner mesh to build.
        @param yCoordinates y coordinates of the inner mesh to build.
        @param indexTable For each position of the 2D grid (x coordinate changes first), the index of the 1D field to associate. Put -1 to
        associate to nothing.
        @param weights Weight of each 1D field to take into account for extensive variables.
        @param meshAlignment see Remapper. The source mesh is the multi1D one and the target mesh the 3D one.
        @param offset see Remapper. The source mesh is the multi1D one and the target mesh the 3D one.
        @param rescaling see Remapper. The source mesh is the multi1D one and the target mesh the 3D one.
        @param rotation see Remapper. The source mesh is the multi1D one and the target mesh the 3D one.
        @param outsideCellsScreening see Remapper.
        @param reverseTransformations see Remapper.
        @warning There seems to be a bug in MEDCoupling that may cause wrong results when rescaling is used with a source mesh (multi1D) of
        nature ExtensiveMaximum or IntensiveConservation. In this case, using reverseTransformations=False should be enough to solve
        the problem.
        """
        Remapper.__init__(self, meshAlignment, offset, rescaling, rotation, outsideCellsScreening, reverseTransformations)
        # _indexTable is the inverse of the constructor parameter: entry i lists the 2D-grid
        # positions associated with 1D field number i (grid positions marked -1 are dropped).
        self._indexTable = [[] for _ in range(max(indexTable) + 1)]
        for position, indice1D in enumerate(indexTable):
            if indice1D >= 0:
                self._indexTable[indice1D].append(position)
        # Current position of each 1D field; modified by shift1DFields().
        self._shiftedFieldPositions = list(range(len(self._indexTable)))
        if len(self._indexTable) != len(weights):
            # NOTE(review): a space is missing before "weight values" in this message ("2weight values").
            raise Exception("Multi1D3DRemapper.__init__ we give " + str(len(weights)) + "weight values instead of " + str(len(self._indexTable))
                            + ", the number of 1D calculations.")
        self._weights = weights
        self._xCoordinates = xCoordinates
        self._yCoordinates = yCoordinates
        # Axial (z) coordinate arrays and 1D cell counts, one entry per 1D field; filled by buildInnerField().
        self._zCoordinateArrays = [[] for _ in range(len(self._indexTable))]
        self._numberOfCellsIn1D = [0] * len(self._indexTable)
        self._innerMesh = None
        # Template 3D field supported by the inner mesh, reused for every projection.
        self._innerField = mc.MEDCouplingFieldDouble(mc.ON_CELLS, mc.ONE_TIME)
        self._innerField.setName("3DFieldFromMulti1D")
        self.isInnerFieldBuilt = False
    def buildInnerField(self, meshes1D):
        """! INTERNAL Build the inner 3D mesh and template field from the axial meshes of the 1D fields. """
        internal1DMeshes = []
        if len(meshes1D) != len(self._indexTable):
            raise Exception("Multi1D3DRemapper.buildInnerField we give " + str(len(meshes1D)) + " 1D meshes instead of " + str(len(self._indexTable)) + ".")
        for imesh, mesh1D in enumerate(meshes1D):
            self._zCoordinateArrays[imesh] = mesh1D.getCoordsAt(0)
            self._numberOfCellsIn1D[imesh] = mesh1D.getNumberOfCells()
            # One Cartesian (x, y, z) column mesh is built per 2D-grid cell associated with this 1D field.
            for fieldIndex in self._indexTable[imesh]:
                internal1DMeshes.append(mc.MEDCouplingCMesh("3DMeshFromMulti1D"))
                # Recover the (x, y) cell indices from the flat grid position (x varies first).
                xIndex = fieldIndex % (len(self._xCoordinates) - 1)
                yIndex = fieldIndex // (len(self._xCoordinates) - 1)
                arrayX = mc.DataArrayDouble([self._xCoordinates[xIndex], self._xCoordinates[xIndex + 1]])
                arrayX.setInfoOnComponent(0, "X [m]")
                arrayY = mc.DataArrayDouble([self._yCoordinates[yIndex], self._yCoordinates[yIndex + 1]])
                arrayY.setInfoOnComponent(0, "Y [m]")
                internal1DMeshes[-1].setCoords(arrayX, arrayY, self._zCoordinateArrays[imesh])
        if len(internal1DMeshes) > 0:
            self._innerMesh = mc.MEDCouplingMesh.MergeMeshes(internal1DMeshes)
        else:
            # Degenerate case: no column at all; keep an empty unstructured mesh.
            self._innerMesh = mc.MEDCouplingUMesh()
        self._innerMesh.setName("3DMeshFromMulti1D")
        self._innerField.setMesh(self._innerMesh)
        array = mc.DataArrayDouble()
        if len(internal1DMeshes) > 0:
            array.alloc(self._innerMesh.getNumberOfCells())
            array.fillWithValue(0.)
        self._innerField.setArray(array)
        self.isInnerFieldBuilt = True
        # The projection matrix must be recomputed on the next exchange.
        self.isInit = False
    def getInnerField(self):
        """! INTERNAL Return the template 3D field supported by the inner mesh. """
        return self._innerField
    def build3DField(self, fields1D, defaultValue=0.):
        """! INTERNAL Merge the provided 1D fields into one 3D field on the inner mesh. """
        resuField = self._innerField.clone(True)
        if len(fields1D) > 0:
            resuField.setNature(fields1D[0].getNature())
        array3D = resuField.getArray()
        array3D.fillWithValue(defaultValue)
        # indexMin tracks the first cell of the current column in the merged 3D array.
        indexMin = 0
        for i, field in enumerate(fields1D):
            array1D = field.getArray()
            if resuField.getNature() == mc.ExtensiveMaximum or resuField.getNature() == mc.ExtensiveConservation:
                # NOTE(review): this scales the input field's own array in place for extensive
                # natures — confirm callers do not reuse these fields afterwards.
                array1D *= self._weights[i]
            # The same 1D values are copied into every column associated with field i.
            for _ in self._indexTable[i]:
                array3D.setPartOfValues1(array1D, indexMin, indexMin + self._numberOfCellsIn1D[i], 1, 0, 1, 1)
                indexMin += self._numberOfCellsIn1D[i]
        return resuField
    def build1DFields(self, field3D):
        """! INTERNAL Split a 3D field on the inner mesh back into its 1D fields. """
        array3D = field3D.getArray()
        fields1D = []
        indexMin = 0
        for i, list1D in enumerate(self._indexTable):
            fields1D.append(mc.MEDCouplingFieldDouble(mc.ON_CELLS, mc.ONE_TIME))
            fields1D[-1].setName(field3D.getName())
            mesh1D = mc.MEDCouplingCMesh("mesh1D")
            mesh1D.setCoords(self._zCoordinateArrays[i])
            fields1D[-1].setMesh(mesh1D)
            array1D = mc.DataArrayDouble()
            array1D.alloc(self._numberOfCellsIn1D[i])
            array1D.fillWithValue(0.)
            # Accumulate the values of every column associated with field i...
            for _ in list1D:
                array1Dtmp = mc.DataArrayDouble()
                array1Dtmp.alloc(self._numberOfCellsIn1D[i])
                array1Dtmp.setContigPartOfSelectedValuesSlice(0, array3D, indexMin, indexMin + self._numberOfCellsIn1D[i], 1)
                indexMin += self._numberOfCellsIn1D[i]
                array1D.addEqual(array1Dtmp)
            # ...then average over the columns.
            if len(list1D) > 0:
                array1D *= 1. / len(list1D)
            if field3D.getNature() == mc.ExtensiveMaximum or field3D.getNature() == mc.ExtensiveConservation:
                # Undo the weighting applied to extensive variables in build3DField().
                array1D *= 1. / self._weights[i]
            fields1D[-1].setArray(array1D)
        return fields1D
    def getNumberOf1DFields(self):
        """! INTERNAL Return the number of 1D fields handled by the remapper. """
        return len(self._indexTable)
    def shift1DFields(self, shiftMap):
        """! This method allows to shift the index of the 1D fields provided through the indexTable parameter of constructor.
        @param shiftMap a list providing for each 1D fields the index (in indexTable) of its new position (-1 can be used to indicate that the field is no more used).
        @return the list of the indexes no more used.
        For example, shiftMap=[3, -1, 1, 2] indicates that at first call field_0 goes to position 3, field_1 is discharged, field_2 goes to 1 and field_3 goes to 2. It returns [1].
        At the second call with the same input, field_0 (now at position 3) goes to 2, field_1 (at 0) goes to 3, field_2 (at 1) is discharged and field_3 (at 2) goes to 1. It returns [2].
        The third call returns [3], the fourth call [0].
        """
        availableFields, shiftedFieldPositions, indexTable = shift1DFields(shiftMap, self._shiftedFieldPositions, self._indexTable)
        self.setShiftedIndex(shiftedFieldPositions, indexTable)
        return availableFields
    def setShiftedIndex(self, shiftedFieldPositions, indexTable):
        """! INTERNAL Install new field positions; the inner field must then be rebuilt. """
        self._shiftedFieldPositions = shiftedFieldPositions
        self._indexTable = indexTable
        self.isInnerFieldBuilt = False
class SharedRemappingMulti1D3D(SharedRemapping):
    """! SharedRemappingMulti1D3D is an ExchangeMethod which projects the input fields one by one before returning them as
    outputs, in the same order.
    See c3po.Exchanger.Exchanger.__init__().
    1D fields are processed in packets using the intermediate mesh defined by the Multi1D3DRemapper object.
    The method assumes that all input fields (or packets) have the same mesh, and produces output fields on identical meshes.
    This output mesh is the one of the first field (or packet) passed to the method (obtained by getInputMEDFieldTemplate).
    The input scalars are returned in the same order, without modification.
    The initialization of the projection method (long operation) is done only once, and can be shared with other instances
    of SharedRemappingMulti1D3D.
    """
    def __init__(self, remapper, reverse=False, defaultValue=0., linearTransform=(1., 0.)):
        """! Build a SharedRemappingMulti1D3D object, to be given to an Exchanger.
        @param remapper A Multi1D3DRemapper object performing the projection. It can thus be shared with other instances of
        SharedRemappingMulti1D3D (its initialization will always be done only once).
        @param reverse see SharedRemapping. Direct is multi1D -> 3D, reverse is 3D -> multi1D.
        @param defaultValue see SharedRemapping.
        @param linearTransform see SharedRemapping.
        """
        SharedRemapping.__init__(self, remapper, reverse, defaultValue, linearTransform)
        # Number of 1D fields forming one packet (one packet maps to one 3D field).
        self._numberOf1DFields = self._remapper.getNumberOf1DFields()
    def __call__(self, fieldsToGet, fieldsToSet, valuesToGet):
        """! Project the input fields one by one before returning them as outputs, in the same order. """
        # Depending on the direction, the 3D fields are either the inputs or the outputs.
        numberOf3DFields = len(fieldsToGet) if self._isReverse else len(fieldsToSet)
        numberOf1DFields = len(fieldsToSet) if self._isReverse else len(fieldsToGet)
        if numberOf3DFields * self._numberOf1DFields != numberOf1DFields:
            msg = "The number of provided fields ({} 3D fields and {} 1D fields) is wrong.\n".format(numberOf3DFields, numberOf1DFields)
            msg += "According to the provided remapper object, there must be {} 1D fields for each 3D fields.".format(self._numberOf1DFields)
            raise Exception(msg)
        if numberOf3DFields == 0:
            return [], valuesToGet
        # The inner 3D mesh is (re)built lazily from the 1D meshes of the current fields.
        if not self._remapper.isInnerFieldBuilt:
            self._remapper.buildInnerField([field.getMesh() for field in (fieldsToSet if self._isReverse else fieldsToGet)])
        if self._isReverse:
            # 3D -> multi1D: project every 3D field onto the inner mesh, then split into 1D fields.
            innerField = self._remapper.getInnerField()
            if numberOf1DFields > 0:
                innerField.setNature(fieldsToSet[0].getNature())
            # NOTE(review): the same inner field instance serves as target template for every 3D field.
            outputFields, outputValues = SharedRemapping.__call__(self, fieldsToGet, [innerField] * len(fieldsToGet), valuesToGet)
            resu = []
            for field3D in outputFields:
                resu += self._remapper.build1DFields(field3D)
            return resu, outputValues
        # multi1D -> 3D: merge each packet of 1D fields into one intermediate 3D field, then project.
        indexFirst = 0
        intermediate3DField = []
        while indexFirst + self._numberOf1DFields <= len(fieldsToGet):
            fields1D = fieldsToGet[indexFirst:indexFirst + self._numberOf1DFields]
            intermediate3DField.append(self._remapper.build3DField(fields1D, self._defaultValue))
            indexFirst += self._numberOf1DFields
        return SharedRemapping.__call__(self, intermediate3DField, fieldsToSet, valuesToGet)
    def getPatterns(self):
        """! See ExchangeMethod.getPatterns. """
        # One packet of _numberOf1DFields 1D fields corresponds to one 3D field; scalars pass one-to-one.
        if self._isReverse:
            return [(1, self._numberOf1DFields, 0, 0), (0, 0, 1, 1)]
        return [(self._numberOf1DFields, 1, 0, 0), (0, 0, 1, 1)]
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contain the class SharedRemapping. """
from __future__ import print_function, division
import pickle
from c3po.medcouplingCompat import MEDCouplingRemapper
from c3po.exchangeMethods.ExchangeMethod import ExchangeMethod
def computeCellsToScreenOut(mesh1, mesh2):
    """! INTERNAL

    Return the ids of the cells of mesh1 whose barycenter lies outside of mesh2.

    @param mesh1 mesh whose cells are tested.
    @param mesh2 mesh used as the reference domain.
    @return an array of cell ids of mesh1 to screen out.
    """
    # MEDCoupling renamed these methods between versions 7 and 9; fall back on the old names.
    # Use 'except Exception' (not a bare 'except') so that KeyboardInterrupt/SystemExit are not swallowed.
    try:
        bary = mesh1.computeCellCenterOfMass()  # MEDCoupling 9
    except Exception:
        bary = mesh1.getBarycenterAndOwner()  # MEDCoupling 7
    _, cellsId = mesh2.getCellsContainingPoints(bary, 1.0e-8)
    # deltaShiftIndex gives, for each point, the number of containing cells: 0 means "outside mesh2".
    dsi = cellsId.deltaShiftIndex()
    try:
        return dsi.findIdsEqual(0)  # MEDCoupling 9
    except Exception:
        return dsi.getIdsEqual(0)  # MEDCoupling 7
class Remapper(object):
    """! Allow to share the mesh projection for different SharedRemapping objects by building them with the same instance of this class. """

    def __init__(self, meshAlignment=False, offset=None, rescaling=1., rotation=0., outsideCellsScreening=False, reverseTransformations=True):
        """! Build a Remapper object.
        @param meshAlignment If set to True, at the initialization phase of the Remapper object, meshes are translated such as their "bounding
        box" are radially centred on (x = 0., y = 0.) and, if the meshes are 3D, have zmin = 0.
        @param offset Value of the offset between the source and the target meshes (>0 on z means that the source mesh is above the target one).
        The given vector is used to translate the source mesh (after the mesh alignment, if any).
        @param rescaling Value of a rescaling factor to be applied between the source and the target meshes (>1 means that the source mesh is
        initially larger than the target one). The scaling is centered on [0., 0.(, 0.)] and is applied to the source mesh after mesh
        alignment or translation, if any.
        @param rotation Value of the rotation between the source and the target meshes. The rotation is centered on [0., 0.(, 0.)] and is about
        the vertical axis. >0 means that the source mesh is rotated of the given angle compared to the target one. The inverse rotation is
        applied to the source mesh, after mesh alignment or translation, if any. pi means half turn.
        @param outsideCellsScreening If set to True, target (and source) cells whose barycentre is outside of source (or target) mesh are screen
        out (defaultValue is assigned to them). It can be useful to screen out cells that are in contact with the other mesh, but that should
        not be intersected by it. On the other hand, it will screen out cells actually intersected if their barycenter is outside of the other
        mesh ! Be careful with this option.
        @param reverseTransformations If set to True, all the transformations (translation, rescaling and rotation) applied in initialize() on
        the provided meshes are reversed at the end of initialize().
        @warning There seems to be a bug in MEDCoupling that may cause wrong results when rescaling is used with a source mesh of nature
        ExtensiveMaximum or IntensiveConservation. In this case, it is necessary to use reverseTransformations=False and to never perform a
        remapping on a field whose underlying mesh has not been rescaled.
        @raises ValueError if rescaling is not strictly positive.
        """
        self.isInit = False  # set to True once initialize() has completed
        self._meshAlignment = meshAlignment
        self._offset = offset
        if rescaling <= 0.:
            raise ValueError("Remapper: rescaling must be > 0!")
        self._rescaling = rescaling
        self._rotation = rotation
        self._outsideCellsScreening = outsideCellsScreening
        self._reverseTransformations = reverseTransformations
        self._cellsToScreenOutSource = []
        self._cellsToScreenOutTarget = []
        self._loadedMatrix = None  # matrix loaded via loadMatrix(), consumed at the first initialize()
        self._remapper = MEDCouplingRemapper()

    def initialize(self, sourceMesh, targetMesh):
        """! INTERNAL Compute (or install the preloaded) remapping matrix between sourceMesh and targetMesh. """
        meshDimension = sourceMesh.getMeshDimension()
        if targetMesh.getMeshDimension() != meshDimension:
            raise Exception("Remapper : the dimension of source and target meshes are not the same ({} and {} respectively).".format(meshDimension, targetMesh.getMeshDimension()))
        offsetAlign = []
        if self._meshAlignment:
            for mesh in [sourceMesh, targetMesh]:
                if meshDimension == 2:
                    [(xmin, xmax), (ymin, ymax)] = mesh.getBoundingBox()
                else:
                    [(xmin, xmax), (ymin, ymax), (zmin, _)] = mesh.getBoundingBox()
                # translate() adds the vector: -zmin (not +zmin) brings the bounding box to zmin = 0,
                # as documented for meshAlignment.
                offsetAlign.append([-0.5 * (xmin + xmax), -0.5 * (ymin + ymax)] + ([-zmin] if meshDimension == 3 else []))
                mesh.translate(offsetAlign[-1])
        if self._offset is not None and self._offset != [0.] * meshDimension:
            sourceMesh.translate([-x for x in self._offset])
        if self._rescaling != 1.:
            sourceMesh.scale([0.] * meshDimension, 1. / self._rescaling)
        if self._rotation != 0.:
            if meshDimension == 2:
                sourceMesh.rotate([0., 0.], self._rotation)
            else:
                sourceMesh.rotate([0., 0., 0.], [0., 0., 1.], self._rotation)
        if self._loadedMatrix is not None:
            self._remapper.setCrudeMatrix(sourceMesh, targetMesh, "P0P0", self._loadedMatrix)
            self._loadedMatrix = None
        else:
            self._remapper.prepare(sourceMesh, targetMesh, "P0P0")
        if self._outsideCellsScreening:
            self._cellsToScreenOutTarget = computeCellsToScreenOut(targetMesh, sourceMesh)
            self._cellsToScreenOutSource = computeCellsToScreenOut(sourceMesh, targetMesh)
        if self._reverseTransformations:
            # Undo the transformations in reverse application order so that the input meshes are left untouched.
            if self._rotation != 0.:
                if meshDimension == 2:
                    sourceMesh.rotate([0., 0.], -self._rotation)
                else:
                    sourceMesh.rotate([0., 0., 0.], [0., 0., 1.], -self._rotation)
            if self._rescaling != 1.:
                sourceMesh.scale([0.] * meshDimension, self._rescaling)
            if self._offset is not None and self._offset != [0.] * meshDimension:
                # Reverse of translate([-x for x in self._offset]): pass the offset vector itself
                # (the previous code wrapped it in an extra list, producing a wrong argument).
                sourceMesh.translate(self._offset)
            if self._meshAlignment:
                sourceMesh.translate([-x for x in offsetAlign[0]])
                targetMesh.translate([-x for x in offsetAlign[1]])
        self.isInit = True

    def directRemap(self, field, defaultValue):
        """! INTERNAL Project field from the source mesh onto the target mesh. """
        outputField = self._remapper.transferField(field, defaultValue)
        # Force defaultValue on target cells previously flagged as outside the source mesh.
        outputField.getArray()[self._cellsToScreenOutTarget] = defaultValue
        return outputField

    def reverseRemap(self, field, defaultValue):
        """! INTERNAL Project field from the target mesh back onto the source mesh. """
        outputField = self._remapper.reverseTransferField(field, defaultValue)
        # Force defaultValue on source cells previously flagged as outside the target mesh.
        outputField.getArray()[self._cellsToScreenOutSource] = defaultValue
        return outputField

    def exportMatrix(self, fileName):
        """! Export remapping matrix on file.
        This file can be loaded using loadMatrix() method in order to save initialization time.
        @param fileName name of the file to write in.
        """
        if not self.isInit:
            raise AssertionError("Remapper.exportMatrix: the object is not initialized! Remapper is usually initialized by the SharedRemapping object using it at the first call.")
        with open(fileName, 'wb') as matrixFile:
            matrix = self._remapper.getCrudeMatrix()
            pickle.dump(matrix, matrixFile)

    def loadMatrix(self, fileName):
        """! Load remapping matrix from file.
        This file is usually written by exportMatrix() method.
        @note This method requires scipy.
        @param fileName name of the file to read from.
        """
        if self.isInit:
            raise AssertionError("Remapper.loadMatrix: the object is already initialized! You can load matrix only before initialization.")
        with open(fileName, 'rb') as matrixFile:
            self._loadedMatrix = pickle.load(matrixFile)
class SharedRemapping(ExchangeMethod):
    """! SharedRemapping is an ExchangeMethod which projects the input fields one by one before returning them as outputs,
    in the same order.

    See c3po.Exchanger.Exchanger.__init__().

    The method assumes that all input fields share the same mesh, and produces output fields on identical meshes
    (the mesh of the first field passed to the method, obtained by getInputMEDFieldTemplate).
    The input scalars are returned in the same order, without modification.
    The initialization of the projection method (long operation) is done only once, and can be shared with other
    instances of SharedRemapping through a common Remapper object.
    """

    def __init__(self, remapper, reverse=False, defaultValue=0., linearTransform=(1., 0.)):
        """! Build a SharedRemapping object, to be given to an Exchanger.
        @param remapper A Remapper object (defined in C3PO) performing the projection. It can be shared with other instances of
        SharedRemapping (its initialization will always be done only once).
        @param reverse Allows the Remapper to be shared with an instance of SharedRemapping performing the reverse exchange (the
        projection is done in the reverse direction when reverse is set to True).
        @param defaultValue The default value assigned, during the projection, to the cells of the target mesh that are not
        intersected by the source mesh.
        @param linearTransform Tuple (a, b): apply the linear function f -> a * f + b to every output field. The transformation
        is applied after the mesh projection.
        """
        self._defaultValue = defaultValue
        self._linearTransform = linearTransform
        self._remapper = remapper
        self._isReverse = reverse

    def initialize(self, fieldsToGet, fieldsToSet):
        """! INTERNAL Trigger the (possibly shared) Remapper initialization on first use. """
        if self._remapper.isInit:
            return
        if self._isReverse:
            sourceMesh, targetMesh = fieldsToSet[0].getMesh(), fieldsToGet[0].getMesh()
        else:
            sourceMesh, targetMesh = fieldsToGet[0].getMesh(), fieldsToSet[0].getMesh()
        self._remapper.initialize(sourceMesh, targetMesh)

    def __call__(self, fieldsToGet, fieldsToSet, valuesToGet):
        """! Project the input fields one by one before returning them as outputs, in the same order. """
        if len(fieldsToSet) != len(fieldsToGet):
            raise Exception("SharedRemapping : there must be the same number of input and output MED fields")
        if not fieldsToGet:
            return [], valuesToGet
        self.initialize(fieldsToGet, fieldsToSet)
        remap = self._remapper.reverseRemap if self._isReverse else self._remapper.directRemap
        outputFields = [remap(field, self._defaultValue) for field in fieldsToGet]
        if self._linearTransform != (1., 0.):
            for outputField in outputFields:
                outputField.applyLin(*self._linearTransform)
        return outputFields, valuesToGet

    def getPatterns(self):
        """! See ExchangeMethod.getPatterns. """
        return [(1, 1, 0, 0), (0, 0, 1, 1)]
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contain the class FixedPointCoupler. """
from __future__ import print_function, division
from c3po.PhysicsDriver import PhysicsDriver
from c3po.Coupler import Coupler
from c3po.CollaborativeDataManager import CollaborativeDataManager
from c3po.services.Printer import Printer
class FixedPointCoupler(Coupler):
    """! FixedPointCoupler inherits from Coupler and proposes a damped fixed point algorithm.
    The class proposes an algorithm for the resolution of F(X) = X. Thus FixedPointCoupler is a Coupler working with :
    - A single PhysicsDriver (possibly a Coupler) defining the calculations to be made each time F is called.
    - A list of DataManager allowing to manipulate the data in the coupling (the X).
    - Two Exchanger allowing to go from the PhysicsDriver to the DataManager and vice versa.
    Each DataManager is normalized with its own norm got after the first iteration.
    They are then used as a single DataManager using CollaborativeDataManager.
    At each iteration we do (with n the iteration number and alpha the damping factor):
    X^{n+1} = alpha * F(X^{n}) + (1 - alpha) * X^{n}
    The convergence criteria is : ||F(X^{n}) - X^{n}|| / ||F(X^{n})|| < tolerance. The default norm used is the infinite norm. setNormChoice() allows to choose another one.
    The default value of tolerance is 1.E-6. Call setConvergenceParameters() to change it.
    The default maximum number of iterations is 100. Call setConvergenceParameters() to change it.
    The default damping factor is 1 (no damping). Call setDampingFactor() to change it.
    """

    def __init__(self, physics, exchangers, dataManagers):
        """! Build a FixedPointCoupler object.
        @param physics list of only one PhysicsDriver (possibly a Coupler).
        @param exchangers list of exactly two Exchanger allowing to go from the PhysicsDriver to the DataManager and vice versa.
        @param dataManagers list of DataManager.
        """
        Coupler.__init__(self, physics, exchangers, dataManagers)
        self._tolerance = 1.E-6
        self._maxiter = 100
        self._dampingFactor = 1.
        self._iterationPrinter = Printer(2)
        self._leaveIfFailed = False
        self._useIterate = False
        self._iter = 0
        if not isinstance(physics, list) or not isinstance(exchangers, list) or not isinstance(dataManagers, list):
            raise Exception("FixedPointCoupler.__init__ physics, exchangers and dataManagers must be lists!")
        if len(physics) != 1:
            raise Exception("FixedPointCoupler.__init__ There must be only one PhysicsDriver")
        if len(exchangers) != 2:
            raise Exception("FixedPointCoupler.__init__ There must be exactly two Exchanger")
        self._data = CollaborativeDataManager(self._dataManagers)
        self._previousData = None  # X^{n} of the previous iteration; (re)built at the first iteration of each time step
        self._normData = 0.

    def setConvergenceParameters(self, tolerance, maxiter):
        """! Set the convergence parameters (tolerance and maximum number of iterations).
        @param tolerance the convergence threshold in ||F(X^{n}) - X^{n}|| / ||F(X^{n})|| < tolerance.
        @param maxiter the maximal number of iterations.
        """
        self._tolerance = tolerance
        self._maxiter = maxiter

    def setDampingFactor(self, dampingFactor):
        """! Set the damping factor of the method.
        @param dampingFactor the damping factor alpha in the formula X^{n+1} = alpha * F(X^{n}) + (1 - alpha) * X^{n}.
        """
        self._dampingFactor = dampingFactor

    def setPrintLevel(self, level):
        """! Set the print level during iterations (0=None, 1 keeps last iteration, 2 prints every iteration).
        @param level integer in range [0;2]. Default: 2.
        """
        if level not in (0, 1, 2):
            raise Exception("FixedPointCoupler.setPrintLevel level should be one of [0, 1, 2]!")
        self._iterationPrinter.setPrintLevel(level)

    def setFailureManagement(self, leaveIfSolvingFailed):
        """! Set if iterations should continue or not in case of solver failure (solveTimeStep returns False).
        @param leaveIfSolvingFailed set False to continue the iterations, True to stop. Default: False.
        """
        self._leaveIfFailed = leaveIfSolvingFailed

    def setUseIterate(self, useIterate):
        """! If True is given, the iterate() method on the given PhysicsDriver is called instead of the solve() method.
        @param useIterate bool. Set True to use iterate(), False to use solve(). Default: False.
        """
        self._useIterate = useIterate

    def iterateTimeStep(self):
        """! Make one iteration of the damped fixed-point algorithm.
        See also c3po.PhysicsDriver.PhysicsDriver.iterateTimeStep().
        """
        physics = self._physicsDrivers[0]
        physics2Data = self._exchangers[0]
        data2physics = self._exchangers[1]
        if self._iter > 0:
            if not self._useIterate:
                # In solve mode the previous resolution must be cancelled before providing the new X^{n}.
                physics.abortTimeStep()
                physics.initTimeStep(self._dt)
            data2physics.exchange()
        if self._useIterate:
            physics.iterate()
        else:
            physics.solve()
        physics2Data.exchange()
        if self._iter == 0:
            # Norms are read once, after the first resolution, and reused for all further iterations.
            self._normData = self.readNormData()
        self.normalizeData(self._normData)
        if self._iter > 0:
            normNewData = self.getNorm(self._data)
            self._data -= self._previousData  # _data now holds F(X^{n}) - X^{n}
            error = self.getNorm(self._data) / normNewData
            self._data *= self._dampingFactor
            self._data += self._previousData  # _data now holds alpha * F(X^{n}) + (1 - alpha) * X^{n}
            self._previousData.copy(self._data)
        else:
            error = self._tolerance + 1.  # force at least a second iteration
            self._previousData = self._data.clone()
        self.denormalizeData(self._normData)
        if self._iterationPrinter.getPrintLevel() > 0:
            if self._iter == 0:
                self._iterationPrinter.print("fixed-point iteration {} ".format(self._iter))
            else:
                self._iterationPrinter.print("fixed-point iteration {} error : {:.5e}".format(self._iter, error))
        self._iter += 1
        succeed, converged = physics.getIterateStatus() if self._useIterate else (physics.getSolveStatus(), True)
        converged = converged and error <= self._tolerance
        return succeed, converged

    def solveTimeStep(self):
        """! Solve a time step using the damped fixed-point algorithm.
        See also c3po.PhysicsDriver.PhysicsDriver.solveTimeStep().
        """
        converged = False
        succeed = True
        while (succeed or not self._leaveIfFailed) and (not converged) and self._iter < self._maxiter:
            self.iterate()
            succeed, converged = self.getIterateStatus()
        if self._iterationPrinter.getPrintLevel() == 1:
            self._iterationPrinter.reprint(tmplevel=2)
        return succeed and converged

    def getIterateStatus(self):
        """! See PhysicsDriver.getIterateStatus(). """
        return PhysicsDriver.getIterateStatus(self)

    def getSolveStatus(self):
        """! See PhysicsDriver.getSolveStatus(). """
        return PhysicsDriver.getSolveStatus(self)

    def initTimeStep(self, dt):
        """! See c3po.PhysicsDriver.PhysicsDriver.initTimeStep(). """
        self._iter = 0
        self._previousData = None
        return Coupler.initTimeStep(self, dt)
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contain the class AndersonCoupler. """
from __future__ import print_function, division
import math
import numpy as np
from c3po.Coupler import Coupler
from c3po.CollaborativeDataManager import CollaborativeDataManager
from c3po.services.Printer import Printer
def deleteQRColumn(matrixQ, matrixR, dataTemp):
    """! INTERNAL

    Return a new QR decomposition after deletion of the first column of the original matrix.

    The deletion turns matrixR (upper-triangular, numpy array) into an upper-Hessenberg matrix; a sequence of
    Givens rotations restores the triangular form, and the same rotations are applied to the columns of matrixQ.
    matrixQ columns are DataManager-like objects (they provide copy() and imuladd()) and are updated in place;
    dataTemp is a scratch object of the same type used during the rotations.
    @param matrixQ list of DataManager-like objects holding the columns of Q (updated in place).
    @param matrixR numpy square upper-triangular matrix (updated in place, then sliced).
    @param dataTemp scratch DataManager-like object (overwritten).
    @return the pair (matrixQ, matrixR) of the downdated decomposition (matrixR has one less row and column).
    """
    dim = matrixR.shape[0]
    for i in range(dim - 1):
        # Givens rotation zeroing out R[i+1, i+1] against R[i, i+1] (the sub-diagonal entry after column removal).
        temp = math.sqrt(matrixR[i, i + 1] * matrixR[i, i + 1] + matrixR[i + 1, i + 1] * matrixR[i + 1, i + 1])
        cval = matrixR[i, i + 1] / temp
        sval = matrixR[i + 1, i + 1] / temp
        matrixR[i, i + 1] = temp
        matrixR[i + 1, i + 1] = 0.
        # Apply the rotation to the remaining entries of rows i and i+1 of R.
        for j in range(i + 2, dim):
            temp = cval * matrixR[i, j] + sval * matrixR[i + 1, j]
            matrixR[i + 1, j] = -sval * matrixR[i, j] + cval * matrixR[i + 1, j]
            matrixR[i, j] = temp
        # Apply the same rotation to columns i and i+1 of Q:
        # dataTemp <- c*Q[i] + s*Q[i+1] ; Q[i+1] <- c*Q[i+1] - s*Q[i] ; Q[i] <- dataTemp.
        dataTemp.copy(matrixQ[i])
        dataTemp *= cval
        dataTemp.imuladd(sval, matrixQ[i + 1])
        matrixQ[i + 1] *= cval
        matrixQ[i + 1].imuladd(-sval, matrixQ[i])
        matrixQ[i].copy(dataTemp)
    # Drop the last row and the first column of R (now both zeroed out by the rotations).
    matrixR = matrixR[:dim - 1, 1:].copy()
    return matrixQ, matrixR
class AndersonCoupler(Coupler):
    """! AndersonCoupler inherits from Coupler and proposes a fixed point algorithm with Anderson acceleration. A QR decomposition is used for the optimization problem.
    The class proposes an algorithm for the resolution of F(X) = X. Thus AndersonCoupler is a Coupler working with :
    - A single PhysicsDriver (possibly a Coupler) defining the calculations to be made each time F is called.
    - A list of DataManager allowing to manipulate the data in the coupling (the X).
    - Two Exchanger allowing to go from the PhysicsDriver to the DataManager and vice versa.
    Each DataManager is normalized with its own norm got after the first iteration.
    They are then used as a single DataManager using CollaborativeDataManager.
    The first two iterations just do (with n the iteration number):
    X^{n+1} = F(X^{n})
    Then the Anderson acceleration starts and computes X^{n+1} as a linear combination of [alpha * F(X^{n-i}) + (1. - alpha) * X^{n-i}].
    alpha, the relative fraction of F(X^{n-i}) and X^{n-i} can be set with setAndersonDampingFactor(). Default value is 1 (only F(X^{n-i})).
    The default order (number of previous states considered) is 2. Call setOrder() to change it.
    The convergence criteria is : ||F(X^{n}) - X^{n}|| / ||F(X^{n})|| < tolerance. The default norm used is the infinite norm. Coupler.setNormChoice() allows to choose another one.
    The default value of tolerance is 1.E-6. Call setConvergenceParameters() to change it.
    The default maximum number of iterations is 100. Call setConvergenceParameters() to change it.
    """
    def __init__(self, physics, exchangers, dataManagers):
        """! Build a AndersonCoupler object.
        @param physics list of only one PhysicsDriver (possibly a Coupler).
        @param exchangers list of exactly two Exchanger allowing to go from the PhysicsDriver to the DataManager and vice versa.
        @param dataManagers list of DataManager.
        """
        Coupler.__init__(self, physics, exchangers, dataManagers)
        self._tolerance = 1.E-6
        self._maxiter = 100
        self._order = 2
        self._andersonDampingFactor = 1.
        self._iterationPrinter = Printer(2)
        self._leaveIfFailed = False
        if not isinstance(physics, list) or not isinstance(exchangers, list) or not isinstance(dataManagers, list):
            raise Exception("AndersonCoupler.__init__ physics, exchangers and dataManagers must be lists!")
        if len(physics) != 1:
            raise Exception("AndersonCoupler.__init__ There must be only one PhysicsDriver")
        if len(exchangers) != 2:
            raise Exception("AndersonCoupler.__init__ There must be exactly two Exchanger")
    def setConvergenceParameters(self, tolerance, maxiter):
        """! Set the convergence parameters (tolerance and maximum number of iterations).
        @param tolerance the convergence threshold in ||F(X^{n}) - X^{n}|| / ||X^{n+1}|| < tolerance.
        @param maxiter the maximal number of iterations.
        """
        self._tolerance = tolerance
        self._maxiter = maxiter
    def setAndersonDampingFactor(self, andersonDampingFactor):
        """! Set the damping factor of the method, the relative contribution of F(X^{k}) and X^{k} on the calculation of next step.
        @param andersonDampingFactor the damping factor alpha in the formula alpha * F(X^{n-i}) + (1. - alpha) * X^{n-i}.
        """
        if andersonDampingFactor <= 0 or andersonDampingFactor > 1:
            raise Exception("AndersonCoupler.setAndersonDampingFactor Set a damping factor > 0 and <=1 !")
        self._andersonDampingFactor = andersonDampingFactor
    def setOrder(self, order):
        """! Set the order of the method.
        @param order order of Anderson method. This is also the number of previous states stored by the algorithm.
        """
        if order <= 0:
            raise Exception("AndersonCoupler.setOrder Set an order > 0 !")
        self._order = order
    def setPrintLevel(self, level):
        """! Set the print level during iterations (0=None, 1 keeps last iteration, 2 prints every iteration).
        @param level integer in range [0;2]. Default: 2.
        """
        if not level in [0, 1, 2]:
            raise Exception("AndersonCoupler.setPrintLevel level should be one of [0, 1, 2]!")
        self._iterationPrinter.setPrintLevel(level)
    def setFailureManagement(self, leaveIfSolvingFailed):
        """! Set if iterations should continue or not in case of solver failure (solveTimeStep returns False).
        @param leaveIfSolvingFailed set False to continue the iterations, True to stop. Default: False.
        """
        self._leaveIfFailed = leaveIfSolvingFailed
    def solveTimeStep(self):
        """! Solve a time step using the fixed point algorithm with Anderson acceleration.
        Inspired by Homer Walker (walker@wpi.edu), 10/14/2011.
        See also c3po.PhysicsDriver.PhysicsDriver.solveTimeStep().
        """
        physics = self._physicsDrivers[0]
        physics2Data = self._exchangers[0]
        data2physics = self._exchangers[1]
        iiter = 0
        # Anderson memory counter: number of stored residuals
        mAA = 0
        # History of the dF increments
        memory = [0] * self._order
        iFirstMemory = 0  # allows to ignore the first elements of memory when the conditioning is bad.
        matrixR = np.zeros(shape=(1, 1))
        matrixQ = [0.] * self._order
        datatmp = 0.  # scratch data for deleteQRColumn
        # Tolerance on the condition number of matrixR; default value proposed by Ansar, kept as is
        dropErr = 1.e10
        # Initialization: compute here the state "0"
        if self._iterationPrinter.getPrintLevel() > 0:
            self._iterationPrinter.print("Anderson iteration {} ".format(iiter))
        physics.solve()
        if self._leaveIfFailed and not physics.getSolveStatus():
            return False
        physics2Data.exchange()
        data = CollaborativeDataManager(self._dataManagers)
        normData = self.readNormData()
        self.normalizeData(normData)
        previousData = data.clone()
        iiter += 1
        # First iteration, without acceleration
        self.abortTimeStep()
        self.initTimeStep(self._dt)
        self.denormalizeData(normData)
        data2physics.exchange()
        physics.solve()
        if self._leaveIfFailed and not physics.getSolveStatus():
            return False
        physics2Data.exchange()
        self.normalizeData(normData)
        diffData = data - previousData
        previousData.copy(data)
        deltaF = diffData * -1.
        delta = previousData * -1.
        error = self.getNorm(diffData) / self.getNorm(data)
        iiter += 1
        if self._iterationPrinter.getPrintLevel() > 0:
            self._iterationPrinter.print("Anderson iteration {} error : {:.5e} ".format(iiter - 1, error))
        while error > self._tolerance and iiter < self._maxiter:
            self.abortTimeStep()
            self.initTimeStep(self._dt)
            self.denormalizeData(normData)
            data2physics.exchange()
            physics.solve()
            if self._leaveIfFailed and not physics.getSolveStatus():
                return False
            physics2Data.exchange()  # data contains g(u_k), previousData contains u_k
            self.normalizeData(normData)
            diffData.copy(data)
            diffData -= previousData
            error = self.getNorm(diffData) / self.getNorm(data)
            if error > self._tolerance:
                deltaF += diffData  # F_i - F_{i-1}
                delta += data  # f(x_i) - f(x_{i-1})
                # Depending on whether the order m has been reached or not, append the new residual,
                # or drop the first column and append the new one at the end
                if iFirstMemory + mAA < len(memory):
                    memory[iFirstMemory + mAA] = delta.clone()
                else:
                    firstMemory = memory[0]
                    for i in range(len(memory) - 1):
                        memory[i] = memory[i + 1]
                    memory[-1] = firstMemory
                    memory[-1].copy(delta)
                    if iFirstMemory > 0:
                        iFirstMemory -= 1
                mAA += 1
                if mAA > self._order:
                    # If the dimension is already self._order, the max size is reached: remove the first column
                    # and update the decomposition accordingly
                    if datatmp == 0.:
                        datatmp = data.clone()
                    matrixQ, matrixR = deleteQRColumn(matrixQ, matrixR, datatmp)
                    # The size of the matrixQ matrix has decreased: update mAA
                    mAA -= 1
                # Add the new column to matrixQ and matrixR
                if matrixR.shape[0] != mAA:
                    tmpR = np.zeros(shape=(mAA, mAA))
                    tmpR[0:matrixR.shape[0], 0:matrixR.shape[1]] += matrixR
                    matrixR = tmpR
                # Modified Gram-Schmidt step: orthogonalize deltaF against the existing columns of Q.
                for j in range(mAA - 1):
                    val = matrixQ[j].dot(deltaF)
                    matrixR[j, mAA - 1] = val
                    deltaF.imuladd(-val, matrixQ[j])
                matrixR[mAA - 1, mAA - 1] = deltaF.norm2()
                facteurmult = 1.
                if matrixR[mAA - 1, mAA - 1] != 0:
                    facteurmult = 1. / matrixR[mAA - 1, mAA - 1]
                if matrixQ[mAA - 1] == 0.:
                    matrixQ[mAA - 1] = deltaF * facteurmult
                else:
                    matrixQ[mAA - 1].copy(deltaF)
                    matrixQ[mAA - 1] *= facteurmult
                # Prepare the next iteration.
                delta.copy(data)
                delta *= -1.
                deltaF.copy(diffData)
                deltaF *= -1.
                # Conditioning control: if memory is badly conditioned (monitored through the condition number of matrixR),
                # update matrixQ and matrixR, i.e. drop the first column of memory (with iFirstMemory)
                if dropErr > 0.:
                    condDF = np.linalg.cond(matrixR)
                    while condDF > dropErr and mAA > 1:
                        #print("cond(D) = %1.8e, reducing mAA to %d" % (condDF, mAA - 1))
                        if datatmp == 0.:
                            datatmp = data.clone()
                        matrixQ, matrixR = deleteQRColumn(matrixQ, matrixR, datatmp)
                        iFirstMemory += 1
                        mAA -= 1
                        condDF = np.linalg.cond(matrixR)
                # Solve the minimization problem: first compute matrixQ^T F
                matrixQF = np.zeros(mAA)
                for i in range(mAA):
                    matrixQF[i] = matrixQ[i].dot(diffData)
                # Then solve the triangular system matrixR gamma = matrixQF to get the Anderson coefficients
                gamma = np.linalg.lstsq(matrixR, matrixQF, rcond=-1)[0]
                # Compute memory * gamma in order to build the new data
                for i in range(mAA):
                    data.imuladd(-gamma[i], memory[iFirstMemory + i])
                if self._andersonDampingFactor != 1.:
                    # Damped update: add back (1 - alpha) times the non-accelerated contribution.
                    matrixRgamma = np.dot(matrixR, gamma)
                    data.imuladd(-(1. - self._andersonDampingFactor), diffData)
                    for i in range(mAA):
                        data.imuladd((1. - self._andersonDampingFactor) * matrixRgamma[i], matrixQ[i])
                previousData.copy(data)
            iiter += 1
            if self._iterationPrinter.getPrintLevel() > 0:
                self._iterationPrinter.print("Anderson iteration {} error : {:.5e} ".format(iiter - 1, error))
        if self._iterationPrinter.getPrintLevel() == 1:
            self._iterationPrinter.reprint(tmplevel=2)
        self.denormalizeData(normData)
        return physics.getSolveStatus() and error <= self._tolerance
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contains the class DynamicResidualBalanceCoupler. """
from __future__ import print_function, division
from c3po.PhysicsDriver import PhysicsDriver
from c3po.Coupler import Coupler
from c3po.LocalDataManager import LocalDataManager
from c3po.services.Printer import Printer
class DynamicResidualBalanceCoupler(Coupler):
    """! DynamicResidualBalanceCoupler inherits from Coupler and proposes a dynamic residual balance algorithm.

    This is a variant of the adaptive residual balance implemented by c3po.AdaptiveResidualBalanceCoupler.AdaptiveResidualBalanceCoupler.

    This algorithm is designed to couple two solvers using an iterative procedure.
    It controls the accuracy required to each solver in order to limit over-solving and make them converge together.
    See R. Delvaux, "Algorithmes de couplage entre neutronique, thermohydraulique et thermique", PhD Thesis, Institut Polytechnique de Paris, 2022.

    DynamicResidualBalanceCoupler works with :

    - Two PhysicsDriver, one for each solver. They must implement the iterateTimeStep() method, together with the possibilities to get residual and set target accuracy.
    - Four Exchanger: two for exchanges between the PhysicsDriver, and two to get residuals.
    - One LocalDataManager (not just a DataManager) which contains the residuals got with the last two exchangers.

    @note Two Exchanger and a DataManager are used to access the residuals in order to support all possible MPI schemes.

    The default target accuracies are 1e-4 and the default maximum number of iterations is 100. Use setConvergenceParameters() to change these values.

    It may be interesting to use a FixedPointCoupler to add a damping factor and to control the coupling error.
    In this case :

    - The option setUseIterate(True) of FixedPointCoupler must be used.
    - The maximal number of iterations provided to DynamicResidualBalanceCoupler.setConvergenceParameters() is ignored.
    - The exchanger '2to1' is also probably redundant with the FixedPointCoupler exchangers and, in this case, can be set to do nothing.
    """

    def __init__(self, physics, exchangers, dataManagers):
        """! Build a DynamicResidualBalanceCoupler object.

        @param physics list (or dict with keys ['Solver1', 'Solver2']) of two PhysicsDriver. If a list is used, it has to be provided in the same order than the keys here.
            The provided PhysicsDriver must implement the iterateTimeStep() method (together with solveTimeStep()) and accept new accuracy (for the solveTimeStep() method) through setInputDoubleValue('Accuracy', value).
        @param exchangers list (or dict with keys ['1to2', '2to1', 'Residual1', 'Residual2']) of four Exchanger. If a list is used, it has to be provided in the same order than the keys here.
        @param dataManagers list (or dict with keys ['Residuals']) of one LocalDataManager (not just a DataManager).
            The residuals must be stored in this DataManager as double values under the names 'Residual1' and 'Residual2'.
        """
        Coupler.__init__(self, physics, exchangers, dataManagers)
        # -- Validate and unpack the two solvers.
        if not isinstance(physics, (dict, list)):
            raise Exception("DynamicResidualBalanceCoupler.__init__ physics must be either a dictionary or a list.")
        if len(physics) != 2:
            raise Exception("DynamicResidualBalanceCoupler.__init__ There must be exactly two PhysicsDriver, not {}.".format(len(physics)))
        if isinstance(physics, dict):
            for key in physics.keys():
                if key not in ["Solver1", "Solver2"]:
                    raise Exception('DynamicResidualBalanceCoupler.__init__ if physics is provided as a dictionary, the keys must be : ["Solver1", "Solver2"]. We found : {}.'.format(list(physics.keys())))
            self._solver1 = physics["Solver1"]
            self._solver2 = physics["Solver2"]
        else:
            self._solver1 = physics[0]
            self._solver2 = physics[1]
        # -- Validate and unpack the four exchangers.
        if not isinstance(exchangers, (dict, list)):
            raise Exception("DynamicResidualBalanceCoupler.__init__ exchangers must be either a dictionary or a list.")
        if len(exchangers) != 4:
            raise Exception("DynamicResidualBalanceCoupler.__init__ There must be exactly four Exchanger, not {}.".format(len(exchangers)))
        if isinstance(exchangers, dict):
            for key in exchangers.keys():
                if key not in ["1to2", "2to1", "Residual1", "Residual2"]:
                    raise Exception('DynamicResidualBalanceCoupler.__init__ if exchangers is provided as a dictionary, the keys must be : ["1to2", "2to1", "Residual1", "Residual2"]. We found : {}.'.format(list(exchangers.keys())))
            self._exchanger1to2 = exchangers["1to2"]
            self._exchanger2to1 = exchangers["2to1"]
            self._exchangerResidual1 = exchangers["Residual1"]
            self._exchangerResidual2 = exchangers["Residual2"]
        else:
            self._exchanger1to2 = exchangers[0]
            self._exchanger2to1 = exchangers[1]
            self._exchangerResidual1 = exchangers[2]
            self._exchangerResidual2 = exchangers[3]
        # -- Validate and unpack the data manager holding the residuals.
        if not isinstance(dataManagers, (dict, list)):
            raise Exception("DynamicResidualBalanceCoupler.__init__ dataManagers must be either a dictionary or a list.")
        if len(dataManagers) != 1:
            raise Exception("DynamicResidualBalanceCoupler.__init__ There must be exactly one DataManager, not {}.".format(len(dataManagers)))
        if isinstance(dataManagers, dict):
            for key in dataManagers.keys():
                if key not in ["Residuals"]:
                    raise Exception('DynamicResidualBalanceCoupler.__init__ if dataManagers is provided as a dictionary, the keys must be : ["Residuals"]. We found : {}.'.format(list(dataManagers.keys())))
            self._data = dataManagers["Residuals"]
        else:
            self._data = dataManagers[0]
        if not isinstance(self._data, LocalDataManager):
            raise Exception("DynamicResidualBalanceCoupler.__init__ The provided Datamanager must be a LocalDataManager.")
        # -- Printing and failure policy.
        self._iterationPrinter = Printer(2)
        self._leaveIfFailed = False
        # -- Algorithm parameters and internal state (see setConvergenceParameters()).
        self._epsSolver1Ref = 1e-4        # target residual for solver 1
        self._accuracySolver1Old = 0.     # accuracy requested to solver 1 at previous iteration
        self._epsSolver2Ref = 1e-4        # target residual for solver 2
        self._accuracySolver2 = 0.        # accuracy currently requested to solver 2
        self._accuracySolver2Old = 0.     # accuracy requested to solver 2 at previous iteration
        self._convRateSolver2 = 0.        # last measured convergence rate of solver 2
        self._residualTotal = 0.          # normalized total residual (solver 1 stage)
        self._residualHalfTotal = 0.      # normalized total residual (solver 2 stage)
        self._iter = 0                    # current iteration index, reset by initTimeStep()
        self._maxiter = 100

    def setConvergenceParameters(self, targetResidualSolver1, targetResidualSolver2, maxiter):
        """! Set the convergence parameters (target residuals for each solver and maximum number of iterations).

        @param targetResidualSolver1 target residual for solver 1. Default value: 1.E-4.
        @param targetResidualSolver2 target residual for solver 2. Default value: 1.E-4.
        @param maxiter the maximal number of iterations. Default value: 100.
        """
        self._epsSolver1Ref = targetResidualSolver1
        self._epsSolver2Ref = targetResidualSolver2
        self._maxiter = maxiter

    def setPrintLevel(self, level):
        """! Set the print level during iterations (0=None, 1 keeps last iteration, 2 prints every iteration).

        @param level integer in range [0;2]. Default: 2.
        """
        if level not in [0, 1, 2]:
            raise Exception("DynamicResidualBalanceCoupler.setPrintLevel level should be one of [0, 1, 2]!")
        self._iterationPrinter.setPrintLevel(level)

    def setFailureManagement(self, leaveIfSolvingFailed):
        """! Set if iterations should continue or not in case of solver failure (solveTimeStep returns False).

        @param leaveIfSolvingFailed set False to continue the iterations, True to stop. Default: False.
        """
        self._leaveIfFailed = leaveIfSolvingFailed

    def solveTimeStep(self):
        """! See c3po.PhysicsDriver.PhysicsDriver.solveTimeStep(). """
        converged = False
        succeed = True
        # Iterate until convergence, failure (if requested) or iteration budget exhaustion.
        while (succeed or not self._leaveIfFailed) and (not converged) and self._iter < self._maxiter:
            self.iterate()
            succeed, converged = self.getIterateStatus()
        if self._iterationPrinter.getPrintLevel() == 1:
            self._iterationPrinter.reprint(tmplevel=2)
        return succeed and converged

    def iterateTimeStep(self):
        """! See c3po.PhysicsDriver.PhysicsDriver.iterateTimeStep(). """
        converged = False
        if self._iter == 0:
            # -- Computation of the initial residual for Solver1
            self._solver1.iterate()
            self._exchangerResidual1.exchange()
            residualSolver1Initial = self._data.getOutputDoubleValue("Residual1")
            # -- Computation of the initial value of the normalized total residual
            self._residualTotal = residualSolver1Initial / self._epsSolver1Ref
            # -- Initial residual for Solver2, obtained from the last calculation made outside of the algorithm.
            self._exchangerResidual2.exchange()
            # -- Convergence criteria for Solver1: driven by the (normalized) residual of Solver2
            accuracySolver1 = self._data.getOutputDoubleValue('Residual2') / self._epsSolver2Ref * self._epsSolver1Ref
            self._solver1.setInputDoubleValue("Accuracy", accuracySolver1)
            self._accuracySolver1Old = accuracySolver1
            # -- First iteration for Solver1
            self._solver1.solve()
            # -- Get the precision reached by Solver1 after the first iteration
            self._exchangerResidual1.exchange()
            # -- Exchange physical data between Solver1 and Solver2
            self._exchanger1to2.exchange()
            # -- Computation of the initial residual for Solver2
            self._solver2.iterate()
            self._exchangerResidual2.exchange()
            residualSolver2Initial = self._data.getOutputDoubleValue('Residual2')
            # -- Initial value of the total "half residual"
            self._residualHalfTotal = residualSolver2Initial / self._epsSolver2Ref + self._data.getOutputDoubleValue('Residual1') / self._epsSolver1Ref
            # -- Convergence criteria for Solver2: driven by the (normalized) residual of Solver1
            self._accuracySolver2 = self._data.getOutputDoubleValue('Residual1') / self._epsSolver1Ref * self._epsSolver2Ref
            self._solver2.setInputDoubleValue("Accuracy", self._accuracySolver2)
            self._accuracySolver2Old = self._accuracySolver2
            if self._iterationPrinter.getPrintLevel() > 0:
                self._iterationPrinter.print("Dynamic Residual Balance iteration {} accuracies: {} ; {}".format(self._iter, accuracySolver1, self._accuracySolver2))
            # -- First iteration for Solver2
            self._solver2.solve()
            # -- End of the first multiphysics iteration
        else:
            # -- Exchange precision reached by Solver2 and compute current partial residual and intermediate coeff
            self._exchangerResidual2.exchange()
            residualPartial = self._data.getOutputDoubleValue('Residual2') / self._epsSolver2Ref
            lastResidual = self._residualTotal
            # -- Computation of the initial residual for Solver1
            self._solver1.iterate()
            self._exchangerResidual1.exchange()
            residualSolver1Initial = self._data.getOutputDoubleValue('Residual1')
            # -- Compute total residual and convergence rate
            self._residualTotal = residualSolver1Initial / self._epsSolver1Ref + residualPartial
            convRateSolver1 = self._residualTotal / lastResidual
            # -- Deal with the new precision computed: we don't want a new precision smaller than the targeted one! And if one solver reaches its targeted precision, the one for the second solver is also set to its targeted value
            if self._accuracySolver1Old > self._epsSolver1Ref and self._accuracySolver2Old > self._epsSolver2Ref:
                # NOTE: unlike the Solver2 stage below, the accuracy of Solver1 does not use the
                # average convergence rate (a dead assignment of it was removed here).
                accuracySolver1 = self._data.getOutputDoubleValue('Residual2') / self._epsSolver2Ref * self._epsSolver1Ref
                accuracySolver1 = min(accuracySolver1, self._accuracySolver1Old)
                if accuracySolver1 <= self._epsSolver1Ref or self._accuracySolver2Old <= self._epsSolver2Ref:
                    converged = True
                    self._accuracySolver2 = self._epsSolver2Ref
                    accuracySolver1 = self._epsSolver1Ref
            else:
                accuracySolver1 = self._epsSolver1Ref
                self._accuracySolver2 = self._epsSolver2Ref
                converged = True
            self._accuracySolver1Old = accuracySolver1
            self._solver1.setInputDoubleValue("Accuracy", accuracySolver1)
            # -- Computation of Solver1 with the new precision computed
            self._solver1.solve()
            # -- Exchange physical data between Solver1 and Solver2
            self._exchanger1to2.exchange()
            # -- Exchange reached precision by Solver1
            self._exchangerResidual1.exchange()
            # -- Deal with the new precision computed: we don't want a new precision smaller than the targeted one! And if one solver reaches its targeted precision, the one for the second solver is also set to its targeted value
            if self._accuracySolver2 > self._epsSolver2Ref and not converged:
                # -- Computation of the initial residual for Solver2
                self._solver2.iterate()
                self._exchangerResidual2.exchange()
                residualSolver2Initial = self._data.getOutputDoubleValue('Residual2')
                # -- Computation of total current total residual
                residualHalfTotalOld = self._residualHalfTotal
                self._residualHalfTotal = residualSolver2Initial / self._epsSolver2Ref + self._data.getOutputDoubleValue('Residual1') / self._epsSolver1Ref
                # -- Convergence rate for Solver2
                self._convRateSolver2 = self._residualHalfTotal / residualHalfTotalOld
                # -- Average convergence rate
                conv = (convRateSolver1 + self._convRateSolver2) / 2.
                # -- Computation of the new precision for Solver2
                # -- New precision computed : should not be below self._epsSolver2Ref
                # -- New precision computed : should not be bigger than self._accuracySolver2Old
                self._accuracySolver2 = conv * self._data.getOutputDoubleValue('Residual1') / self._epsSolver1Ref * self._epsSolver2Ref
                self._accuracySolver2 = min(self._accuracySolver2, self._accuracySolver2Old)
                self._accuracySolver2 = max(self._accuracySolver2, self._epsSolver2Ref)
            else:
                self._accuracySolver2 = self._epsSolver2Ref
            self._accuracySolver2Old = self._accuracySolver2
            self._solver2.setInputDoubleValue("Accuracy", self._accuracySolver2)
            if self._iterationPrinter.getPrintLevel() > 0:
                self._iterationPrinter.print("Dynamic Residual Balance iteration {} accuracies: {} ; {}".format(self._iter, accuracySolver1, self._accuracySolver2))
            # -- Computation of Solver2 with the new precision computed
            self._solver2.solve()
        # -- Common epilogue: feed Solver2 outputs back to Solver1 and collect statuses.
        self._exchanger2to1.exchange()
        succeed = self._solver1.getSolveStatus() and self._solver2.getSolveStatus()
        self._iter += 1
        return succeed, converged

    def getIterateStatus(self):
        """! See c3po.PhysicsDriver.PhysicsDriver.getIterateStatus(). """
        return PhysicsDriver.getIterateStatus(self)

    def getSolveStatus(self):
        """! See c3po.PhysicsDriver.PhysicsDriver.getSolveStatus(). """
        return PhysicsDriver.getSolveStatus(self)

    def initTimeStep(self, dt):
        """! See c3po.PhysicsDriver.PhysicsDriver.initTimeStep(). """
        # Reset the iteration counter so each time step starts a fresh coupling loop.
        self._iter = 0
        return Coupler.initTimeStep(self, dt)
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contain the class JFNKCoupler. """
from __future__ import print_function, division
import math
import numpy as np
from c3po.Coupler import Coupler
from c3po.CollaborativeDataManager import CollaborativeDataManager
from c3po.services.Printer import Printer
def solveTriang(matrixA, vectorB):
    """! INTERNAL.

    Solve the upper-triangular linear system matrixA x = vectorB by back substitution.
    """
    size = vectorB.shape[0]
    solution = np.zeros(shape=(size))
    row = size - 1
    while row >= 0:
        # Start from the right-hand side and subtract the already-known components.
        accumulator = vectorB[row]
        for col in range(size - 1, row, -1):
            accumulator -= matrixA[row, col] * solution[col]
        solution[row] = accumulator / matrixA[row, row]
        row -= 1
    return solution
class JFNKCoupler(Coupler):
    """! JFNKCoupler inherits from Coupler and proposes a Jacobian-Free Newton Krylov coupling algorithm.

    This is a Newton algorithm using a Krylov (GMRES) method for the linear system solving.
    The Jacobian matrix is not computed, but the product of the jacobian matrix with a vector v is approximated by a Taylor formula (J_u is the jacobian of F at the point u):

        J_u v ~= (F(u + epsilon v) - F(u))/epsilon

    epsilon is a parameter of the algorithm. Its default value is 1E-4. Call setEpsilon() to change it.

    JFNKCoupler is a Coupler working with :

    - A single PhysicsDriver (possibly a Coupler) defining the calculations to be made each time F is called.
    - A list of DataManager allowing to manipulate the data in the coupling.
    - Two Exchanger allowing to go from the PhysicsDriver to the DataManager and vice versa.

    Each DataManager is normalized with its own norm got after the first iteration.
    They are then used as a single DataManager using CollaborativeDataManager.

    As the Newton algorithm solves for F(X) = 0, in order to be coherent with the fixed point coupling algorithms, F(x) is defined as F(X) = f(X) - X, where f is the output of the physicsDriver.

    The convergence criteria is : ||f(X^{n}) - X^{n}|| / ||f(X^{n})|| < tolerance. The default norm used is the infinite norm. Coupler.setNormChoice() allows to choose another one.

    The default Newton tolerance is 1.E-6. Call setConvergenceParameters() to change it.
    The default maximum Newton number of iterations is 10. Call setConvergenceParameters() to change it.
    The default Krylov tolerance is 1.E-4. Call setKrylovConvergenceParameters() to change it.
    The default maximum Krylov iteration is 100. Call setKrylovConvergenceParameters() to change it.
    """

    def __init__(self, physics, exchangers, dataManagers):
        """! Build a JFNKCoupler object.

        @param physics list of only one PhysicsDriver (possibly a Coupler).
        @param exchangers list of exactly two Exchanger allowing to go from the PhysicsDriver to the DataManager and vice versa.
        @param dataManagers list of DataManager.
        """
        Coupler.__init__(self, physics, exchangers, dataManagers)
        # Newton (outer loop) parameters.
        self._newtonTolerance = 1.E-6
        self._newtonMaxIter = 10
        # Krylov / GMRES (inner loop) parameters.
        self._krylovTolerance = 1.E-4
        self._krylovMaxIter = 100
        # Finite-difference step of the Taylor approximation of J_u v.
        self._epsilon = 1.E-4
        self._iterationPrinter = Printer(2)
        self._leaveIfFailed = False
        # Arguments are validated after the base-class call (original ordering kept).
        if not isinstance(physics, list) or not isinstance(exchangers, list) or not isinstance(dataManagers, list):
            raise Exception("JFNKCoupler.__init__ physics, exchangers and dataManagers must be lists!")
        if len(physics) != 1:
            raise Exception("JFNKCoupler.__init__ There must be only one PhysicsDriver")
        if len(exchangers) != 2:
            raise Exception("JFNKCoupler.__init__ There must be exactly two Exchanger")

    def setConvergenceParameters(self, tolerance, maxiter):
        """! Set the convergence parameters (tolerance and maximum number of iterations).

        @param tolerance the convergence threshold in ||f(X^{n}) - X^{n}|| / ||f(X^{n})|| < tolerance.
        @param maxiter the maximal number of iterations.
        """
        self._newtonTolerance = tolerance
        self._newtonMaxIter = maxiter

    def setKrylovConvergenceParameters(self, tolerance, maxiter):
        """! Set the convergence parameters (tolerance and maximum number of iterations) of the Krylov method.

        @param tolerance the convergence threshold of the Krylov method.
        @param maxiter the maximal number of iterations of the Krylov method.
        """
        self._krylovTolerance = tolerance
        self._krylovMaxIter = maxiter

    def setEpsilon(self, epsilon):
        """! Set the epsilon value of the method.

        @param epsilon the epsilon value in the formula J_u v ~= (F(u + epsilon v) - F(u))/epsilon.
        """
        self._epsilon = epsilon

    def setPrintLevel(self, level):
        """! Set the print level during iterations (0=None, 1 keeps last iteration, 2 prints every iteration).

        @param level integer in range [0;2]. Default: 2.
        """
        if not level in [0, 1, 2]:
            raise Exception("JFNKCoupler.setPrintLevel level should be one of [0, 1, 2]!")
        self._iterationPrinter.setPrintLevel(level)

    def setFailureManagement(self, leaveIfSolvingFailed):
        """! Set if iterations should continue or not in case of solver failure (solveTimeStep returns False).

        @param leaveIfSolvingFailed set False to continue the iterations, True to stop. Default: False.
        """
        self._leaveIfFailed = leaveIfSolvingFailed

    def solveTimeStep(self):
        """! Solve a time step using Jacobian-Free Newton Krylov algorithm.

        Outer loop: Newton iterations on F(X) = f(X) - X = 0.
        Inner loop: GMRES on the linearized system, with Givens rotations maintaining the
        QR factorization of the Hessenberg matrix (matrixR, transposeO) incrementally.

        See also c3po.PhysicsDriver.PhysicsDriver.solveTimeStep().
        """
        physics = self._physicsDrivers[0]
        physics2Data = self._exchangers[0]
        data2physics = self._exchangers[1]
        iterNewton = 0
        iterKrylov = 0
        residual = 0
        previousData = 0
        # matrixQ holds the orthonormal Krylov basis vectors (as DataManager objects).
        matrixQ = []
        # Compute the initial ("0") state here.
        physics.solve()
        if self._leaveIfFailed and not physics.getSolveStatus():
            return False
        physics2Data.exchange()
        data = CollaborativeDataManager(self._dataManagers)
        normData = self.readNormData()
        self.normalizeData(normData)
        errorNewton = self._newtonTolerance + 1
        while errorNewton > self._newtonTolerance and iterNewton < self._newtonMaxIter:
            if iterNewton == 0:
                residual = data.clone()
                previousData = data.clone()
            else:
                residual.copy(data)
                previousData.copy(data)
            # Re-run the physics at the current iterate: X -> f(X).
            # NOTE(review): assumes self._dt was stored by a previous initTimeStep call — defined outside this class.
            self.abortTimeStep()
            self.initTimeStep(self._dt)
            self.denormalizeData(normData)
            data2physics.exchange()
            physics.solve()
            if self._leaveIfFailed and not physics.getSolveStatus():
                return False
            physics2Data.exchange()
            self.normalizeData(normData)
            residual -= data  # residual is the second member of the linear system: -F(x) = -(f(x)-x)
            norm2Residual = residual.norm2()
            errorNewton = self.getNorm(residual) / self.getNorm(data)
            if self._iterationPrinter.getPrintLevel() > 0:
                self._iterationPrinter.print("JFNK Newton iteration {} initial error : {:.5e}".format(iterNewton, errorNewton))
            if errorNewton > self._newtonTolerance:
                # First Krylov basis vector: normalized residual.
                if len(matrixQ) < 1:
                    matrixQ.append(residual * (1. / norm2Residual))
                else:
                    matrixQ[0].copy(residual)
                    matrixQ[0] *= (1. / norm2Residual)
                vectorH = np.zeros(shape=(1))
                # transposeO accumulates the product of the Givens rotations applied so far.
                transposeO = np.zeros(shape=(1, 1))
                transposeO[0, 0] = 1.
                matrixR = np.zeros(shape=(1, 0))
                krylovResidual = np.zeros(shape=(1))
                krylovResidual[0] = norm2Residual
                errorKrylov = self._krylovTolerance + 1
                iterKrylov = 0
                while errorKrylov > self._krylovTolerance and iterKrylov < self._krylovMaxIter:
                    iterKrylov += 1
                    # Evaluate F at the perturbed point u + epsilon*q to approximate J_u q.
                    data.copy(previousData)
                    data.imuladd(self._epsilon, matrixQ[iterKrylov - 1])
                    self.abortTimeStep()
                    self.initTimeStep(self._dt)
                    self.denormalizeData(normData)
                    data2physics.exchange()
                    physics.solve()
                    if self._leaveIfFailed and not physics.getSolveStatus():
                        return False
                    physics2Data.exchange()
                    self.normalizeData(normData)
                    data -= previousData
                    data.imuladd(-self._epsilon, matrixQ[iterKrylov - 1])
                    # New (unnormalized) Krylov vector: (F(u + eps q) - F(u)) / eps.
                    if len(matrixQ) < iterKrylov + 1:
                        matrixQ.append(data + residual)
                    else:
                        matrixQ[iterKrylov].copy(data)
                        matrixQ[iterKrylov] += residual
                    matrixQ[iterKrylov] *= 1. / self._epsilon
                    # Modified Gram-Schmidt orthogonalization against the previous basis vectors.
                    for i in range(iterKrylov):
                        vectorH[i] = matrixQ[i].dot(matrixQ[iterKrylov])
                        matrixQ[iterKrylov].imuladd(-vectorH[i], matrixQ[i])
                    vectorH = np.append(vectorH, matrixQ[iterKrylov].norm2())
                    matrixQ[iterKrylov] *= 1. / vectorH[-1]
                    # Append the new row/column to O.
                    tmpO = np.zeros(shape=(transposeO.shape[0] + 1, transposeO.shape[1] + 1))
                    tmpO[0:transposeO.shape[0], 0:transposeO.shape[1]] += transposeO
                    transposeO = tmpO
                    transposeO[transposeO.shape[0] - 1, transposeO.shape[1] - 1] = 1.
                    # Apply the accumulated rotations to the new Hessenberg column.
                    vectorH = np.dot(transposeO, vectorH)
                    # Build the new Givens rotation annihilating the sub-diagonal entry.
                    rot = np.zeros(shape=(iterKrylov + 1, iterKrylov + 1))
                    for i in range(iterKrylov - 1):
                        rot[i, i] = 1.
                    normtmp = math.sqrt(vectorH[-2] * vectorH[-2] + vectorH[-1] * vectorH[-1])
                    rot[iterKrylov - 1, iterKrylov - 1] = vectorH[-2] / normtmp
                    rot[iterKrylov, iterKrylov] = vectorH[-2] / normtmp
                    rot[iterKrylov - 1, iterKrylov] = vectorH[-1] / normtmp
                    rot[iterKrylov, iterKrylov - 1] = -vectorH[-1] / normtmp
                    transposeO = np.dot(rot, transposeO)
                    # Append the new row/column to matrixR.
                    tmpR = np.zeros(shape=(matrixR.shape[0] + 1, matrixR.shape[1] + 1))
                    tmpR[0:matrixR.shape[0], 0:matrixR.shape[1]] += matrixR
                    matrixR = tmpR
                    for i in range(iterKrylov + 1):
                        matrixR[i, matrixR.shape[1] - 1] = vectorH[i]
                    matrixR = np.dot(rot, matrixR)
                    # The last component of the rotated residual gives the GMRES residual norm.
                    krylovResidual = np.append(krylovResidual, 0.)
                    krylovResidual = np.dot(rot, krylovResidual)
                    errorKrylov = abs(krylovResidual[-1]) / norm2Residual
                    if self._iterationPrinter.getPrintLevel() > 0:
                        self._iterationPrinter.print("  JFNK Krylov iteration {} error : {:.5e}".format(iterKrylov - 1, errorKrylov))
                # Solve the small triangular least-squares system and update the Newton iterate.
                squareR = matrixR[0:iterKrylov, 0:iterKrylov]
                reduceKrylovResidual = krylovResidual[0:iterKrylov]
                krylovResu = solveTriang(squareR, reduceKrylovResidual)
                data.copy(previousData)
                for i in range(iterKrylov):
                    data.imuladd(krylovResu[i], matrixQ[i])
            iterNewton += 1
        if self._iterationPrinter.getPrintLevel() == 1:
            self._iterationPrinter.reprint(tmplevel=2)
        self.denormalizeData(normData)
        return physics.getSolveStatus() and errorNewton <= self._newtonTolerance
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contains the class AdaptiveResidualBalanceCoupler. """
from __future__ import print_function, division
from c3po.PhysicsDriver import PhysicsDriver
from c3po.Coupler import Coupler
from c3po.LocalDataManager import LocalDataManager
from c3po.services.Printer import Printer
class AdaptiveResidualBalanceCoupler(Coupler):
"""! AdaptiveResidualBalanceCoupler inherits from Coupler and proposes a adaptive residual balance algorithm.
This algorithm is designed to couple two solvers using an iterative procedure.
It controls the accuracy required to each solver in order to limit over-solving and make them converge together.
See Senecal J. "Development of an efficient tightly coupled method for multiphysics reactor transient analysis" for details (for instance: https://www.sciencedirect.com/science/article/pii/S0149197017302676).
AdaptiveResidualBalanceCoupler works with :
- Two PhysicsDriver, one for each solver. They must implement the iterateTimeStep() method, together with the possibilities to get residual and set target accuracy.
- Four Exchanger: two for exchanges between the PhysicsDriver, and two to get residuals.
- One LocalDataManager (not just a DataManager) which contains the residuals got with the last two exchangers.
@note Two Exchanger and a DataManager are used to access the residuals in order to support all possible MPI schemes.
The default target accuracies are 1e-4 and the default maximum number of iterations is 100. Use setConvergenceParameters() to change these values.
The algorithm takes one parameter per solver, called initial convergence rate. They are set to 0.1 by default. Use setConvRateInit() to change them.
It may be interesting to use a FixedPointCoupler to add a damping factor and to control the coupling error.
In this case :
- The option setUseIterate(True) of FixedPointCoupler must be used.
- The maximal number of iterations provided to AdaptiveResidualBalanceCoupler.setConvergenceParameters() is ignored.
- The exchanger '2to1' is also probably redundant with the FixedPointCoupler exchangers and, in this case, can be set to do nothing.
"""
def __init__(self, physics, exchangers, dataManagers):
    """! Build a AdaptiveResidualBalanceCoupler object.

    @param physics list (or dict with keys ['Solver1', 'Solver2']) of two PhysicsDriver. If a list is used, it has to be provided in the same order than the keys here.
        The provided PhysicsDriver must implement the iterateTimeStep() method (together with solveTimeStep()) and accept new accuracy (for the solveTimeStep() method) through setInputDoubleValue('Accuracy', value).
    @param exchangers list (or dict with keys ['1to2', '2to1', 'Residual1', 'Residual2']) of four Exchanger. If a list is used, it has to be provided in the same order than the keys here.
    @param dataManagers list (or dict with keys ['Residuals']) of one LocalDataManager (not just a DataManager).
        The residuals must be stored in this DataManager as double values under the names 'Residual1' and 'Residual2'.
    """
    Coupler.__init__(self, physics, exchangers, dataManagers)
    # -- Validate and unpack the two solvers.
    if not isinstance(physics, (dict, list)):
        raise Exception("AdaptiveResidualBalanceCoupler.__init__ physics must be either a dictionary or a list.")
    if len(physics) != 2:
        raise Exception("AdaptiveResidualBalanceCoupler.__init__ There must be exactly two PhysicsDriver, not {}.".format(len(physics)))
    if isinstance(physics, dict):
        for key in physics.keys():
            if key not in ["Solver1", "Solver2"]:
                raise Exception('AdaptiveResidualBalanceCoupler.__init__ if physics is provided as a dictionary, the keys must be : ["Solver1", "Solver2"]. We found : {}.'.format(list(physics.keys())))
        self._solver1 = physics["Solver1"]
        self._solver2 = physics["Solver2"]
    else:
        self._solver1 = physics[0]
        self._solver2 = physics[1]
    # -- Validate and unpack the four exchangers.
    if not isinstance(exchangers, (dict, list)):
        raise Exception("AdaptiveResidualBalanceCoupler.__init__ exchangers must be either a dictionary or a list.")
    if len(exchangers) != 4:
        raise Exception("AdaptiveResidualBalanceCoupler.__init__ There must be exactly four Exchanger, not {}.".format(len(exchangers)))
    if isinstance(exchangers, dict):
        for key in exchangers.keys():
            if key not in ["1to2", "2to1", "Residual1", "Residual2"]:
                raise Exception('AdaptiveResidualBalanceCoupler.__init__ if exchangers is provided as a dictionary, the keys must be : ["1to2", "2to1", "Residual1", "Residual2"]. We found : {}.'.format(list(exchangers.keys())))
        self._exchanger1to2 = exchangers["1to2"]
        self._exchanger2to1 = exchangers["2to1"]
        self._exchangerResidual1 = exchangers["Residual1"]
        self._exchangerResidual2 = exchangers["Residual2"]
    else:
        self._exchanger1to2 = exchangers[0]
        self._exchanger2to1 = exchangers[1]
        self._exchangerResidual1 = exchangers[2]
        self._exchangerResidual2 = exchangers[3]
    # -- Validate and unpack the data manager holding the residuals.
    if not isinstance(dataManagers, (dict, list)):
        raise Exception("AdaptiveResidualBalanceCoupler.__init__ dataManagers must be either a dictionary or a list.")
    if len(dataManagers) != 1:
        raise Exception("AdaptiveResidualBalanceCoupler.__init__ There must be exactly one DataManager, not {}.".format(len(dataManagers)))
    if isinstance(dataManagers, dict):
        for key in dataManagers.keys():
            if key not in ["Residuals"]:
                raise Exception('AdaptiveResidualBalanceCoupler.__init__ if dataManagers is provided as a dictionary, the keys must be : ["Residuals"]. We found : {}.'.format(list(dataManagers.keys())))
        self._data = dataManagers["Residuals"]
    else:
        self._data = dataManagers[0]
    if not isinstance(self._data, LocalDataManager):
        raise Exception("AdaptiveResidualBalanceCoupler.__init__ The provided Datamanager must be a LocalDataManager.")
    # -- Printing and failure policy.
    self._iterationPrinter = Printer(2)
    self._leaveIfFailed = False
    # -- Algorithm parameters and internal state
    #    (see setConvergenceParameters() and setConvRateInit() for the tunable ones).
    self._epsSolver1Ref = 1e-4            # target residual for solver 1
    self._convRateSolver1Initial = 0.1    # initial convergence rate assumed for solver 1
    self._accuracySolver1 = 0.            # accuracy currently requested to solver 1
    self._epsSolver2Ref = 1e-4            # target residual for solver 2
    self._residualSolver2Initial = 0.     # initial residual of solver 2
    self._convRateSolver2Initial = 0.1    # initial convergence rate assumed for solver 2
    self._accuracySolver2 = 0.            # accuracy currently requested to solver 2
    self._residualTotal = 0.              # normalized total residual
    self._residualHalfTotal = 0.          # normalized total "half" residual
    self._iter = 0                        # current iteration index, reset by initTimeStep()
    self._maxiter = 100
def setConvergenceParameters(self, targetResidualSolver1, targetResidualSolver2, maxiter):
    """! Define the stopping criteria of the coupling iterations.

    @param targetResidualSolver1 residual threshold requested for solver 1. Default value: 1.E-4.
    @param targetResidualSolver2 residual threshold requested for solver 2. Default value: 1.E-4.
    @param maxiter upper bound on the number of coupling iterations. Default value: 100.
    """
    # These values are read by iterateTimeStep() to adapt the accuracy asked to each solver.
    self._maxiter = maxiter
    self._epsSolver1Ref = targetResidualSolver1
    self._epsSolver2Ref = targetResidualSolver2
def setConvRateInit(self, convRateSolver1Initial, convRateSolver2Initial):
    """! Define the convergence rates assumed before any rate has been measured.

    These values are only used for the very first coupling iteration, before
    actual convergence rates become available.

    @param convRateSolver1Initial initial convergence rate assumed for solver 1. Default value: 0.1.
    @param convRateSolver2Initial initial convergence rate assumed for solver 2. Default value: 0.1.
    """
    self._convRateSolver2Initial = convRateSolver2Initial
    self._convRateSolver1Initial = convRateSolver1Initial
def setPrintLevel(self, level):
    """! Set the print level during iterations (0=None, 1 keeps last iteration, 2 prints every iteration).

    @param level integer in range [0;2]. Default: 2.
    @throw Exception if level is not 0, 1 or 2.
    """
    # Idiomatic membership test (was: "not level in [0, 1, 2]").
    if level not in (0, 1, 2):
        raise Exception("AdaptiveResidualBalanceCoupler.setPrintLevel level should be one of [0, 1, 2]!")
    self._iterationPrinter.setPrintLevel(level)
def setFailureManagement(self, leaveIfSolvingFailed):
    """! Choose the behavior of the coupling loop when a solver fails (solveTimeStep() returns False).

    @param leaveIfSolvingFailed True to stop iterating on solver failure, False to keep iterating. Default: False.
    """
    self._leaveIfFailed = leaveIfSolvingFailed
def solveTimeStep(self):
    """! See c3po.PhysicsDriver.PhysicsDriver.solveTimeStep().

    Runs iterate() until convergence, failure (if failure management asks to
    stop) or the maximum number of iterations is reached.
    @return True if the last iteration both succeeded and converged.
    """
    hasConverged = False
    solveOk = True
    while self._iter < self._maxiter and not hasConverged:
        # Stop on failure only when the user asked for it (setFailureManagement).
        if self._leaveIfFailed and not solveOk:
            break
        self.iterate()
        solveOk, hasConverged = self.getIterateStatus()
    if self._iterationPrinter.getPrintLevel() == 1:
        # Print level 1: keep only the final iteration line.
        self._iterationPrinter.reprint(tmplevel=2)
    return solveOk and hasConverged
def iterateTimeStep(self):
    """! See c3po.PhysicsDriver.PhysicsDriver.iterateTimeStep().

    Performs one iteration of the adaptive residual balance algorithm: the
    accuracy ('Accuracy' input value) asked to each solver is adapted from the
    observed convergence rates so that both residuals stay balanced with
    respect to their targets (_epsSolver1Ref / _epsSolver2Ref).

    @return (succeed, converged): succeed is the AND of both solvers' solve
    status; converged becomes True once the accuracy asked to solver 1 has
    reached its target value.
    """
    converged = False
    if self._iter == 0:
        # -- Initial residual for Solver1, obtained from a first iteration during the initialisation
        self._solver1.iterate()
        self._exchangerResidual1.exchange()
        residualSolver1Initial = self._data.getOutputDoubleValue("Residual1")
        # -- Computation of the initial value of the normalized total residual
        self._residualTotal = residualSolver1Initial / self._epsSolver1Ref
        # -- Convergence criteria for Solver1
        self._accuracySolver1 = self._convRateSolver1Initial * residualSolver1Initial
        self._solver1.setInputDoubleValue('Accuracy', self._accuracySolver1)
        # -- First iteration for Solver1
        self._solver1.solve()
        # -- Get the precision reached by Solver1 after the first iteration
        self._exchangerResidual1.exchange()
        # -- Exchange physical data between Solver1 and Solver2
        self._exchanger1to2.exchange()
        # -- Computation of the initial residual for Solver2
        self._solver2.iterate()
        self._exchangerResidual2.exchange()
        self._residualSolver2Initial = self._data.getOutputDoubleValue('Residual2')
        # -- Initial value of the total "demi residual" (half-iteration residual: both residuals normalized by their targets)
        self._residualHalfTotal = self._residualSolver2Initial / self._epsSolver2Ref + self._data.getOutputDoubleValue('Residual1') / self._epsSolver1Ref
        # -- Convergence criteria for Solver2
        self._accuracySolver2 = self._convRateSolver2Initial / 2. * residualSolver1Initial / self._epsSolver1Ref * self._epsSolver2Ref
        self._solver2.setInputDoubleValue('Accuracy', self._accuracySolver2)
        if self._iterationPrinter.getPrintLevel() > 0:
            self._iterationPrinter.print("Adaptive Residual Balance iteration {} accuracies: {} ; {}".format(self._iter, self._accuracySolver1, self._accuracySolver2))
        # -- First iteration for Solver2
        self._solver2.solve()
        # -- End of the first multiphysics iteration
    else:
        # Restart the solvers' time step so they can be solved again with updated data.
        self._solver1.abortTimeStep()
        self._solver1.initTimeStep(self._dt)
        self._solver2.abortTimeStep()
        self._solver2.initTimeStep(self._dt)
        # -- Exchange precision reached by Solver2 and compute current partial residual and intermediate coeff
        self._exchangerResidual2.exchange()
        residualPartial = self._data.getOutputDoubleValue('Residual2') / self._epsSolver2Ref
        solver2ResidualScaled = self._residualSolver2Initial / self._epsSolver2Ref * self._epsSolver1Ref / 2.
        lastResidual = self._residualTotal
        # -- Computation of the initial residual for Solver1
        self._solver1.iterate()
        self._exchangerResidual1.exchange()
        residualSolver1Initial = self._data.getOutputDoubleValue("Residual1")
        # -- Compute total residual and convergence rate
        self._residualTotal = residualSolver1Initial / self._epsSolver1Ref + residualPartial
        convRateSolver1 = self._residualTotal / lastResidual
        # -- Deal with the new precision computed: we don't want a new precision smaller than the targeted one! And if one solver reachs its targeted precision, the one for the second solver is also set to its targeted value
        if self._accuracySolver1 > self._epsSolver1Ref:
            # -- Computation of the new precision for Solver1
            self._accuracySolver1 = convRateSolver1 * solver2ResidualScaled
            self._accuracySolver1 = max(self._accuracySolver1, self._epsSolver1Ref)
            if self._accuracySolver1 == self._epsSolver1Ref:
                # Solver1 reached its target: force Solver2 to its target too and declare convergence.
                self._accuracySolver2 = self._epsSolver2Ref
                self._solver2.setInputDoubleValue('Accuracy', self._epsSolver2Ref)
                converged = True
            self._solver1.setInputDoubleValue('Accuracy', self._accuracySolver1)
        else:
            converged = True
        accuracy1ToPrint = self._accuracySolver1
        # -- Computation of Solver1 with the new precision computed
        self._solver1.solve()
        # -- Exchange physical data between Solver1 and Solver2
        self._exchanger1to2.exchange()
        # -- Exchange reached precision by Solver1
        self._exchangerResidual1.exchange()
        # -- Deal with the new precision computed: we don't want a new precision smaller than the targeted one! And if one solver reachs its targeted precision, the one for the second solver is also set to its targeted value
        if self._accuracySolver2 > self._epsSolver2Ref:
            # -- Computation of the initial residual for Solver2
            self._solver2.iterate()
            self._exchangerResidual2.exchange()
            self._residualSolver2Initial = self._data.getOutputDoubleValue('Residual2')
            # -- Computation of total current total residual
            residualDemiTotalOld = self._residualHalfTotal
            self._residualHalfTotal = self._residualSolver2Initial / self._epsSolver2Ref + self._data.getOutputDoubleValue('Residual1') / self._epsSolver1Ref
            # -- Convergence rate for Solver2
            convRateSolver2 = self._residualHalfTotal / residualDemiTotalOld
            # -- Computation of the new precision for Solver2
            self._accuracySolver2 = convRateSolver2 / 2. * residualSolver1Initial / self._epsSolver1Ref * self._epsSolver2Ref
            if self._accuracySolver2 < self._epsSolver2Ref:
                # Solver2 reached its target: force Solver1 to its target too.
                self._accuracySolver2 = self._epsSolver2Ref
                self._accuracySolver1 = self._epsSolver1Ref
                self._solver1.setInputDoubleValue('Accuracy', self._epsSolver1Ref)
            self._solver2.setInputDoubleValue('Accuracy', self._accuracySolver2)
        if self._iterationPrinter.getPrintLevel() > 0:
            self._iterationPrinter.print("Adaptive Residual Balance iteration {} accuracies: {} ; {}".format(self._iter, accuracy1ToPrint, self._accuracySolver2))
        # -- Computation of Solver2 with the new precision computed
        self._solver2.solve()
    # NOTE(review): indentation was reconstructed; the 2to1 exchange is placed at method level
    # so Solver2 results reach Solver1 after every iteration (including the first) - confirm
    # against the upstream source.
    self._exchanger2to1.exchange()
    succeed = self._solver1.getSolveStatus() and self._solver2.getSolveStatus()
    self._iter += 1
    return succeed, converged
def getIterateStatus(self):
    """! See c3po.PhysicsDriver.PhysicsDriver.getIterateStatus(). """
    # Explicitly use the PhysicsDriver base-class behavior (the original docstring
    # wrongly referenced getSolveStatus()).
    return PhysicsDriver.getIterateStatus(self)
def getSolveStatus(self):
    """! See c3po.PhysicsDriver.PhysicsDriver.getSolveStatus(). """
    # Explicitly use the PhysicsDriver base-class behavior.
    return PhysicsDriver.getSolveStatus(self)
def initTimeStep(self, dt):
    """! See c3po.PhysicsDriver.PhysicsDriver.initTimeStep().

    @param dt the time step to initialize.
    @return the result of Coupler.initTimeStep().
    """
    # A new time step restarts the adaptive residual balance from iteration 0.
    # (Fixed: dataset-extraction metadata fused onto the return line broke the syntax.)
    self._iter = 0
    return Coupler.initTimeStep(self, dt)
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contain the class CrossedSecantCoupler. """
from __future__ import print_function, division
from c3po.Coupler import Coupler
from c3po.CollaborativeDataManager import CollaborativeDataManager
from c3po.services.Printer import Printer
class CrossedSecantCoupler(Coupler):
    """! CrossedSecantCoupler inherits from Coupler and proposes a fixed point algorithm with crossed secant acceleration.

    The class proposes an algorithm for the resolution of F(X) = X. Thus CrossedSecantCoupler is a Coupler working with :

    - A single PhysicsDriver (possibly a Coupler) defining the calculations to be made each time F is called.
    - A list of DataManager allowing to manipulate the data in the coupling (the X).
    - Two Exchanger allowing to go from the PhysicsDriver to the DataManager and vice versa.

    Each DataManager is normalized with its own norm got after the first iteration.
    They are then used as a single DataManager using CollaborativeDataManager.

    At each iteration we do (with n the iteration number):

        X^{n+1} = F(X^{n}) - ( F(X^{n}) - X^{n} ) * [ ( F(X^{n}) - F(X^{n-1}) ) .dot. ( F(X^{n}) - X^{n} - ( F(X^{n-1}) - X^{n-1} ) ) ] / (|| F(X^{n}) - X^{n} - ( F(X^{n-1}) - X^{n-1} ) ||**2)

    The convergence criteria is : ||F(X^{n}) - X^{n}|| / ||F(X^{n})|| < tolerance. The default norm used is the infinite norm. setNormChoice() allows to choose another one.

    The default value of tolerance is 1.E-6. Call setConvergenceParameters() to change it.
    The default maximum number of iterations is 100. Call setConvergenceParameters() to change it.
    """

    def __init__(self, physics, exchangers, dataManagers):
        """! Build a CrossedSecantCoupler object.

        @param physics list of only one PhysicsDriver (possibly a Coupler).
        @param exchangers list of exactly two Exchanger allowing to go from the PhysicsDriver to the DataManager and vice versa.
        @param dataManagers list of DataManager.
        @throw Exception if the arguments are not lists of the expected lengths.
        """
        Coupler.__init__(self, physics, exchangers, dataManagers)
        self._tolerance = 1.E-6
        self._maxiter = 100
        self._iterationPrinter = Printer(2)
        if not isinstance(physics, list) or not isinstance(exchangers, list) or not isinstance(dataManagers, list):
            raise Exception("CrossedSecantCoupler.__init__ physics, exchangers and dataManagers must be lists!")
        if len(physics) != 1:
            raise Exception("CrossedSecantCoupler.__init__ There must be only one PhysicsDriver")
        if len(exchangers) != 2:
            raise Exception("CrossedSecantCoupler.__init__ There must be exactly two Exchanger")

    def setConvergenceParameters(self, tolerance, maxiter):
        """! Set the convergence parameters (tolerance and maximum number of iterations).

        @param tolerance the convergence threshold in ||F(X^{n}) - X^{n}|| / ||X^{n+1}|| < tolerance.
        @param maxiter the maximal number of iterations.
        """
        self._tolerance = tolerance
        self._maxiter = maxiter

    def setPrintLevel(self, level):
        """! Set the print level during iterations (0=None, 1 keeps last iteration, 2 prints every iteration).

        @param level integer in range [0;2]. Default: 2.
        @throw Exception if level is not 0, 1 or 2.
        """
        if level not in (0, 1, 2):
            # Fixed: the message used to reference FixedPointCoupler (copy-paste leftover).
            raise Exception("CrossedSecantCoupler.setPrintLevel level should be one of [0, 1, 2]!")
        self._iterationPrinter.setPrintLevel(level)

    def solveTimeStep(self):
        """! Solve a time step using the fixed-point algorithm with crossed secant acceleration.

        See also c3po.PhysicsDriver.PhysicsDriver.solveTimeStep().
        @return True if the physics solved correctly and the iterations converged.
        """
        iiter = 0
        error = self._tolerance + 1.
        physics = self._physicsDrivers[0]
        physics2Data = self._exchangers[0]
        data2physics = self._exchangers[1]
        # Initialisation : iteration 0
        if self._iterationPrinter.getPrintLevel() > 0:
            self._iterationPrinter.print("crossed secant iteration {} ".format(iiter))
        physics.solve()
        physics2Data.exchange()
        data = CollaborativeDataManager(self._dataManagers)
        normData = self.readNormData()
        self.normalizeData(normData)
        diffData = data.clone()
        iiter += 1
        # First iteration without acceleration
        self.abortTimeStep()
        self.initTimeStep(self._dt)
        self.denormalizeData(normData)
        data2physics.exchange()
        physics.solve()
        physics2Data.exchange()  # data = G(X0) , previousData = X0
        self.normalizeData(normData)
        diffData -= data
        diffDataOld = diffData.clone()  # G(x0) - x0
        error = self.getNorm(diffData) / self.getNorm(data)
        iiter += 1
        if self._iterationPrinter.getPrintLevel() > 0:
            self._iterationPrinter.print("crossed secant iteration {} error : {:.5e} ".format(iiter - 1, error))
        dataOld = data.clone()  # dataOld = X1 = G(x0)
        diffData.copy(data)
        while error > self._tolerance and iiter < self._maxiter:
            self.abortTimeStep()
            self.initTimeStep(self._dt)
            self.denormalizeData(normData)
            data2physics.exchange()
            physics.solve()
            physics2Data.exchange()
            self.normalizeData(normData)
            diffData -= data
            error = self.getNorm(diffData) / self.getNorm(data)
            iiter += 1
            if self._iterationPrinter.getPrintLevel() > 0:
                self._iterationPrinter.print("crossed secant iteration {} error : {:.5e} ".format(iiter - 1, error))
            if error > self._tolerance:
                # Crossed secant acceleration step (see the class docstring for the formula).
                dataOld -= data
                diffDataOld -= diffData
                normDenominator = diffDataOld.norm2()
                factor = - dataOld.dot(diffDataOld) / (normDenominator * normDenominator)
                dataOld.copy(data)
                diffDataOld.copy(diffData)
                diffData *= factor
                data += diffData
                diffData.copy(data)
        if self._iterationPrinter.getPrintLevel() == 1:
            self._iterationPrinter.reprint(tmplevel=2)
        self.denormalizeData(normData)
        return physics.getSolveStatus() and error <= self._tolerance
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contain the class MPICollaborativeDataManager. """
from __future__ import print_function, division
import math
from mpi4py import MPI
from c3po.CollaborativeDataManager import CollaborativeDataManager
from c3po.CollaborativeObject import CollaborativeObject
from c3po.mpi.MPIRemote import MPIRemote
from c3po.mpi.MPIDomainDecompositionDataManager import MPIDomainDecompositionDataManager
from c3po.mpi.MPICollectiveDataManager import MPICollectiveDataManager
class MPICollaborativeDataManager(CollaborativeDataManager):
    """! MPICollaborativeDataManager is the MPI collaborative version of c3po.CollaborativeDataManager.CollaborativeDataManager
    (for collaborative MPI paradigm).

    It allows to handle a set of c3po.DataManager.DataManager (some of them being remote) as a single one. Thanks to this class,
    data can be distributed on different MPI processes but still used in the same way.

    When at least one MPIRemote is present, MPICollaborativeDataManager uses collective MPI communications: the object must
    be built and used in the same way for all the involved processes. They must all share the same communicator, and all the processes
    of that communicator must be involved.
    """

    def __init__(self, dataManagers, mpiComm=None):
        """! Build a MPICollaborativeDataManager object.

        Has the same form than CollaborativeDataManager.__init__() but can also contain MPIRemote objects.

        When at least one MPIRemote is present (or if mpiComm is not None), MPICollaborativeDataManager uses collective MPI
        communications: the object must be built and used in the same way for all the involved processes. They must all share the same
        communicator, and all the processes of that communicator must be involved.

        @param dataManagers a list of c3po.DataManager.DataManager.
        @param mpiComm If not None, forces MPICollaborativeDataManager to use collective MPI communications and to use this
        communicator.
        @throw Exception if the communicators of the provided MPIRemote objects are inconsistent.
        """
        localData = []
        self.mpiComm = mpiComm
        self.isMPI = mpiComm is not None
        indexToIgnore = []
        for data in dataManagers:
            if mpiComm is None and isinstance(data, MPIRemote):
                # Deduce the communicator from the first remote object, then check the others against it.
                # NOTE(review): when mpiComm is given explicitly, remote communicators are not checked
                # against it - confirm this is intended.
                if not self.isMPI:
                    if data.mpiComm == MPI.COMM_NULL:
                        raise Exception("MPICollaborativeDataManager.__init__ All distant processes must be part of the communicator (MPI.COMM_NULL found).")
                    self.isMPI = True
                    self.mpiComm = data.mpiComm
                else:
                    if self.mpiComm != data.mpiComm:
                        raise Exception("MPICollaborativeDataManager.__init__ All distant processes must used the same MPI communicator")
            if not isinstance(data, MPIRemote):
                # Only local data are given to the base class; remote ones are reached through MPI communications.
                if isinstance(data, MPICollaborativeDataManager):
                    localData += data.dataManagers
                elif isinstance(data, MPIDomainDecompositionDataManager):
                    localView = data.getLocalView()
                    localData.append(localView)
                else:
                    if isinstance(data, MPICollectiveDataManager) and data.mpiComm.Get_rank() != 0:
                        # Non-root ranks of a MPICollectiveDataManager are excluded from constant operators.
                        indexToIgnore.append(len(localData))
                    localData.append(data)
        CollaborativeDataManager.__init__(self, localData)
        self.ignoreForConstOperators(indexToIgnore)
        CollaborativeObject.__init__(self, dataManagers)  # pylint: disable=non-parent-init-called

    def cloneEmpty(self):
        """! Return a clone of self without copying the data.

        @return An empty clone of self.
        """
        notMPIoutput = CollaborativeDataManager.cloneEmpty(self)
        output = MPICollaborativeDataManager(notMPIoutput.dataManagers)
        output.mpiComm = self.mpiComm
        output.isMPI = self.isMPI
        return output

    def normMax(self):
        """! Return the infinite norm.

        @return The max of the absolute values of the scalars and of the infinite norms of the MED fields, reduced over all processes.
        """
        norm = CollaborativeDataManager.normMax(self)
        if self.isMPI:
            norm = self.mpiComm.allreduce(norm, op=MPI.MAX)
        return norm

    def norm2(self):
        """! Return the norm 2.

        @return sqrt(sum_i(val[i] * val[i])) where val[i] stands for each scalar and each component of the MED fields, reduced over all processes.
        """
        norm = CollaborativeDataManager.norm2(self)
        if self.isMPI:
            # Each process contributes the square of its local norm; the global norm is the root of the sum.
            norm = self.mpiComm.allreduce(norm * norm, op=MPI.SUM)
            norm = math.sqrt(norm)
        return norm

    def dot(self, other):
        """! Return the scalar product of self with other.

        @param other a MPICollaborativeDataManager consistent with self.
        @return the scalar product of self with other.
        @throw Exception if self and other are not consistent.
        """
        result = CollaborativeDataManager.dot(self, other)
        if self.isMPI:
            result = self.mpiComm.allreduce(result, op=MPI.SUM)
        return result
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contain the class MPIDomainDecompositionDataManager. """
from __future__ import print_function, division
import math
from mpi4py import MPI
from c3po.LocalDataManager import LocalDataManager
class MPIDomainDecompositionDataManager(LocalDataManager):
    """! MPIDomainDecompositionDataManager is the MPI collaborative version of the c3po.DataManager.DataManager in which
    all processes have locally only a part of the data.

    Can replace, without impact, a c3po.LocalDataManager.LocalDataManager for a calculation on a single process, if the MPI
    environment is available.

    MPIDomainDecompositionDataManager behaves a little bit like a MPICollaborativeDataManager built with only one LocalDataManager
    and a mpiComm. However, unlike MPICollaborativeDataManager which will be expanded by MPIExchanger, and therefore seen as a
    simple LocalDataManager in this case, MPIDomainDecompositionDataManager will be directly handled by MPIExchanger.
    This is usually preferable when using a c3po.mpi.mpiExchangeMethods.MPIExchangeMethod.MPIExchangeMethod as exchange method.
    """

    def __init__(self, mpiComm):
        """! Build a MPIDomainDecompositionDataManager object.

        @param mpiComm MPI communicator. It must be shared by all processes involved in the MPIDomainDecompositionDataManager
        (and all processes of this MPI communicator must be involved in the MPIDomainDecompositionDataManager).
        """
        LocalDataManager.__init__(self)
        self.mpiComm = mpiComm

    def getMPIComm(self):
        """! (Optional) Return the MPI communicator used by the code for parallel computations.

        @return (mpi4py.Comm) mpi4py communicator.
        """
        return self.mpiComm

    def cloneEmpty(self):
        """! Return a clone of self without copying the data.

        @return An empty clone of self.
        """
        output = MPIDomainDecompositionDataManager(self.mpiComm)
        # valuesDouble / fieldsDouble are deliberately left empty: they hold the data being "cloned empty".
        # NOTE(review): the remaining containers are shared by reference with self - confirm this is intended.
        output.valuesInt = self.valuesInt
        output.valuesString = self.valuesString
        output.fieldsInt = self.fieldsInt
        output.fieldsDoubleTemplates = self.fieldsDoubleTemplates
        return output

    def normMax(self):
        """! Return the infinite norm.

        @return The max of the absolute values of the scalars and of the infinite norms of the MED fields, reduced over all processes.
        """
        norm = LocalDataManager.normMax(self)
        return self.mpiComm.allreduce(norm, op=MPI.MAX)

    def norm2(self):
        """! Return the norm 2.

        @return sqrt(sum_i(val[i] * val[i])) where val[i] stands for each scalar and each component of the MED fields, reduced over all processes.
        """
        norm = LocalDataManager.norm2(self)
        # Each process contributes the square of its local norm; the global norm is the root of the sum.
        norm = self.mpiComm.allreduce(norm * norm, op=MPI.SUM)
        return math.sqrt(norm)

    def dot(self, other):
        """! Return the scalar product of self with other.

        @param other a MPIDomainDecompositionDataManager consistent with self.
        @return the scalar product of self with other.
        @throw Exception if self and other are not consistent.
        """
        result = LocalDataManager.dot(self, other)
        return self.mpiComm.allreduce(result, op=MPI.SUM)

    def getLocalView(self):
        """! Return a new LocalDataManager that holds the same data than self.

        @return a LocalDataManager sharing (by reference) the data containers of self.
        """
        localView = LocalDataManager()
        localView.valuesDouble = self.valuesDouble
        localView.valuesInt = self.valuesInt
        localView.valuesString = self.valuesString
        localView.fieldsDouble = self.fieldsDouble
        localView.fieldsInt = self.fieldsInt
        localView.fieldsDoubleTemplates = self.fieldsDoubleTemplates
        return localView
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contain the class MPISharedRemappingMulti1D3D. """
from __future__ import print_function, division
from c3po.exchangeMethods.SharedRemappingMulti1D3D import shift1DFields, Multi1D3DRemapper
from c3po.CollaborativeObject import CollaborativeObject
from c3po.mpi.MPIRemote import MPIRemote
from c3po.mpi.mpiExchangeMethods.MPISharedRemapping import MPISharedRemapping, MPIRemapper
class MPIMulti1D3DRemapper(MPIRemapper):
"""! Allow to share the mesh projection for different MPISharedRemappingMulti1D3D objects by building them with the same instance of this class. """
def __init__(self, xCoordinates, yCoordinates, indexTable, weights, physics, meshAlignment=False, offset=[0., 0., 0.], rescaling=1., rotation=0., outsideCellsScreening=False, reverseTransformations=True):
"""! Build a MPIMulti1D3DRemapper object, the MPI version of c3po.exchangeMethods.SharedRemappingMulti1D3D.Multi1D3DRemapper.
The constructor has the same form than c3po.exchangeMethods.SharedRemappingMulti1D3D.Multi1D3DRemapper.__init__() with one additional mandatory parameter: physics.
@param xCoordinates see c3po.exchangeMethods.SharedRemappingMulti1D3D.Multi1D3DRemapper.__init__().
@param yCoordinates see c3po.exchangeMethods.SharedRemappingMulti1D3D.Multi1D3DRemapper.__init__().
@param indexTable see c3po.exchangeMethods.SharedRemappingMulti1D3D.Multi1D3DRemapper.__init__().
@param weights see c3po.exchangeMethods.SharedRemappingMulti1D3D.Multi1D3DRemapper.__init__().
@param physics The list (or a c3po.mpi.MPICollaborativePhysicsDriver.MPICollaborativePhysicsDriver) of the 1D c3po.PhysicsDriver.PhysicsDriver involved in the coupling. We just use it to identify remote ones.
@param meshAlignment see c3po.exchangeMethods.SharedRemappingMulti1D3D.Multi1D3DRemapper. The source mesh is the multi1D one and the target mesh the 3D one.
@param offset see c3po.exchangeMethods.SharedRemappingMulti1D3D.Multi1D3DRemapper.__init__(). The source mesh is the multi1D one and the target mesh the 3D one.
@param rescaling see c3po.exchangeMethods.SharedRemappingMulti1D3D.Multi1D3DRemapper.__init__(). The source mesh is the multi1D one and the target mesh the 3D one.
@param rotation see c3po.exchangeMethods.SharedRemappingMulti1D3D.Multi1D3DRemapper.__init__(). The source mesh is the multi1D one and the target mesh the 3D one.
@param outsideCellsScreening see c3po.exchangeMethods.SharedRemappingMulti1D3D.Multi1D3DRemapper.__init__().
@param reverseTransformations see c3po.exchangeMethods.SharedRemappingMulti1D3D.Multi1D3DRemapper.__init__().
@warning There seems to be a bug in MEDCoupling that may cause wrong results when rescaling is used with a source mesh (multi1D) of
nature IntensiveConservation. In this case, using reverseTransformations=False should be enough to solve the problem.
"""
MPIRemapper.__init__(self, meshAlignment, offset, rescaling, rotation, outsideCellsScreening, reverseTransformations)
physicsList = []
if isinstance(physics, CollaborativeObject):
physics = [physics]
for physic in physics:
if isinstance(physic, CollaborativeObject):
physicsList += physic.getElementsRecursively()
else:
physicsList.append(physic)
if len(physicsList) != max(indexTable) + 1:
raise Exception("MPIMulti1D3DRemapper.__init__ the number of elements of physics ({}) is not coherent with indexTable (whose max + 1 value is {}).".format(len(physicsList), max(indexTable) + 1))
self._globalToLocal = [0] * len(physicsList)
nextIndex = 0
newWeights = []
for iphy, physic in enumerate(physicsList):
if isinstance(physic, MPIRemote):
self._globalToLocal[iphy] = -1
else:
self._globalToLocal[iphy] = nextIndex
newWeights.append(weights[iphy])
nextIndex += 1
self._nbLocal1DFields = nextIndex
newIndexTable = [0] * len(indexTable)
for position, index1D in enumerate(indexTable):
newIndexTable[position] = self._globalToLocal[index1D]
self._localMulti1D3DRemapper = Multi1D3DRemapper(xCoordinates, yCoordinates, newIndexTable, newWeights)
self.isInnerFieldBuilt = False
self._globalIndexTable = [[] for _ in range(max(indexTable) + 1)]
for position, index1D in enumerate(indexTable):
if index1D >= 0:
self._globalIndexTable[index1D].append(position)
self._shiftedFieldPositions = list(range(len(self._globalIndexTable)))
def buildInnerField(self, meshes1D):
"""! INTERNAL """
self._localMulti1D3DRemapper.buildInnerField(meshes1D)
self.isInnerFieldBuilt = True
self.isInit = False
def getInnerField(self):
"""! INTERNAL """
return self._localMulti1D3DRemapper.getInnerField()
def build3DField(self, fields1D, defaultValue=0.):
"""! INTERNAL """
return self._localMulti1D3DRemapper.build3DField(fields1D, defaultValue)
def build1DFields(self, field3D):
"""! INTERNAL """
return self._localMulti1D3DRemapper.build1DFields(field3D)
def getNumberOf1DFields(self):
"""! INTERNAL """
return self._nbLocal1DFields
    def shift1DFields(self, shiftMap):
        """! See Multi1D3DRemapper.shift1DFields() """
        # Apply the shift on the *global* bookkeeping first, using the module-level
        # shift1DFields helper (same name as this method, but a free function).
        availableFields, self._shiftedFieldPositions, self._globalIndexTable = shift1DFields(shiftMap, self._shiftedFieldPositions, self._globalIndexTable)
        # Rebuild the local index table: keep only the 1D fields actually held by
        # this process, i.e. those with a non-negative global-to-local index.
        tmpIndexTable = [[] for _ in range(self._nbLocal1DFields)]
        for index1D, positions in enumerate(self._globalIndexTable):
            if self._globalToLocal[index1D] >= 0:
                tmpIndexTable[self._globalToLocal[index1D]] = positions
        self._localMulti1D3DRemapper.setShiftedIndex([], tmpIndexTable)
        # After a shift the inner field no longer matches the new layout.
        self.isInnerFieldBuilt = False
        return availableFields
class MPISharedRemappingMulti1D3D(MPISharedRemapping):
    """! MPISharedRemappingMulti1D3D is the MPI version of c3po.exchangeMethods.SharedRemappingMulti1D3D.SharedRemappingMulti1D3D. """

    def __init__(self, remapper, reverse=False, defaultValue=0., linearTransform=(1., 0.)):
        """! Build a MPISharedRemappingMulti1D3D object, to be given to an c3po.mpi.MPIExchanger.MPIExchanger.

        @param remapper A MPIMulti1D3DRemapper object performing the projection. It can thus be shared with other instances of
        MPISharedRemappingMulti1D3D (its initialization will always be done only once).
        @param reverse see c3po.exchangeMethods.SharedRemappingMulti1D3D.SharedRemappingMulti1D3D.__init__(). Direct is multi1D -> 3D, reverse is 3D -> multi1D.
        @param defaultValue see c3po.exchangeMethods.SharedRemappingMulti1D3D.SharedRemappingMulti1D3D.__init__().
        @param linearTransform see c3po.exchangeMethods.SharedRemappingMulti1D3D.SharedRemappingMulti1D3D.__init__().
        """
        MPISharedRemapping.__init__(self, remapper, reverse, defaultValue, linearTransform)
        # Number of 1D fields held locally by this MPI process.
        self._numberOf1DFields = self._remapper.getNumberOf1DFields()

    def __call__(self, fieldsToGet, fieldsToSet, valuesToGet):
        """! Project the input fields one by one before returning them as outputs, in the same order. """
        numberOf1DFields = len(fieldsToSet) if self._isReverse else len(fieldsToGet)
        # The 1D fields must come in whole groups of self._numberOf1DFields.
        # (Rewritten from a nested conditional expression for readability.)
        if self._numberOf1DFields == 0:
            wrongNumber = numberOf1DFields != 0
        else:
            wrongNumber = numberOf1DFields % self._numberOf1DFields != 0
        if wrongNumber:
            msg = "The number of provided 1D fields ({}) is wrong.\n".format(numberOf1DFields)
            msg += "According to the provided remapper object, the number of 1D fields must be a multiple of {}.".format(self._numberOf1DFields)
            raise Exception(msg)
        if len(valuesToGet) != 0:
            raise Exception("MPISharedRemappingMulti1D3D: we cannot deal with scalar values.")
        if not self._remapper.isInnerFieldBuilt:
            self._remapper.buildInnerField([field.getMesh() for field in (fieldsToSet if self._isReverse else fieldsToGet)])
        if self._isReverse:
            # 3D -> multi1D: remap each 3D field onto the inner field, then split it
            # into its local 1D components.
            numberRemapping = 0 if numberOf1DFields == 0 else numberOf1DFields // self._numberOf1DFields
            innerField = self._remapper.getInnerField()
            if numberOf1DFields > 0:
                innerField.setNature(fieldsToSet[0].getNature())
            outputFields, outputValues = MPISharedRemapping.__call__(self, fieldsToGet, [innerField] * numberRemapping, valuesToGet)
            resu = []
            for field3D in outputFields:
                resu += self._remapper.build1DFields(field3D)
            return resu, outputValues
        # multi1D -> 3D: merge each group of 1D fields into one intermediate 3D field
        # before performing the shared remapping.
        indexFirst = 0
        intermediate3DField = []
        if len(fieldsToGet) > 0:
            while indexFirst + self._numberOf1DFields <= len(fieldsToGet):
                fields1D = fieldsToGet[indexFirst:indexFirst + self._numberOf1DFields]
                intermediate3DField.append(self._remapper.build3DField(fields1D, self._defaultValue))
                indexFirst += self._numberOf1DFields
        return MPISharedRemapping.__call__(self, intermediate3DField, fieldsToSet, valuesToGet)

    def getPatterns(self):
        """! See ExchangeMethod.getPatterns. """
        if self._isReverse:
            return [(0, self._numberOf1DFields, 0, 0), (1, 0, 0, 0)]
        return [(self._numberOf1DFields, 0, 0, 0), (0, 1, 0, 0)]
# Copyright (c) 2020, CEA
# All rights reserved.
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
""" Contains the class TransientLogger and its daughters Timekeeper and FortuneTeller. """
from __future__ import print_function, division
from abc import ABCMeta, abstractmethod
from datetime import datetime, timedelta
import time
from c3po.services.Printer import Printer
class TransientLogger(object):
    """! TransientLogger is the base class for the production of transient logging strings.

    Each method returns the string to be logged at the corresponding moment of the
    transient (start, time-step abort, time-step validation, termination).
    """
    # NOTE(review): "__metaclass__" is the Python 2 idiom; under Python 3 this class
    # attribute is inert, so ABCMeta does not actually enforce the @abstractmethod
    # decorators here (the NotImplementedError bodies still guard against direct
    # use). Confirm whether Python 2 compatibility is still required.
    __metaclass__ = ABCMeta

    @abstractmethod
    def initTransient(self, driver, tmax, finishAtTmax, stopIfStationary, presentTime):
        """! Method called before transient starts.

        @param driver (c3po.PhysicsDriver.PhysicsDriver) The caller of solveTransient().
        @param tmax (float) tmax argument given to solveTransient().
        @param finishAtTmax (bool) finishAtTmax argument given to solveTransient().
        @param stopIfStationary (bool) stopIfStationary argument given to solveTransient().
        @param presentTime (float) Present time when calling solveTransient().
        """
        raise NotImplementedError

    @abstractmethod
    def logAbort(self, dt, presentTime):
        """! Method called when time step is aborted.

        @param dt (float) Size of the failed time step.
        @param presentTime (float) Present time after calling abortTimeStep().
        """
        raise NotImplementedError

    @abstractmethod
    def logValidate(self, dt, presentTime):
        """! Method called when time step is validated.

        @param dt (float) Size of the validated time step.
        @param presentTime (float) Present time after calling validateTimeStep().
        """
        raise NotImplementedError

    @abstractmethod
    def terminateTransient(self, presentTime, stop, isStationary):
        """! Method called after the transient ends.

        @param presentTime (float) Present time after the transient.
        @param stop (bool) Indicate if the transient ends because of a stopping criteria.
        @param isStationary (bool) Indicate if the stopping criteria is due to isStationary().
        """
        raise NotImplementedError
class Timekeeper(TransientLogger):
    """! TransientLogger which provides information about transient progress.

    It counts aborted time steps and tracks the (min, max) range of the validated
    time-step sizes.
    """

    def __init__(self):
        # Driver class name (filled by initTransient).
        self._name = ""
        # Simulation time at the start of the transient.
        self._initialTime = 0.0
        # Target final time of the transient.
        self._tmax = 1e30
        # Total number of aborted time steps over the whole transient.
        self._totalAbort = 0
        # Aborts since the last validated time step.
        self._stepAbort = 0
        # (min, max) validated time-step sizes.
        self._dtRange = (1.e30, 0.0)

    def initTransient(self, driver, tmax, finishAtTmax, stopIfStationary, presentTime):
        """! See ``TransientLogger.initTransient``"""
        self._name = driver.__class__.__name__
        self._initialTime = presentTime
        self._tmax = tmax
        self._totalAbort = 0
        self._stepAbort = 0
        self._dtRange = (1.e30, 0.0)
        return "{}: transient starts at {:9.3e}s, finishAtTmax = {}, stopIfStationary = {}".format(
            self._name, presentTime, finishAtTmax, stopIfStationary)

    def logAbort(self, dt, presentTime):
        """! See ``TransientLogger.logAbort``"""
        self._stepAbort += 1
        return "{}: abort at {:9.3e}s, failed dt = {:9.3e}s".format(self._name, presentTime, dt)

    def _getProgressionStr(self, presentTime):
        """! INTERNAL """
        return "{:9.3e}s".format(presentTime)

    def logValidate(self, dt, presentTime):
        """! See ``TransientLogger.logValidate``"""
        dtMin, dtMax = self._dtRange
        self._dtRange = (min(dt, dtMin), max(dt, dtMax))
        message = "{}: validate at {}, dt = {:9.3e}s (#aborts={})".format(
            self._name, self._getProgressionStr(presentTime), dt, self._stepAbort)
        self._totalAbort += self._stepAbort
        self._stepAbort = 0
        return message

    def terminateTransient(self, presentTime, stop, isStationary):
        """! See ``TransientLogger.terminateTransient``"""
        if not stop:
            stopReason = "tmax is reached"
        elif isStationary:
            stopReason = "stationary is found"
        else:
            stopReason = "computeTimeStep asks to stop"
        return "{}: transient ends at {} because {}. Total #aborts = {}, dt range = {}s.".format(
            self._name, self._getProgressionStr(presentTime), stopReason, self._totalAbort, self._dtRange)
class FortuneTeller(Timekeeper):
    """! Timekeeper which estimates in addition the duration of the transient
    with Exponential Moving Average.
    """

    def __init__(self, relaxation=0.3):
        """! Build a FortuneTeller object.

        @param relaxation (float) Relaxation factor for the Exponential Moving Average,
            clamped into [0, 1]. Default: 0.3.
        """
        Timekeeper.__init__(self)
        # Fix: clamp on both sides. min() alone let negative factors through, which
        # would make the moving average oscillate/diverge.
        self._relaxation = max(0.0, min(1.0, relaxation))
        # Wall-clock seconds per simulated second (EMA).
        # Fix: start at None, consistently with initTransient() and with the None
        # check in _getEstimatedRemainingTime(), so the first measured rate seeds
        # the average instead of being averaged with an arbitrary 0.0.
        self._simuRate = None
        # Wall-clock time of the last validated step.
        self._realT0 = 0.0
        # Last estimated remaining wall-clock time (seconds).
        self._ert = 1e30

    def initTransient(self, driver, tmax, finishAtTmax, stopIfStationary, presentTime):
        """! See ``TransientLogger.initTransient``"""
        self._simuRate = None
        self._realT0 = time.time()
        self._ert = 1e30
        return Timekeeper.initTransient(self, driver, tmax, finishAtTmax, stopIfStationary, presentTime)

    def _getProgression(self, presentTime):
        """! INTERNAL Return the transient progression in percent (capped at 100)."""
        return min(1., (presentTime - self._initialTime) / (self._tmax - self._initialTime)) * 100.0

    def _getProgressionStr(self, presentTime):
        """! INTERNAL """
        return Timekeeper._getProgressionStr(self, presentTime) + " ({:6.2f} %)".format(
            self._getProgression(presentTime))

    def _getEstimatedRemainingTime(self, dt, presentTime):
        """! INTERNAL Update the rate EMA and return the estimated remaining wall-clock time."""
        realT1 = time.time()
        simuRate = (realT1 - self._realT0) / dt
        self._realT0 = realT1
        if self._simuRate is None:
            # First measurement: seed the moving average.
            self._simuRate = simuRate
        self._simuRate = simuRate * self._relaxation + self._simuRate * (1. - self._relaxation)
        return self._simuRate * (self._tmax - presentTime)

    def logValidate(self, dt, presentTime):
        """! See ``TransientLogger.logValidate``"""
        toPrint = Timekeeper.logValidate(self, dt, presentTime)
        self._ert = self._getEstimatedRemainingTime(dt=dt, presentTime=presentTime)
        if self._ert > 1.e-3:
            toPrint += ", estimated final time {}".format(
                (datetime.now() +
                 timedelta(seconds=int(self._ert))).strftime('%Y-%m-%d %H:%M:%S'))
        return toPrint
class TransientPrinter(object):
    """! INTERNAL.
    TransientPrinter writes information about transient in the standard output.

    It wraps a TransientLogger and prints the strings it produces through a Printer,
    honouring the Printer's print level (nothing is printed at level 0).
    """
    # Fix: the last line of this class had dataset extraction junk fused onto it,
    # which broke parsing; the closing statement is restored below.

    def __init__(self, transientLogger):
        """! Build a TransientPrinter object.

        @param transientLogger (c3po.services.TransientLogger.TransientLogger) The TransientLogger object to use.
        """
        self._printer = Printer(0)
        self._logger = transientLogger

    def initTransient(self, driver, tmax, finishAtTmax, stopIfStationary, presentTime):
        """! See TransientLogger.initTransient. """
        if self._printer.getPrintLevel() > 0:
            self._printer.print(self._logger.initTransient(driver, tmax, finishAtTmax, stopIfStationary, presentTime), tmplevel=2)

    def logAbort(self, dt, presentTime):
        """! See TransientLogger.logAbort. """
        if self._printer.getPrintLevel() > 0:
            self._printer.print(self._logger.logAbort(dt, presentTime))

    def logValidate(self, dt, presentTime):
        """! See TransientLogger.logValidate. """
        if self._printer.getPrintLevel() > 0:
            self._printer.print(self._logger.logValidate(dt, presentTime))

    def terminateTransient(self, presentTime, stop, isStationary):
        """! See TransientLogger.terminateTransient. """
        if self._printer.getPrintLevel() > 0:
            self._printer.print(self._logger.terminateTransient(presentTime, stop, isStationary), tmplevel=2)

    def getPrinter(self):
        """! Return the Printer object used.

        @return (c3po.services.Printer.Printer) the Printer object used.
        """
        return self._printer

    def setLogger(self, transientLogger):
        """! Set a new TransientLogger object.

        @param transientLogger (c3po.services.TransientLogger.TransientLogger) The TransientLogger object to use.
        """
        self._logger = transientLogger

    def getLogger(self):
        """! Return the TransientLogger object used.

        @return (c3po.services.TransientLogger.TransientLogger) The used TransientLogger object.
        """
        return self._logger
from rich.panel import Panel
from rich.progress import BarColumn, Progress, TextColumn
class Puller:
    """Pull classes will output progress and need to share the console as well as the Docker client"""

    def __init__(self, console, docker_client, image):
        self.console = console
        self.client = docker_client
        self.image = image
        # Shared progress renderer: one description column + one bar per layer task.
        self.progress_module = Progress(
            TextColumn("{task.description}"),
            BarColumn(bar_width=999, complete_style="blue", finished_style="blue"),
            refresh_per_second=4,
            console=self.console,
            expand=True,
        )

    def _print_pull_banner(self):
        """Tell the console user that the image is being pulled"""
        pull_banner = Panel.fit(
            f"The image [blue]{self.image}[default] is being downloaded from Docker Hub. This could take between 2 and 15 minutes depending on your internet connection and machine speed.",
            title="[bold blue]Downloading the Base Image",
        )
        self.console.print(pull_banner)

    def parse_output(self, generator):
        """Iterates over a docker pull generator and renders per-layer progress bars.

        ``generator`` is the decoded message stream returned by ``APIClient.pull``.
        """
        with self.progress_module as progress:
            tasks = {}
            completed_task_counter = 0
            # Add the main task as the first status message.
            # NOTE(review): assumes the first message always carries an "id" key --
            # confirm against the Docker API stream format.
            master_task = next(generator)
            tasks[master_task["id"]] = progress.add_task(
                f"[blue]{master_task['status']}"
            )
            for message in generator:
                # Fix: default to "" (not False) so the substring checks below cannot
                # raise TypeError on messages without a "status" key.
                status = message.get("status", "")
                layer_id = message.get("id", "")
                description = f"{status} {layer_id}"
                # TODO: Python 3.10 is not released yet, but this function should use the match-case syntax instead of if-statements when it is released
                if "Pulling fs layer" in status:
                    tasks[layer_id] = progress.add_task(description)
                elif any(
                    substring in status for substring in ["Downloading", "Extracting"]
                ):
                    current = message["progressDetail"]["current"]
                    total = message["progressDetail"]["total"]
                    progress.update(
                        tasks[layer_id],
                        total=total,
                        completed=current,
                        description=description,
                    )
                elif "Download complete" in status:
                    progress.update(
                        tasks[layer_id], total=1, completed=1, description=description
                    )
                elif "Pull complete" in status:
                    completed_task_counter += 1
                    progress.remove_task(tasks[layer_id])
                elif layer_id in tasks:
                    progress.update(tasks[layer_id], description=description)
                elif "Digest" in status:
                    progress.update(tasks[master_task["id"]], description=status)
                # Run this every iteration regardless of the status
                progress.update(
                    tasks[master_task["id"]],
                    total=len(tasks) - 1,
                    completed=completed_task_counter,
                )

    def pull(self):
        """Pull the specified image from Docker Hub"""
        self._print_pull_banner()
        # Tell the API to pull an image (returns a generator with log output).
        # NOTE(review): repr() of the image object is used here while str() is used
        # for tagging below -- presumably the image class overloads __repr__ with
        # the remote name; confirm.
        image = self.client.api.pull(repr(self.image), stream=True, decode=True)
        # Let the parser determine what to print
        self.parse_output(image)
        # Tag the image locally
        self.client.api.tag(str(self.image), self.image.local_name)
        # If the process hasn't exited, it's likely a success
        self.console.print_status(
            f"Saloon has been installed and tagged as [blue]{self.image}[/blue]"
        )
import re
from collections import deque
from rich import box
from rich.live import Live
from rich.panel import Panel
from rich.progress import Progress, SpinnerColumn, TextColumn
from rich.table import Table
class Builder:
    """
    Builds the image from source (requires full source repository)
    You can find the Dockerfile in <project_root>/container/Dockerfile
    """

    def __init__(self, console, docker_client, target_image, context):
        self.console = console
        self.client = docker_client
        self.target_image = target_image
        self.context = context
        # Spinner + dimmed status text, printed while a build step is running.
        self.progress_module = Progress(
            SpinnerColumn(),
            TextColumn("[bright_black]{task.description}"),
            console=self.console,
        )

    def _print_build_banner(self):
        """Tell the console user that the image is being built"""
        build_banner = Panel.fit(
            f"This will attempt to build the docker image locally. The output image will be tagged as [blue]{self.target_image}[/blue].",
            title="[bold blue]Building image",
        )
        self.console.print(build_banner)

    def _generate_table(self, header, messages):
        """Generate a single-column table showing *messages* under *header*."""
        table = Table(header, box=box.SIMPLE_HEAD, padding=(0, 0))
        table.add_column()
        for message in messages:
            # Get console width and account for padding so rows never wrap.
            console_width = self.console.width - 4
            shortened_message = message[0:console_width]
            table.add_row(f"{shortened_message}", style="bright_black")
        return table

    def parse_output(self, generator):
        """Iterates over a docker build generator, showing a live tail of the last 5 log lines per step."""
        tasks = list()
        current_step = next(generator).get("stream")
        # Fix: use a raw string. The original "^Step\ \d+\/\d+\ :.*$" relied on the
        # invalid escape sequences "\ " and "\/" surviving verbatim, which emits
        # DeprecationWarning and is slated to become a SyntaxError. This raw pattern
        # matches exactly the same strings.
        step_matcher = re.compile(r"^Step \d+/\d+ :.*$")
        with Live(transient=True, refresh_per_second=4) as live:
            for message in generator:
                stream = message.get("stream", None)
                error = message.get("error", None)
                if stream:
                    # Collapse all whitespace runs into single spaces.
                    message = " ".join(stream.split())
                    if step_matcher.match(message):
                        # TODO: Sometimes this bit is a bit choppy with the Live view
                        self.console.success(current_step)
                        current_step = message
                        tasks.clear()
                    else:
                        tasks.append(message)
                        live.update(self._generate_table(current_step, tasks[-5:]))
                elif error:
                    live.stop()
                    self.console.failure(message["error"])
                    for task in tasks:
                        self.console.print(task, style=None)
                    exit(1)

    def build(self):
        """Builds a Docker image provided a context"""
        self._print_build_banner()
        # Sets the target name, also known as the image tag
        target_name = self.target_image.local_name
        # Tell the API to build an image (returns a generator with log output)
        build = self.client.api.build(path=self.context, tag=target_name, decode=True)
        # Let the parser determine what to print
        self.parse_output(build)
        # If the process hasn't exited, it's likely a success
        self.console.print_status(
            "Successfully built the docker image from [blue]./container/Dockerfile[/blue]"
        )
        self.console.print_status(
            f"You can access the image with Docker CLI as [blue]{target_name}[/blue]"
        )
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in the pytest-shell-utilities project and our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at oss-coc@vmware.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
| /salt-analytics-framework-0.5.0.tar.gz/salt-analytics-framework-0.5.0/CODE_OF_CONDUCT.md | 0.572962 | 0.712157 | CODE_OF_CONDUCT.md | pypi |
from __future__ import annotations
import asyncio
import logging
from asyncio import Task
from typing import TYPE_CHECKING
from typing import TypeVar
import aiorun
from saf.pipeline import Pipeline
if TYPE_CHECKING:
from saf.models import AnalyticsConfig
log = logging.getLogger(__name__)
MN = TypeVar("MN", bound="Manager")
class Manager:
    """
    Pipelines Manager.

    Builds one Pipeline per configuration entry and owns the asyncio tasks
    running them.
    """

    def __init__(self: MN, config: AnalyticsConfig) -> None:
        self.config = config
        self.pipelines: dict[str, Pipeline] = {}
        for name, pipeline_config in config.pipelines.items():
            self.pipelines[name] = Pipeline(name, pipeline_config)
        # Maps pipeline name -> the asyncio Task currently running it.
        self.pipeline_tasks: dict[str, Task] = {}  # type: ignore[type-arg]
        # NOTE(review): asyncio.get_event_loop() outside a running loop is
        # deprecated since Python 3.10 -- confirm the supported Python range
        # before modernizing.
        self.loop = asyncio.get_event_loop()

    async def run(self: MN) -> None:
        """
        Async entry point to run the pipelines.

        Starts every enabled pipeline, idles until interrupted or cancelled, and
        guarantees the pipelines are stopped on the way out.
        """
        await self.start_pipelines()
        try:
            while True:
                try:
                    await asyncio.sleep(0.05)
                except (KeyboardInterrupt, asyncio.CancelledError):
                    break
        finally:
            await aiorun.shutdown_waits_for(self.stop_pipelines())

    async def await_stopped(self: MN) -> None:
        """
        Wait until all pipelines have been stopped.
        """
        await self.stop_pipelines()

    async def start_pipelines(self: MN) -> None:
        """
        Start the pipelines.
        """
        for name in self.pipelines:
            result = await self.start_pipeline(name)
            if result is not None:
                log.warning(result)

    async def stop_pipelines(self: MN) -> None:
        """
        Stop the pipelines.
        """
        # Iterate over a copy since stop_pipeline() mutates self.pipeline_tasks.
        for name in list(self.pipeline_tasks):
            result = await self.stop_pipeline(name)
            if result is not None:
                log.warning(result)

    async def start_pipeline(self: MN, name: str) -> str | None:
        """
        Start a pipeline by name.

        Returns an error/skip message, or None on success.
        """
        log.info("Starting pipeline %r", name)
        if name not in self.pipelines:
            return f"Cannot start unknown pipeline {name!r}"
        pipeline = self.pipelines[name]
        if pipeline.config.enabled is False:
            return f"Pipeline {name!r} is disabled, skipping start."
        if name in self.pipeline_tasks:
            return f"Pipeline {name!r} is already running"
        pipeline.__enter__()
        self.pipeline_tasks[name] = self.loop.create_task(pipeline.run())
        return None

    async def stop_pipeline(self: MN, name: str) -> str | None:
        """
        Stop a pipeline by name.

        Returns an error/skip message, or None on success.
        """
        log.info("Stopping pipeline %r", name)
        if name not in self.pipeline_tasks:
            return f"Pipeline {name!r} is not running. Not stopping it."
        task = self.pipeline_tasks.pop(name)
        if task.done() is not True:
            task.cancel()
            try:
                await task
            except asyncio.CancelledError:
                # Fix: awaiting a task we just cancelled re-raises CancelledError in
                # the awaiter unless the task swallows it; suppressing it here keeps
                # stop_pipelines() iterating over the remaining pipelines.
                pass
        pipeline = self.pipelines[name]
        # NOTE(review): __exit__ is invoked without the usual (exc_type, exc, tb)
        # arguments -- presumably Pipeline declares them optional; confirm.
        pipeline.__exit__()
        return None
from __future__ import annotations
import logging
from datetime import datetime
from datetime import timezone
from typing import TYPE_CHECKING
from typing import Any
from typing import Dict
from typing import Generic
from typing import List
from typing import Mapping
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from pydantic import BaseModel
from pydantic import Field
from pydantic import PrivateAttr
from pydantic import validator
from pydantic.generics import GenericModel
from saf.plugins import PluginsList
from saf.utils import dt
if TYPE_CHECKING:
from types import ModuleType
log = logging.getLogger(__name__)
class NonMutableModel(BaseModel):
    """
    Base class for non mutable models.
    """

    class Config:
        # Pydantic v1 switch: attribute assignment on instances raises an error.
        allow_mutation = False
# Type variable bound to NonMutableConfig, used to annotate ``self`` below.
NMC = TypeVar("NMC", bound="NonMutableConfig")


class NonMutableConfig(BaseModel):
    """
    Base class for non-mutable configurations.
    """

    # Back-reference to the owning AnalyticsConfig; populated in
    # AnalyticsConfig._init_private_attributes().
    _parent: AnalyticsConfig = PrivateAttr()

    @property
    def parent(self: NMC) -> AnalyticsConfig:
        """
        Return the parent configuration schema.
        """
        return self._parent

    class Config:
        allow_mutation = False
        underscore_attrs_are_private = True
# Type variable bound to PluginConfigMixin, used to annotate ``self`` below.
PCMI = TypeVar("PCMI", bound="PluginConfigMixin")


class PluginConfigMixin(NonMutableConfig):
    """
    Base class for plugin configuration schemas.
    """

    # Name of the plugin (as registered in PluginsList) this configuration targets.
    plugin: str
    # Configuration entry name; populated in AnalyticsConfig._init_private_attributes().
    _name: str = PrivateAttr()

    @property
    def name(self: PCMI) -> str:
        """
        Return the plugin name as defined in the configuration file.
        """
        return self._name

    @property
    def loaded_plugin(self: PCMI) -> ModuleType:
        """
        Return the plugin instance(module) for which this configuration refers to.

        Subclasses override this to look the plugin up in the proper registry.
        """
        raise NotImplementedError
# Type variable bound to CollectConfigBase, used to annotate ``self``/``cls`` below.
CCB = TypeVar("CCB", bound="CollectConfigBase")


class CollectConfigBase(PluginConfigMixin):
    """
    Base config schema for collect plugins.
    """

    def __new__(
        cls: Type[CCB],
        plugin: str,
        **kwargs: Dict[str, Any],
    ) -> CollectConfigBase:
        """
        Swap the ``cls`` to instantiate if necessary.

        If the targeted plugin provides a ``get_config_schema`` function, then this
        class instance will use that class instead of the default one
        """
        try:
            plugin_module = PluginsList.instance().collectors[plugin]
            try:
                get_schema_func = plugin_module.get_config_schema
                cls = get_schema_func()  # pylint: disable=self-cls-assignment
            except AttributeError:
                log.debug(
                    "The %r collect plugin does not provide a 'get_config_schema' function, defaulting to %s",
                    plugin,
                    cls,
                )
        except KeyError:
            # Unknown plugin name: keep the default schema class. Any error will
            # surface later, when the plugin itself is looked up.
            pass
        instance: CollectConfigBase = PluginConfigMixin.__new__(cls)
        return instance

    @property
    def loaded_plugin(self: CCB) -> ModuleType:
        """
        Return the plugin instance(module) for which this configuration refers to.
        """
        return PluginsList.instance().collectors[self.plugin]
# Type variable bound to ProcessConfigBase, used to annotate ``self``/``cls`` below.
PCB = TypeVar("PCB", bound="ProcessConfigBase")


class ProcessConfigBase(PluginConfigMixin):
    """
    Base config schema for process plugins.
    """

    def __new__(
        cls: Type[PCB],
        plugin: str,
        **kwargs: Dict[str, Any],
    ) -> ProcessConfigBase:
        """
        Swap the ``cls`` to instantiate if necessary.

        If the targeted plugin provides a ``get_config_schema`` function, then this
        class instance will use that class instead of the default one
        """
        try:
            plugin_module = PluginsList.instance().processors[plugin]
            try:
                get_schema_func = plugin_module.get_config_schema
                cls = get_schema_func()  # pylint: disable=self-cls-assignment
            except AttributeError:
                log.debug(
                    "The %r process plugin does not provide a 'get_config_schema' function, defaulting to %s",
                    plugin,
                    cls,
                )
        except KeyError:
            # Unknown plugin name: keep the default schema class. Any error will
            # surface later, when the plugin itself is looked up.
            pass
        instance: ProcessConfigBase = PluginConfigMixin.__new__(cls)
        return instance

    @property
    def loaded_plugin(self: PCB) -> ModuleType:
        """
        Return the plugin instance(module) for which this configuration refers to.
        """
        return PluginsList.instance().processors[self.plugin]
# Type variable bound to ForwardConfigBase, used to annotate ``self``/``cls`` below.
FCB = TypeVar("FCB", bound="ForwardConfigBase")


class ForwardConfigBase(PluginConfigMixin):
    """
    Base config schema for forward plugins.
    """

    def __new__(
        cls: Type[FCB],
        plugin: str,
        **kwargs: Dict[str, Any],
    ) -> ForwardConfigBase:
        """
        Swap the ``cls`` to instantiate if necessary.

        If the targeted plugin provides a ``get_config_schema`` function, then this
        class instance will use that class instead of the default one
        """
        try:
            plugin_module = PluginsList.instance().forwarders[plugin]
            try:
                get_schema_func = plugin_module.get_config_schema
                cls = get_schema_func()  # pylint: disable=self-cls-assignment
            except AttributeError:
                log.debug(
                    "The %r forward plugin does not provide a 'get_config_schema' function, defaulting to %s",
                    plugin,
                    cls,
                )
        except KeyError:
            # Unknown plugin name: keep the default schema class. Any error will
            # surface later, when the plugin itself is looked up.
            pass
        instance: ForwardConfigBase = PluginConfigMixin.__new__(cls)
        return instance

    @property
    def loaded_plugin(self: FCB) -> ModuleType:
        """
        Return the plugin instance(module) for which this configuration refers to.
        """
        return PluginsList.instance().forwarders[self.plugin]
# Type variable bound to PipelineConfig, used to annotate ``self`` below.
PC = TypeVar("PC", bound="PipelineConfig")


class PipelineConfig(NonMutableConfig):
    """
    Base config schema for pipeline configuration.
    """

    # Names of the collect plugin configurations feeding this pipeline.
    collect: List[str]
    # Names of the (optional) process plugin configurations.
    process: List[str] = Field(default_factory=list)
    # Names of the forward plugin configurations receiving the events.
    forward: List[str]
    # Disabled pipelines are configured but never started.
    enabled: bool = True
    restart: bool = True
    # Pipeline name; populated in AnalyticsConfig._init_private_attributes().
    _name: str = PrivateAttr()

    @property
    def name(self: PC) -> str:
        """
        Return the pipeline name as defined in the configuration file.
        """
        return self._name
# Type variable bound to AnalyticsConfig, used to annotate ``self``/``cls`` below.
AC = TypeVar("AC", bound="AnalyticsConfig")


class AnalyticsConfig(BaseModel):
    """
    Salt Analytics Framework configuration.
    """

    collectors: Dict[str, CollectConfigBase]
    processors: Dict[str, ProcessConfigBase] = Field(default_factory=dict)
    forwarders: Dict[str, ForwardConfigBase]
    pipelines: Dict[str, PipelineConfig]
    # NOTE(review): presumably the loaded Salt options dictionary, kept opaque
    # here -- confirm against the loader.
    salt_config: Dict[str, Any]

    @validator("pipelines", pre=True)
    @classmethod
    def _validate_pipelines(
        cls: Type[AC], pipelines: Dict[str, Dict[str, Any]]
    ) -> Dict[str, Dict[str, Any]]:
        # Pre-validator: normalize each pipeline definition so that "collect",
        # "process" and "forward" are always lists (bare strings are wrapped),
        # "process" defaults to [] and "enabled" defaults to True.
        for name, data in pipelines.items():
            collect = data["collect"]
            if isinstance(collect, str):
                collect = [collect]
            process = data.get("process")
            forward = data["forward"]
            if process is None:
                process = []
            elif isinstance(process, str):
                process = [process]
            if isinstance(forward, str):
                forward = [forward]
            pipelines[name]["collect"] = collect
            pipelines[name]["process"] = process
            pipelines[name]["forward"] = forward
            pipelines[name].setdefault("enabled", True)
        return pipelines

    def _init_private_attributes(self: AC) -> None:
        """
        Set the `_parent` attribute on child schemas.
        """
        super()._init_private_attributes()
        # Allow plugin configurations to access the full configuration, this instance
        for entry in (self.collectors, self.processors, self.forwarders, self.pipelines):
            if entry is None:
                continue
            for name, config in entry.items():  # type: ignore[attr-defined]
                config._name = name  # pylint: disable=protected-access
                config._parent = self  # pylint: disable=protected-access
class CollectedEvent(BaseModel):
    """
    Class representing each of the collected events.
    """

    data: Mapping[str, Any]
    # NOTE(review): ``dt.utcnow`` yields a naive datetime (no tzinfo) —
    # confirm whether an aware UTC timestamp was intended here.
    timestamp: Optional[datetime] = Field(default_factory=dt.utcnow)
# Type variable bound to SaltEvent, used for precise cls typing in validators.
SE = TypeVar("SE", bound="SaltEvent")


class SaltEvent(NonMutableModel):
    """
    Class representing an event from Salt's event bus.
    """

    tag: str
    stamp: datetime
    data: Dict[str, Any]
    raw_data: Dict[str, Any]

    @staticmethod
    def _convert_stamp(stamp: str) -> datetime:
        # Parse Salt's ISO-format stamp string into an aware UTC datetime.
        _stamp: datetime
        try:
            _stamp = datetime.fromisoformat(stamp).replace(tzinfo=timezone.utc)
        except AttributeError:  # pragma: no cover
            # Python < 3.7
            _stamp = datetime.strptime(stamp, "%Y-%m-%dT%H:%M:%S.%f").replace(tzinfo=timezone.utc)
        return _stamp

    @validator("stamp")
    @classmethod
    def _validate_stamp(cls: Type[SE], value: Union[str, datetime]) -> datetime:
        # Accept datetimes as-is; convert stamp strings coming off the wire.
        if isinstance(value, datetime):
            return value
        return SaltEvent._convert_stamp(value)
# Type variable for the concrete plugin configuration carried by the context.
PipelineRunContextConfigType = TypeVar("PipelineRunContextConfigType", bound=NonMutableConfig)


class PipelineRunContext(GenericModel, Generic[PipelineRunContextConfigType]):
    """
    Class representing a pipeline run context.
    """

    config: PipelineRunContextConfigType
    # Scratch space local to this run context.
    cache: Dict[str, Any] = Field(default_factory=dict)
    # Scratch space shared with other contexts.
    shared_cache: Dict[str, Any] = Field(default_factory=dict)

    @property
    def pipeline_config(self) -> AnalyticsConfig:  # noqa: ANN101
        """
        Return the analytics configuration.
        """
        return self.config.parent

    @property
    def salt_config(self) -> Dict[str, Any]:  # noqa: ANN101
        """
        Return the salt configuration.
        """
        # Fix: removed trailing packaging/extraction artifact that was fused
        # onto this final line of the file.
        config: Dict[str, Any] = self.config.parent.salt_config
        return config
from __future__ import annotations
import functools
import logging
import re
from typing import Any
from typing import AsyncIterator
from typing import Dict
from typing import Match
from typing import Optional
from typing import Type
from typing import TypeVar
from pydantic import Field
from saf.models import CollectedEvent
from saf.models import PipelineRunContext
from saf.models import ProcessConfigBase
log = logging.getLogger(__name__)
# NOTE(review): this type variable is never referenced in this module —
# confirm whether it is part of the public API before removing it.
RegexProcessObject = TypeVar("RegexProcessObject")


class RegexMaskProcessConfig(ProcessConfigBase):
    """
    Configuration schema for the regex mask processor plugin.
    """

    # Mapping of rule name -> regex pattern; every match gets masked.
    rules: Dict[str, str]
    # Optional single character used to mask matches while preserving length.
    mask_char: Optional[str] = Field(min_length=1, max_length=1)
    # Prefix/suffix wrapped around the rule name when no mask_char is given.
    mask_prefix: str = "<:"
    mask_suffix: str = ":>"
def get_config_schema() -> Type[ProcessConfigBase]:
    """
    Return the configuration schema used by the regex mask processor plugin.
    """
    schema: Type[ProcessConfigBase] = RegexMaskProcessConfig
    return schema
def _regex_mask(event_piece: str, config: RegexMaskProcessConfig) -> str:
"""
Go through the string and process based on regex rules.
"""
def repl_fn(rule_name: str, match: Match[Any]) -> str:
"""
The replacement function to be called on each match.
If a mask_char was provided, use that with matching length.
Otherwise, use the rule name surrounded by prefix and suffix.
"""
if config.mask_char:
matched_str = match.group(0)
return config.mask_char * len(matched_str)
return f"{config.mask_prefix}{rule_name}{config.mask_suffix}"
orig_str = event_piece
try:
for rule_name, pattern in config.rules.items():
event_piece = re.sub(pattern, functools.partial(repl_fn, rule_name), event_piece)
except Exception:
log.exception("Failed to mask value '%s'", orig_str)
return event_piece
def _regex_process(
obj: str | list[Any] | tuple[Any, ...] | set[Any] | Dict[str, Any],
config: RegexMaskProcessConfig,
) -> str | list[Any] | tuple[Any, ...] | set[Any] | Dict[str, Any]:
"""
Recursive method to iterate over dictionary and apply rules to all str values.
"""
# Iterate over all attributes of obj.
# If string, do mask.
# If dict, set, tuple, or list -> recurse.
if isinstance(obj, str):
return _regex_mask(obj, config)
if isinstance(obj, (list, tuple, set)):
klass = type(obj)
return klass(_regex_process(i, config) for i in obj)
if isinstance(obj, dict):
for key, value in obj.items():
obj[key] = _regex_process(value, config)
return obj
async def process(
    *,
    ctx: PipelineRunContext[RegexMaskProcessConfig],
    event: CollectedEvent,
) -> AsyncIterator[CollectedEvent]:
    """
    Method called to mask the data based on provided regex rules.

    The event is serialized to a plain dict, every string in it is run
    through the configured rules, and a new event is rebuilt from the
    masked payload.
    """
    config = ctx.config
    log.debug("Processing event in regex_mask: %s", event.json())
    event_dict = event.dict()
    processed_event_dict = _regex_process(event_dict, config)
    # Fix: removed trailing packaging/extraction artifact that was fused
    # onto this final line of the file.
    yield event.parse_obj(processed_event_dict)
from __future__ import annotations
import logging
import math
import string
from typing import Any
from typing import AsyncIterator
from typing import Optional
from typing import Type
from pydantic import Field
from saf.models import CollectedEvent
from saf.models import PipelineRunContext
from saf.models import ProcessConfigBase
log = logging.getLogger(__name__)
class ShannonMaskProcessConfig(ProcessConfigBase):
    """
    Configuration schema for the Shannon mask processor plugin.
    """

    # Replacement label used when no ``mask_char`` is configured.
    mask_str: str = "HIGH-ENTROPY"
    # Optional single character used to mask words while preserving length.
    mask_char: Optional[str] = Field(min_length=1, max_length=1)
    mask_prefix: str = "<:"
    mask_suffix: str = ":>"
    # Normalized entropy threshold above which a word is masked.
    h_threshold: float = Field(0.9, ge=0.0, le=1.0)
    # Minimum word length considered for masking.
    length_threshold: int = Field(16, gt=1)
    # NOTE: "delimeter" (sic) is the public configuration key; renaming it
    # would break existing configuration files.
    delimeter: str = Field(" ", min_length=1, max_length=1)
    # Characters counted when computing the Shannon index (base64 alphabet).
    alphabet: str = Field(f"{string.ascii_letters}{string.digits}+/=", min_length=1)
def get_config_schema() -> Type[ProcessConfigBase]:
    """
    Return the configuration schema used by the Shannon mask processor plugin.
    """
    schema: Type[ProcessConfigBase] = ShannonMaskProcessConfig
    return schema
def _calculate_normalized_shannon_index(word: str, alphabet: str) -> float:
"""
Calculate a length-relative normalized Shannon index of the event_piece.
Shannon Diversity index: https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/shannon.htm
"""
# pylint: disable=invalid-name
word_len = len(word)
alphabet_len = len(alphabet)
p_dict = {i: word.count(i) / word_len for i in word if i in alphabet}
# h is the standard Shannon Index naming convention
h = sum(-1 * p_i * math.log(p_i) for p_i in p_dict.values())
# Quotient-Remainder Thm: We have integers d, r such that
# len(word) = d * len(alphabet) + r where 0 <= r < len(alphabet)
# We can use this relationship to find h_max for a given string length.
d = word_len // alphabet_len
r = word_len % alphabet_len
p_r = (d + 1) / word_len
p_d = d / word_len
h_max = -(r * p_r * math.log(p_r)) - ((alphabet_len - r) * p_d * math.log(p_d))
return h / h_max
# pylint: enable=invalid-name
def _shannon_mask(event_piece: str, config: ShannonMaskProcessConfig) -> str:
"""
Go through the string and process based on normalized Shannon index values.
"""
def repl_fn(word: str) -> str:
"""
The replacement function to be called on each match.
If a mask_char was provided, use that with matching length.
Otherwise, use the config.mask_str surrounded by prefix and suffix.
"""
if config.mask_char:
return config.mask_char * len(word)
return f"{config.mask_prefix}{config.mask_str}{config.mask_suffix}"
orig_str = event_piece
try:
split_piece = event_piece.split(config.delimeter)
for word in split_piece:
if len(word) >= config.length_threshold:
h_norm = _calculate_normalized_shannon_index(word, config.alphabet)
if h_norm > config.h_threshold:
event_piece = event_piece.replace(word, repl_fn(word))
except Exception:
log.exception("Failed to mask value '%s'", orig_str)
return event_piece
def _shannon_process(obj: Any, config: ShannonMaskProcessConfig) -> Any: # noqa: ANN401
"""
Recursive method to iterate over dictionary and apply rules to all str values.
"""
# Iterate over all attributes of obj. If string, do mask. If dict, recurse. Else, do nothing.
if isinstance(obj, str):
return _shannon_mask(obj, config)
if isinstance(obj, (list, tuple, set)):
klass = type(obj)
return klass(_shannon_process(i, config) for i in obj)
if isinstance(obj, dict):
for key, value in obj.items():
obj[key] = _shannon_process(value, config)
return obj
async def process(
    *,
    ctx: PipelineRunContext[ShannonMaskProcessConfig],
    event: CollectedEvent,
) -> AsyncIterator[CollectedEvent]:
    """
    Method called to mask the data based on normalized Shannon index values.

    The event is serialized to a plain dict, every string in it is checked
    for high-entropy words, and a new event is rebuilt from the masked
    payload.
    """
    config = ctx.config
    log.debug("Processing event in shannon_mask: %s", event.json())
    event_dict = event.dict()
    processed_event_dict = _shannon_process(event_dict, config)
    # Fix: removed trailing packaging/extraction artifact that was fused
    # onto this final line of the file.
    yield event.parse_obj(processed_event_dict)
from __future__ import annotations
import logging
import pathlib # noqa: TCH003
from typing import Any
from typing import AsyncIterator
from typing import Dict
from typing import List
from typing import Optional
from typing import Type
import papermill
from saf.models import CollectedEvent
from saf.models import PipelineRunContext
from saf.models import ProcessConfigBase
log = logging.getLogger(__name__)
class JupyterNotebookConfig(ProcessConfigBase):
    """
    Configuration schema for the jupyter notebook processor plugin.
    """

    # Path of the notebook to execute.
    notebook: pathlib.Path
    # Where to write the executed notebook; when unset, ``notebook`` itself
    # is overwritten (see ``process``).
    output_notebook: Optional[pathlib.Path]
    # Parameters injected into the notebook by papermill.
    params: Dict[str, Any] = {}
    # Extra keyword arguments forwarded to ``papermill.execute_notebook``.
    papermill_kwargs: Dict[str, Any] = {}
    # Cell tag identifying the cell whose outputs should be collected.
    output_tag: Optional[str]
    # Keys copied from the event data into the notebook parameters.
    input_keys: List[str]
def get_config_schema() -> Type[ProcessConfigBase]:
    """
    Return the configuration schema used by the jupyter notebook processor plugin.
    """
    schema: Type[ProcessConfigBase] = JupyterNotebookConfig
    return schema
async def process(
    *,
    ctx: PipelineRunContext[JupyterNotebookConfig],
    event: CollectedEvent,
) -> AsyncIterator[CollectedEvent]:
    """
    Run the jupyter notebook, doing papermill parameterizing using the event data given.

    The configured ``input_keys`` are copied from the event data into the
    notebook parameters, the notebook is executed, and the
    ``execute_result`` outputs of the selected cell are attached to the
    event under ``trimmed_outputs``.
    """
    config = ctx.config
    output = config.output_notebook or config.notebook
    params = config.params.copy()
    for key in config.input_keys:
        params[key] = event.data[key]
    notebook = papermill.execute_notebook(
        str(config.notebook),
        str(output),
        parameters=params,
        **config.papermill_kwargs,
    )
    # Now let's find the cell with the output.
    # If no output tag is given — or the tag is not found in any cell — we
    # resort to the last cell.
    cells = notebook.cells
    # Fix: ``notebook_output`` was previously unbound (NameError) when
    # ``output_tag`` was configured but did not match any cell's tags.
    notebook_output = cells[-1].outputs
    if config.output_tag:
        for cell in cells:
            if config.output_tag in cell.metadata.tags:
                notebook_output = cell.outputs
                break
    # Keep only the execute_result outputs; drop streams/display data.
    trimmed_outputs = [out for out in notebook_output if out.output_type == "execute_result"]
    event.data = {"trimmed_outputs": trimmed_outputs}
    yield event
# Import Python Libs
from __future__ import absolute_import, print_function, unicode_literals
import datetime
import logging
import pprint
import time
# Import Salt Libs
import salt.config as config
from salt.exceptions import SaltCloudException, SaltCloudSystemExit
# Get logging started
log = logging.getLogger(__name__)
__virtualname__ = "kamatera"
# Only load in this module if the Kamatera configurations are in place
def __virtual__():
    """
    Check for Kamatera configs.

    Only load this module when the provider configuration (api_client_id /
    api_secret) is present.
    """
    if get_configured_provider() is False:
        return False
    return __virtualname__
def get_configured_provider():
    """
    Return the first configured instance.

    A provider counts as configured when both ``api_client_id`` and
    ``api_secret`` are present.
    """
    return config.is_provider_configured(
        __opts__,
        __active_provider_name__ or __virtualname__,
        ("api_client_id", "api_secret",),
    )
def avail_images(call=None):
    """
    Return available Kamatera images for a given location.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-images my-kamatera-config --location=EU
        salt-cloud -f avail_images my-kamatera-config --location=EU
    """
    if call == "action":
        raise SaltCloudException(
            "The avail_images function must be called with -f or --function."
        )
    if not __opts__.get("location"):
        raise SaltCloudException(
            "A location must be specified using --location=LOCATION"
        )
    # Map image id -> human readable name for the requested datacenter.
    images = _request(
        "service/server?images=1&datacenter={0}".format(__opts__["location"])
    )
    return {image["id"]: image["name"] for image in images}
def avail_sizes(call=None):
    """
    Return available Kamatera CPU types for a given location.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-sizes my-kamatera-config --location=EU
        salt-cloud -f avail_sizes my-kamatera-config --location=EU
    """
    if call == "action":
        raise SaltCloudException(
            "The avail_sizes function must be called with -f or --function."
        )
    if not __opts__.get("location"):
        raise SaltCloudException(
            "A location must be specified using --location=LOCATION"
        )
    capabilities = _request(
        "service/server?capabilities=1&datacenter={0}".format(
            __opts__["location"]
        )
    )
    # Map cpu type id -> its details, normalizing ramMB/cpuCores to strings.
    sizes = {}
    for cpu_type in capabilities["cpuTypes"]:
        details = {}
        for key, value in cpu_type.items():
            if key == "id":
                continue
            details[key] = str(value) if key in ["ramMB", "cpuCores"] else value
        sizes[cpu_type["id"]] = details
    return sizes
def avail_server_options(kwargs=None, call=None):
    """
    Return available Kamatera server options for a given location.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f avail_server_options my-kamatera-config --location=EU
    """
    if call != "function":
        raise SaltCloudException(
            "The avail_server_options function must be called with -f or --function."
        )
    if not __opts__.get("location"):
        raise SaltCloudException(
            "A location must be specified using --location=LOCATION"
        )
    capabilities = _request(
        "service/server?capabilities=1&datacenter={0}".format(
            __opts__["location"]
        )
    )
    # Drop the keys reported elsewhere and normalize diskSizeGB to a string.
    options = {}
    for key, value in capabilities.items():
        if key in ["cpuTypes", "defaultMonthlyTrafficPackage"]:
            continue
        options[key] = str(value) if key == "diskSizeGB" else value
    return options
def avail_locations(call=None):
    """
    Return available Kamatera datacenter locations.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-kamatera-config
        salt-cloud -f avail_locations my-kamatera-config
    """
    if call == "action":
        raise SaltCloudException(
            "The avail_locations function must be called with -f or --function."
        )
    # Map datacenter id -> "subCategory, name (category)" description.
    locations = {}
    for datacenter in _request("service/server?datacenter=1"):
        datacenter_id = datacenter.pop("id")
        locations[datacenter_id] = "{0}, {1} ({2})".format(
            datacenter["subCategory"], datacenter["name"], datacenter["category"]
        )
    return locations
def create(vm_):
    """
    Create a single Kamatera server.

    Builds the create payload from the profile, waits for the asynchronous
    create command to complete, then bootstraps the new server over SSH.
    Returns the bootstrap result dict, or ``False`` if the profile is
    missing/misconfigured.
    """
    name = vm_["name"]
    profile = vm_.get("profile")
    # Bail out early when the profile is missing or not properly configured.
    if not profile or not config.is_profile_configured(
        __opts__, __active_provider_name__ or "kamatera", vm_["profile"], vm_=vm_
    ):
        return False
    __utils__["cloud.fire_event"](
        "event",
        "starting create",
        "salt/cloud/{0}/creating".format(name),
        args=__utils__["cloud.filter_event"](
            "creating", vm_, ["name", "profile", "provider", "driver"]
        ),
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    log.info("Creating Cloud VM %s", name)

    def _getval(key, default=None):
        # Fetch a profile/provider option; raise when a required option (one
        # with no default here) is missing or empty.
        val = config.get_cloud_config_value(key, vm_, __opts__, default=None)
        if not val and default is None:
            raise SaltCloudException("missing required profile option: {0}".format(key))
        else:
            return val or default

    # Build the Kamatera server-create payload from the profile values.
    # "__generate__" asks the API to generate a password server-side.
    request_data = {
        "name": name,
        "password": _getval("password", "__generate__"),
        "passwordValidate": _getval("password", "__generate__"),
        "ssh-key": _getval("ssh_pub_key", ""),
        "datacenter": _getval("location"),
        "image": _getval("image"),
        "cpu": "{0}{1}".format(_getval("cpu_cores"), _getval("cpu_type")),
        "ram": _getval("ram_mb"),
        "disk": " ".join(
            [
                "size={0}".format(disksize)
                for disksize in [_getval("disk_size_gb")]
                + _getval("extra_disk_sizes_gb", [])
            ]
        ),
        "dailybackup": "yes" if _getval("daily_backup", False) else "no",
        "managed": "yes" if _getval("managed", False) else "no",
        "network": " ".join(
            [
                ",".join(["{0}={1}".format(k, v) for k, v in network.items()])
                for network in _getval("networks", [{"name": "wan", "ip": "auto"}])
            ]
        ),
        "quantity": 1,
        "billingcycle": _getval("billing_cycle", "hourly"),
        "monthlypackage": _getval("monthly_traffic_package", ""),
        "poweronaftercreate": "yes",
    }
    response = _request("service/server", "POST", request_data)
    # When the API generated the password, the response also carries it.
    if not _getval("password", ""):
        command_ids = response["commandIds"]
        generated_password = response["password"]
    else:
        command_ids = response
        generated_password = None
    if len(command_ids) != 1:
        raise SaltCloudException("invalid Kamatera response")
    command_id = command_ids[0]
    __utils__["cloud.fire_event"](
        "event",
        "requesting instance",
        "salt/cloud/{0}/requesting".format(name),
        args=__utils__["cloud.filter_event"](
            "requesting", vm_, ["name", "profile", "provider", "driver"]
        ),
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    # Block until the asynchronous create command finishes.
    command = _wait_command(command_id, _getval)
    create_log = command["log"]
    try:
        created_at = datetime.datetime.strptime(
            command.get("completed") or "", "%Y-%m-%d %H:%M:%S"
        )
    except ValueError:
        created_at = None
    # The server's actual name is reported in the command log ("Name: ...").
    name_lines = [line for line in create_log.split("\n") if line.startswith("Name: ")]
    if len(name_lines) != 1:
        raise SaltCloudException("invalid create log: " + create_log)
    created_name = name_lines[0].replace("Name: ", "")
    tmp_servers = _list_servers(name_regex=created_name)
    if len(tmp_servers) != 1:
        raise SaltCloudException("invalid list servers response")
    server = tmp_servers[0]
    server["extra"]["create_command"] = command
    server["extra"]["created_at"] = created_at
    server["extra"]["generated_password"] = generated_password
    # Split the server's networks into public ("wan-*") and private IPs.
    public_ips = []
    private_ips = []
    for network in server["networks"]:
        if network.get("network").startswith("wan-"):
            public_ips += network.get("ips", [])
        else:
            private_ips += network.get("ips", [])
    data = dict(
        image=_getval("image"),
        name=server["name"],
        size="{0}{1}-{2}mb-{3}gb".format(
            server["cpu_cores"],
            server["cpu_type"],
            server["ram_mb"],
            server["disk_size_gb"],
        ),
        state=server["state"],
        private_ips=private_ips,
        public_ips=public_ips,
    )
    # Pass the correct IP address to the bootstrap ssh_host key
    # NOTE(review): raises IndexError when the server has no public network —
    # consider falling back to private_ips.
    vm_["ssh_host"] = data["public_ips"][0]
    # If a password wasn't supplied in the profile or provider config, set it now.
    vm_["password"] = _getval("password", generated_password)
    # Make public_ips and private_ips available to the bootstrap script.
    vm_["public_ips"] = public_ips
    vm_["private_ips"] = private_ips
    # Send event that the instance has booted.
    __utils__["cloud.fire_event"](
        "event",
        "waiting for ssh",
        "salt/cloud/{0}/waiting_for_ssh".format(name),
        sock_dir=__opts__["sock_dir"],
        args={"ip_address": vm_["ssh_host"]},
        transport=__opts__["transport"],
    )
    # Bootstrap!
    ret = __utils__["cloud.bootstrap"](vm_, __opts__)
    ret.update(data)
    log.info("Created Cloud VM '%s'", name)
    log.debug("'%s' VM creation details:\n%s", name, pprint.pformat(data))
    __utils__["cloud.fire_event"](
        "event",
        "created instance",
        "salt/cloud/{0}/created".format(name),
        args=__utils__["cloud.filter_event"](
            "created", vm_, ["name", "profile", "provider", "driver"]
        ),
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    return ret
def destroy(name, call=None):
    """
    Destroys a Kamatera server by name.

    name
        The name of server to be destroyed.

    CLI Example:

    .. code-block:: bash

        salt-cloud -d server_name
    """
    if call == "function":
        raise SaltCloudException(
            "The destroy action must be called with -d, --destroy, " "-a or --action."
        )
    __utils__["cloud.fire_event"](
        "event",
        "destroying instance",
        "salt/cloud/{0}/destroying".format(name),
        args={"name": name},
        sock_dir=__opts__["sock_dir"],
        transport=__opts__["transport"],
    )
    # _server_operation("terminate") forces the termination and waits for
    # the command to complete.
    return _server_operation(name, "terminate")
def list_nodes(call=None, full=False, name_regex=None):
    """
    Returns a list of servers, keeping only a brief listing.

    CLI Example:

    .. code-block:: bash

        salt-cloud -Q
        salt-cloud --query
        salt-cloud -f list_nodes my-kamatera-config
    """
    if call == "action":
        raise SaltCloudException(
            "The list_nodes function must be called with -f or --function."
        )
    ret = {}
    for server_res in _list_servers(name_regex=name_regex):
        # Split the server's networks into public ("wan-*") and private IPs.
        public_ips, private_ips, networks = [], [], []
        for network in server_res.pop("networks"):
            networks.append(network["network"])
            if network["network"].startswith("wan-"):
                public_ips += network["ips"]
            else:
                private_ips += network["ips"]
        name = server_res.pop("name")
        server = {
            "id": server_res.pop("id"),
            # Image information is not part of the listing data.
            "image": "",
            "size": "{0}{1}-{2}mb-{3}gb".format(
                server_res.pop("cpu_cores"),
                server_res.pop("cpu_type"),
                server_res.pop("ram_mb"),
                server_res.pop("disk_size_gb"),
            ),
            "state": server_res.pop("state"),
            "private_ips": private_ips,
            "public_ips": public_ips,
        }
        if full:
            # Everything not consumed above is flattened under "extra".
            server_res["networks"] = networks
            for k, v in server_res.pop("extra", {}).items():
                server_res[k] = v
            server["extra"] = server_res
        ret[name] = server
    return ret
def list_nodes_full(call=None):
    """
    List Kamatera servers, including all available information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -F
        salt-cloud --full-query
        salt-cloud -f list_nodes_full my-kamatera-config
    """
    if call == "action":
        raise SaltCloudException(
            "The list_nodes_full function must be called with -f or --function."
        )
    nodes = list_nodes(full=True)
    return nodes
def list_nodes_min(call=None):
    """
    Return a list of the VMs that are on the provider. Only a list of VM names and
    their state is returned. This is the minimum amount of information needed to
    check for existing VMs.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_nodes_min my-kamatera-config
        salt-cloud --function list_nodes_min my-kamatera-config
    """
    if call == "action":
        raise SaltCloudSystemExit(
            "The list_nodes_min function must be called with -f or --function."
        )
    return {server["name"]: server for server in _request("/service/servers")}
def list_nodes_select(call=None):
    """
    Return a list of the servers that are on the provider, with select fields.
    """
    nodes = list_nodes_full()
    selection = __opts__["query.selection"]
    return __utils__["cloud.list_nodes_select"](nodes, selection, call)
def reboot(name, call=None):
    """
    Reboot a Kamatera server.

    .. versionadded:: 2015.8.0

    name
        The name of the server to reboot.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a reboot server_name
    """
    if call != "action":
        # Fix: error message previously referred to show_instance (copy/paste).
        raise SaltCloudException(
            "The reboot action must be called with -a or --action."
        )
    return _server_operation(name, "reboot")
def start(name, call=None):
    """
    Start a Kamatera server.

    .. versionadded:: 2015.8.0

    name
        The name of the server to start.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a start server_name
    """
    if call != "action":
        # Fix: error message previously referred to show_instance (copy/paste).
        raise SaltCloudException(
            "The start action must be called with -a or --action."
        )
    return _server_operation(name, "poweron")
def stop(name, call=None):
    """
    Stop a Kamatera server.

    .. versionadded:: 2015.8.0

    name
        The name of the server to stop.

    CLI Example:

    .. code-block:: bash

        salt-cloud -a stop server_name
    """
    if call != "action":
        # Fix: error message previously referred to show_instance (copy/paste).
        raise SaltCloudException(
            "The stop action must be called with -a or --action."
        )
    return _server_operation(name, "poweroff")
def show_instance(name, call=None):
    """
    Displays details about a specific Kamatera server

    .. versionadded:: 2015.8.0

    name
        Server name

    CLI Example:

    .. code-block:: bash

        salt-cloud -a show_instance server_name
    """
    if call != "action":
        raise SaltCloudException(
            "The show_instance action must be called with -a or --action."
        )
    nodes = list_nodes(full=True, name_regex=name)
    return nodes[name]
def _request(path, method="GET", request_data=None):
    """
    Make a web call to the Kamatera API and return the decoded response.

    Raises SaltCloudException on transport errors or on non-200 statuses
    embedded in the JSON body.
    """
    vm_ = get_configured_provider()
    api_client_id = config.get_cloud_config_value(
        "api_client_id", vm_, __opts__, search_global=False,
    )
    api_secret = config.get_cloud_config_value(
        "api_secret", vm_, __opts__, search_global=False,
    )
    api_url = config.get_cloud_config_value(
        "api_url",
        vm_,
        __opts__,
        search_global=False,
        default="https://cloudcli.cloudwm.com",
    )
    if not api_url:
        api_url = "https://cloudcli.cloudwm.com"
    url = api_url.strip("/") + "/" + path.strip("/")
    headers = dict(
        AuthClientId=api_client_id, AuthSecret=api_secret, Accept="application/json"
    )
    headers["Content-Type"] = "application/json"
    # Ask the API to embed the HTTP status inside the JSON body; it is
    # checked below via result["dict"]["status"].
    headers["X-CLOUDCLI-STATUSINJSON"] = "true"
    result = __utils__["http.query"](
        url,
        method,
        data=__utils__["json.dumps"](request_data)
        if request_data is not None
        else None,
        header_dict=headers,
        decode=True,
        decode_type="json",
        text=True,
        status=True,
        opts=__opts__,
    )
    if result["status"] != 200 or result.get("error") or not result.get("dict"):
        if result.get("res"):
            # Fix: use the module logger instead of the root logger, matching
            # the rest of this module.
            log.error(result["res"])
        raise SaltCloudException(
            result.get("error") or "Unexpected response from Kamatera API"
        )
    elif result["dict"]["status"] != 200:
        try:
            message = result["dict"]["response"].pop("message")
        except KeyError:
            message = "Unexpected response from Kamatera API (status={0})".format(
                result["dict"]["status"]
            )
        # Fix: use the module logger instead of the root logger.
        log.error(result["dict"]["response"])
        raise SaltCloudException(message)
    else:
        return result["dict"]["response"]
def _get_command_status(command_id):
    """Return the queue status entry for a single Kamatera command."""
    commands = _request("/service/queue?id=" + str(command_id))
    if len(commands) != 1:
        raise SaltCloudException("invalid response for command id " + str(command_id))
    return commands[0]
def _wait_command(command_id, _getval=None):
    """
    Poll a Kamatera command until it completes and return its status entry.

    Raises SaltCloudException when the command errors or the configured
    timeout elapses.
    """
    if not _getval:
        # Default option getter backed by the cloud configuration.
        def _getval(key, default):
            return config.get_cloud_config_value(key, {}, __opts__, default)

    poll_interval = _getval("wait_poll_interval_seconds", 2)
    timeout = _getval("wait_timeout_seconds", 600)
    deadline = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
    time.sleep(poll_interval)
    while True:
        if datetime.datetime.now() > deadline:
            raise SaltCloudException(
                "Timeout waiting for command (timeout_seconds={0}, command_id={1})".format(
                    str(timeout), str(command_id)
                )
            )
        time.sleep(poll_interval)
        command = _get_command_status(command_id)
        status = command.get("status")
        if status == "complete":
            return command
        if status == "error":
            raise SaltCloudException("Command failed: " + command.get("log"))
def _list_servers(name_regex=None, names=None):
    """
    List Kamatera servers by a server-name regex or an explicit list of names.
    """
    if names:
        # Resolve each explicit name through a regex lookup and merge.
        servers = []
        for server_name in names:
            servers.extend(_list_servers(name_regex=server_name))
        return servers
    request_data = {"allow-no-servers": True, "name": name_regex or ".*"}
    response = _request("/service/server/info", method="POST", request_data=request_data)
    return [_get_server(server) for server in response]
def _get_server(server):
"""get Kamatera server details in a standard structure"""
server_cpu = server.pop("cpu")
server_disk_sizes = server.pop("diskSizes")
res_server = dict(
id=server.pop("id"),
name=server.pop("name"),
state="running" if server.pop("power") == "on" else "stopped",
datacenter=server.pop("datacenter"),
cpu_type=server_cpu[-1],
cpu_cores=int(server_cpu[:-1]),
ram_mb=int(server.pop("ram")),
disk_size_gb=int(server_disk_sizes[0]),
extra_disk_sizes_gb=list(map(int, server_disk_sizes[1:])),
networks=server.pop("networks"),
daily_backup=server.pop("backup") == "1",
managed=server.pop("managed") == "1",
billing_cycle=server.pop("billing"),
monthly_traffic_package=server.pop("traffic"),
price_monthly_on=server.pop("priceMonthlyOn"),
price_hourly_on=server.pop("priceHourlyOn"),
price_hourly_off=server.pop("priceHourlyOff"),
)
res_server["extra"] = server
return res_server
def _server_operation(name, operation):
    """Run a power/terminate operation on the named server and report the result.

    Raises SaltCloudException when the server is in a state that does not
    permit the requested (non-terminate) operation.
    """
    state = _list_servers(name)[0]["state"]
    if operation != "terminate" and state not in ("stopped", "running"):
        raise SaltCloudException(
            "Invalid state for {0} operation: {1}".format(operation, state)
        )
    # Only issue the API call when it would actually change something;
    # terminate is always attempted.
    should_run = (
        operation == "terminate"
        or (operation == "poweron" and state == "stopped")
        or (operation in ("poweroff", "reboot") and state == "running")
    )
    if should_run:
        payload = {"name": name}
        if operation == "terminate":
            payload["force"] = True
        command_id = _request(
            "/service/server/{0}".format(operation), "POST", payload
        )[0]
        _wait_command(command_id)
        if operation == "terminate":
            state = "destroyed"
        else:
            state = _list_servers(name)[0]["state"]
    action_names = {"poweron": "start", "poweroff": "stop", "terminate": "destroy"}
    succeeded = (
        operation == "terminate"
        or (operation in ("reboot", "poweron") and state == "running")
        or (operation == "poweroff" and state == "stopped")
    )
    return {
        "state": state,
        "action": action_names.get(operation, operation),
        "success": succeeded,
    }
==============
Feature Matrix
==============
A number of features are available in most cloud providers, but not all are
available everywhere. This may be because the feature isn't supported by the
cloud provider itself, or it may only be that the feature has not yet been
added to Salt Cloud. In a handful of cases, it is because the feature does not
make sense for a particular cloud provider (Saltify, for instance).
This matrix shows which features are available in which cloud providers, as far
as Salt Cloud is concerned. This is not a comprehensive list of all features
available in all cloud providers, and should not be used to make business
decisions concerning choosing a cloud provider. In most cases, adding support
for a feature to Salt Cloud requires only a little effort.
Legacy Drivers
==============
Both AWS and Rackspace are listed as "Legacy". This is because those drivers
have been replaced by other drivers, which are generally the preferred method
for working with those providers.
The EC2 driver should be used instead of the AWS driver, when possible. The
OpenStack driver should be used instead of the Rackspace driver, unless the user
is dealing with instances in "the old cloud" in Rackspace.
Note for Developers
===================
When adding new features to a particular cloud provider, please make sure to
add the feature to this table. Additionally, if you notice a feature that is not
properly listed here, pull requests to fix it are appreciated.
Standard Features
=================
These are features that are available for almost every provider.
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
| |AWS |CloudStack|Digital|EC2|GoGrid|IBM|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Softlayer|Softlayer|
| |(Legacy)| |Ocean | | |SCE| | | | |(Legacy) | | |Hardware |
+=======================+========+==========+=======+===+======+===+======+======+=========+=========+=========+=======+=========+=========+
|Query |Yes |Yes |Yes |Yes|Yes |Yes|Yes |Yes |Yes |Yes |Yes | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|Full Query |Yes |Yes |Yes |Yes|Yes |Yes|Yes |Yes |Yes |Yes |Yes | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|Selective Query |Yes |Yes |Yes |Yes|Yes |Yes|Yes |Yes |Yes |Yes |Yes | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|List Sizes |Yes |Yes |Yes |Yes|Yes |Yes|Yes |Yes |Yes |Yes |Yes | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|List Images |Yes |Yes |Yes |Yes|Yes |Yes|Yes |Yes |Yes |Yes |Yes | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|List Locations |Yes |Yes |Yes |Yes|Yes |Yes|Yes |Yes |Yes |Yes |Yes | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|create |Yes |Yes |Yes |Yes|Yes |Yes|Yes |Yes |Yes |Yes |Yes |Yes |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|destroy |Yes |Yes |Yes |Yes|Yes |Yes|Yes |Yes |Yes |Yes |Yes | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
Actions
=======
These are features that are performed on a specific instance, and require an
instance name to be passed in. For example:
.. code-block:: bash
# salt-cloud -a attach_volume ami.example.com
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|Actions |AWS |CloudStack|Digital|EC2|GoGrid|IBM|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Softlayer|Softlayer|
| |(Legacy)| |Ocean | | |SCE| | | | |(Legacy) | | |Hardware |
+=======================+========+==========+=======+===+======+===+======+======+=========+=========+=========+=======+=========+=========+
|attach_volume | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|create_attach_volumes |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|del_tags |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|delvol_on_destroy | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|detach_volume | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|disable_term_protect |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|enable_term_protect |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|get_tags |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|keepvol_on_destroy | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|list_keypairs | | |Yes | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|rename |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|set_tags |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|show_delvol_on_destroy | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|show_instance | | |Yes |Yes| | | | | |Yes | | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|show_term_protect | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|start |Yes | | |Yes| | |Yes | | |Yes | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|stop |Yes | | |Yes| | |Yes | | |Yes | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|take_action | | | | | | |Yes | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
Functions
=========
These are features that are performed against a specific cloud provider, and
require the name of the provider to be passed in. For example:
.. code-block:: bash
# salt-cloud -f list_images my_digitalocean
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|Functions |AWS |CloudStack|Digital|EC2|GoGrid|IBM|JoyEnt|Linode|OpenStack|Parallels|Rackspace|Saltify|Softlayer|Softlayer|
| |(Legacy)| |Ocean | | |SCE| | | | |(Legacy) | | |Hardware |
+=======================+========+==========+=======+===+======+===+======+======+=========+=========+=========+=======+=========+=========+
|block_device_mappings |Yes | | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|create_keypair | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|create_volume | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|delete_key | | | | | | |Yes | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|delete_keypair | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|delete_volume | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|get_image | | |Yes | | | |Yes | | |Yes | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|get_ip | |Yes | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|get_key | |Yes | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|get_keyid | | |Yes | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|get_keypair | |Yes | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|get_networkid | |Yes | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|get_node | | | | | | |Yes | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|get_password | |Yes | | | | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|get_size | | |Yes | | | |Yes | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|get_spot_config | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|get_subnetid | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|iam_profile |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|import_key | | | | | | |Yes | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|key_list | | | | | | |Yes | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|keyname |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|list_availability_zones| | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|list_custom_images | | | | | | | | | | | | |Yes | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|list_keys | | | | | | |Yes | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|list_vlans | | | | | | | | | | | | |Yes |Yes |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|rackconnect | | | | | | | | |Yes | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|reboot | | | |Yes| | |Yes | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|reformat_node | | | | | | |Yes | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|securitygroup |Yes | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|securitygroupid | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|show_image | | | |Yes| | | | | |Yes | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|show_key | | | | | | |Yes | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|show_keypair | | |Yes |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
|show_volume | | | |Yes| | | | | | | | | | |
+-----------------------+--------+----------+-------+---+------+---+------+------+---------+---------+---------+-------+---------+---------+
| /salt-cloud-0.8.11.tar.gz/salt-cloud-0.8.11/doc/topics/features.rst | 0.723212 | 0.801198 | features.rst | pypi |
from __future__ import unicode_literals
from salt.utils import clean_kwargs
def set_up(name, containers=None, volumes=None, host=None, host_root=None, repository=None, default_domain=None,
           check_integrity=True, ignore_existing=False):
    '''
    Sets up a container map.

    name
        Name of the container map.
    containers
        Container configurations to load.
    volumes
        Volume aliases, each with the path of their container mount point.
    host
        Volume aliases, each with the path of their host mount point.
    host_root
        Host root path prefixed to all host paths, unless they are absolute paths.
    repository
        Default repository name prefixed to image names, unless they are specified with a different prefix.
    default_domain
        Default domain to set for new containers.
    check_integrity : True
        Whether to check the map for missing containers and volume aliases. Set to ``False`` if the map is to be
        complemented with ``merged``.
    ignore_existing : False
        By default this state fails when a map with the given name already exists; set to ``True`` to overwrite
        existing maps instead.
    '''
    setup_result = __salt__['container_map.setup'](
        name,
        containers=containers,
        volumes=volumes,
        host=host,
        host_root=host_root,
        repository=repository,
        default_domain=default_domain,
        check_integrity=check_integrity,
        ignore_existing=ignore_existing,
    )
    setup_result['name'] = setup_result['item_id']
    return setup_result
def merged(name, target_map, containers=None, volumes=None, host=None, host_root=None, repository=None,
           default_domain=None, lists_only=False):
    '''
    Merges the given map into an existing container map: list-like properties are extended and
    dictionaries on the target map are updated.

    name
        State name - has no effect.
    target_map
        Name of the container map to merge the following contents into.
    containers
        Container configurations to load.
    volumes
        Volume aliases, each with the path of their container mount point.
    host
        Volume aliases, each with the path of their host mount point.
    host_root
        Host root path prefixed to all host paths, unless they are absolute paths.
    repository
        Default repository name prefixed to image names, unless they are specified with a different prefix.
    default_domain
        Default domain to set for new containers.
    lists_only : False
        By default single-value properties (e.g. host_root) are overwritten on the target map. If set to ``True``,
        these properties are ignored if they are already set on the target.
    '''
    merge_result = __salt__['container_map.merge'](
        target_map,
        containers=containers,
        volumes=volumes,
        host=host,
        host_root=host_root,
        repository=repository,
        default_domain=default_domain,
        lists_only=lists_only,
    )
    merge_result['name'] = merge_result['item_id']
    return merge_result
def created(name, instances=None, map_name=None, extra_kwargs=None):
    '''
    Ensures that a container exists, along with its dependencies.

    name
        Container configuration name.
    instances
        Optional list of instance names.
    map_name
        Container map name.
    extra_kwargs
        Extra keyword arguments for the container creation.
    '''
    result = __salt__['container_map.create'](name, instances=instances, map_name=map_name,
                                              **(extra_kwargs or {}))
    result.update(name=name, instances=instances, map_name=map_name)
    return result
def started(name, instances=None, map_name=None, extra_kwargs=None):
    '''
    Ensures that a container is started, along with its dependencies.

    name
        Container configuration name.
    instances
        Optional list of instance names.
    map_name
        Container map name.
    extra_kwargs
        Extra keyword arguments for the container start.
    '''
    result = __salt__['container_map.start'](name, instances=instances, map_name=map_name,
                                             **(extra_kwargs or {}))
    result.update(name=name, instances=instances, map_name=map_name)
    return result
def restarted(name, instances=None, map_name=None, extra_kwargs=None):
    '''
    Restarts a container.

    name
        Container configuration name.
    instances
        Optional list of instance names.
    map_name
        Container map name.
    extra_kwargs
        Extra keyword arguments for the container restart.
    '''
    result = __salt__['container_map.restart'](name, instances=instances, map_name=map_name,
                                               **(extra_kwargs or {}))
    result.update(name=name, instances=instances, map_name=map_name)
    return result
def stopped(name, instances=None, map_name=None, extra_kwargs=None):
    '''
    Ensures that a container is stopped, along with its dependent containers.

    name
        Container configuration name.
    instances
        Optional list of instance names.
    map_name
        Container map name.
    extra_kwargs
        Extra keyword arguments for the container stop.
    '''
    result = __salt__['container_map.stop'](name, instances=instances, map_name=map_name,
                                            **(extra_kwargs or {}))
    result.update(name=name, instances=instances, map_name=map_name)
    return result
def removed(name, instances=None, map_name=None, extra_kwargs=None):
    '''
    Ensures that a container is removed, along with its dependent containers.

    name
        Container configuration name.
    instances
        Optional list of instance names.
    map_name
        Container map name.
    extra_kwargs
        Extra keyword arguments for the container removal.
    '''
    result = __salt__['container_map.remove'](name, instances=instances, map_name=map_name,
                                              **(extra_kwargs or {}))
    result.update(name=name, instances=instances, map_name=map_name)
    return result
def started_up(name, instances=None, map_name=None):
    '''
    Ensures that a container exists and that it is started, along with its dependencies.

    name
        Container configuration name.
    instances
        Optional list of instance names.
    map_name
        Container map name.
    '''
    outcome = __salt__['container_map.startup'](name, instances=instances, map_name=map_name)
    outcome['name'] = name
    outcome['instances'] = instances
    outcome['map_name'] = map_name
    return outcome
def shut_down(name, instances=None, map_name=None):
    '''
    Ensures that a container is stopped and removed, along with its dependent containers.

    name
        Container configuration name.
    instances
        Optional list of instance names.
    map_name
        Container map name.
    '''
    outcome = __salt__['container_map.shutdown'](name, instances=instances, map_name=map_name)
    outcome['name'] = name
    outcome['instances'] = instances
    outcome['map_name'] = map_name
    return outcome
def updated(name, instances=None, map_name=None, reload_signal=None, send_signal=False, **kwargs):
    '''
    Ensures that a container is up-to-date, i.e.

    * the image id corresponds with the image tag from the configuration
    * the existing container still has access to all dependent volumes
    * linked containers are available
    * command, entrypoint, or environment have not been changed.

    Non-existing containers are created and started. Outdated containers are removed, re-created, and restarted
    along the dependency path.

    name
        Container configuration name.
    instances
        Optional list of instance names.
    map_name
        Container map name.
    reload_signal
        Optional signal to send to the main process for reloading.
    send_signal : False
        Whether to send the ``reload_signal``. Set to ``True`` by the ``watch`` directive.
    '''
    # Only forward the reload signal when the state was triggered via ``watch``.
    effective_signal = reload_signal if send_signal else None
    outcome = __salt__['container_map.update'](name, instances=instances, map_name=map_name,
                                               reload_signal=effective_signal)
    outcome.update(name=name, instances=instances, map_name=map_name)
    return outcome
def signaled(name, instances=None, map_name=None, signal=None):
    """
    Sends a signal to a container. By default this is SIGKILL, but it can be set to other signals, e.g. SIGHUP
    for reloading configurations.

    name
        Container configuration name.
    instances
        Optional list of instance names.
    map_name
        Container map name.
    signal
        Signal name or number.
    """
    outcome = __salt__['container_map.kill'](name, instances=instances, map_name=map_name,
                                             signal=signal)
    outcome.update(name=name, instances=instances, map_name=map_name)
    return outcome
def all_removed(name, **kwargs):
    '''
    Removes all containers from the host. Note this also applies to containers that are not on any map.

    name
        State name - has no effect.
    kwargs
        Keyword arguments forwarded to ``container_map.remove_all_containers``.
    '''
    # clean_kwargs strips Salt-internal (dunder) keys before forwarding.
    outcome = __salt__['container_map.remove_all_containers'](**clean_kwargs(**kwargs))
    outcome['name'] = '__all__'
    return outcome
def containers_clean(name, include_initial=False, exclude=None):
    '''
    Removes all containers from the host which are not running, not attached volume containers, and not marked
    as persistent. Note this also applies to containers that are not on any map.

    name
        State name - has no effect.
    include_initial : False
        If set to ``True``, also removes containers that have never been running.
    exclude
        List of container names or ids to exclude from the removal.
    '''
    outcome = __salt__['container_map.cleanup_containers'](include_initial=include_initial,
                                                           exclude=exclude)
    outcome['name'] = '__all__'
    return outcome
def images_clean(name, remove_old=False, keep_tags=None):
    '''
    Removes all images from the host which are not in use by any container and have no tag. Optionally can also
    remove images with a repository tag that is not ``latest``, or all tags not in the specified whitelist.

    name
        State name - has no effect.
    remove_old : False
        Remove images that have a tag, but not ``latest``. Does not affect additional (e.g. version) tags of
        ``latest`` images.
    keep_tags
        Remove images that have none of the specified tags.
    '''
    outcome = __salt__['container_map.cleanup_images'](remove_old=remove_old, keep_tags=keep_tags)
    outcome['name'] = '__all__'
    return outcome
def images_updated(name, map_name=None, utility_images=True, insecure_registry=False):
    '''
    Ensures that all images on a map are updated to their latest version or the specified tag.

    name
        State name - has no effect.
    map_name
        Container map name.
    utility_images : True
        Unless set to ``False``, also updates utility images such as ``busybox`` and ``tianon/true``.
    insecure_registry : False
        Allow `insecure` registries for retrieving images.
    '''
    # NOTE(review): ``name`` is documented as having no effect, yet it is passed
    # through as the first positional argument to pull_latest_images - confirm intent.
    res = __salt__['container_map.pull_latest_images'](name, map_name=map_name,
                                                       utility_images=utility_images,
                                                       insecure_registry=insecure_registry)
    # Report the map that was updated; fall back to '__base__' when none was given.
    res['name'] = map_name or '__base__'
    return res
def logged_in(name, username=None, password=None, email=None, reauth=False):
    '''
    Ensures authentication to a registry. All parameters are optional; any credentials that are not provided
    are read from the ``docker-registries`` pillar, if available.

    name
        Registry name.
    username
        Login user name.
    password
        Login password.
    email
        Login email (optional in most cases).
    reauth : False
        Force re-authentication, even if authentication data has been cached for this registry.
    '''
    outcome = __salt__['container_map.login'](name, username=username, password=password,
                                              email=email, reauth=reauth)
    outcome['name'] = name
    return outcome
def built(name, **kwargs):
    '''
    Ensures a Docker image with the given tag exists, building it if necessary.

    name
        Image tag to apply.
    ignore_existing : False
        Rebuild the image if it exists. Note this does not imply ``nocache``, so it might not actually generate
        a new image.
    show_log : True
        Return the build output.
    source
        Dockerfile source (e.g. ``salt://...`` for a file loaded from the master).
    saltenv : 'base'
        Salt environment to use for loading source files.
    template
        Template engine to use for the source file.
    context:
        Additional template context.
    contents
        The script can be passed in here directly as a multiline string or list. Ignored if ``source`` is set.
    content_pillar
        Pillar to load the script contents from. Ignored if ``contents`` or ``source`` is set.
    baseimage:
        Image to base the build on. Ignored if ``source`` is used. Can also be included directly
        using the ``FROM`` Dockerfile command.
    maintainer:
        Maintainer to state in the image. Ignored if ``source`` is used. Can also be included
        using the ``MAINTAINER`` Dockerfile command.
    kwargs
        Additional keyword arguments for building the Docker image.
    '''
    rebuild = kwargs.pop('ignore_existing', False)
    if not rebuild and __salt__['container_map.image_tag_exists'](name):
        # Nothing to do - the tag is already present on the host.
        return dict(result=True, name=name, changes={}, comment="Image exists.")
    outcome = __salt__['container_map.build'](name, **kwargs)
    outcome['name'] = name
    return outcome
def mod_watch(name, sfun=None, **kwargs):
    """Dispatch ``watch`` requisites to the state functions that support them."""
    if sfun == 'updated':
        # A watched ``updated`` state should send the configured reload signal.
        kwargs['send_signal'] = True
        return updated(name, **kwargs)
    if sfun == 'built':
        # A watched ``built`` state always rebuilds the image.
        kwargs['ignore_existing'] = True
        return built(name, **kwargs)
    return dict(name=name, result=False, changes={},
                comment='watch requisite is not implemented for {0}'.format(sfun))
# salt-eventsd
A project based on but not related to saltstack
## The current stable release is tagged as: 0.9.3
If you are already using salt-eventsd, check the changelog for the latest changes and fixes.
Due to public request, i pushed the develop-branch to github for everyone to try out. From today
on, the latest bleeding-edge salt-eventsd will always be in the develop branch with new release
getting tagged.
Please note that I reserve the right to break develop. Even though I always test all changes
locally before pushing them to github, it may happen.
### Updating from 0.9 to 0.9.3
See the changelog for improvements in 0.9.3. For more info see installation.txt.
IMPORTANT:
If you're coming from 0.9 make sure, that you make the following changes to your config:
Rename: 'stat_upd' to 'stat_timer'
Add: 'stat_worker: False' (see installation.txt for details on it)
### Availability Notes
#### Pypi
As of Jan 22nd, we are on pypi: https://pypi.python.org/pypi/salt-eventsd/
#### Debian / Ubuntu
A debian-package can be built straight from the repo by running 'dpkg-buildpackage -b'. All dependencies
have to be installed of course.
#### Redhat / CentOS
There are no packages for redhat yet. If you have the knowledge and the resources to support that, feel
free to submit the necessary changes.
### What it does
A event-listener daemon for saltstack that writes data into mysql, postgres, statistical data into graphite, mongo,
etc. All events that occur on saltstacks eventbus can be handled and pushed to other daemons, databases, etc. You
decide yourself!
The daemon connects to the salt-masters event-bus and listens for all events. Depending on the configuration,
certain events can be collected by their tag and/or function-name and handed down to different workers. The
workers then extract the desired data-fields from the return and process them further in a user-definable way.
### Dependencies
Required python runtime dependencies:
- salt >= 0.16.2
- mysql-python
- argparse
- pyzmq
Optional/useful dependencies
- simplejson (Install with: pip install simplejson)
### Usage Examples
- collect all events with tag 'new_job' to have a job-history that lasts longer than saltstacks job-cache
- collect all job returns by matching on job-return-tagged event returned from minions to have a database with all returns you can index, search, etc.
- filter events into different backends like graphite, mongodb, mysql, postgres, whatever...
- collect historic data like load average etc. by collecting events with tag 'load' which are created by your own load-monitoring module
- create and collect your own custom backends that process you event-data
- etc.
### Why this is useful / Who needs this?
Currently saltstack does not have an external job-cache that works without a returner. Using returners and by that losing salt encryption
is not always desirable or maybe not even be an option. With this daemon, you can collect all data right where its created and returned: on the salt-master.
While saltstacks job-cache works well in smaller environments, in larger environments the job-cache can become a burden for the salt-master. Especially
if the job-cache should be kept for a longer period of time, and I'm talking weeks and months here. This is where the salt-eventsd jumps in. With the
default mysql-backend, its easy to collect data for weeks and weeks without burdening the salt-master to keep track of jobs and their results in the
job-cache.
Saltstacks job-cache can be completely disabled because all the data is in an independent database, fully indexed, searchable and
easily cleaned up and/or archived with a few querys.
In larger environments it is also a good idea, to seperate different services from one another. With salt-eventsd you can use saltstack for
communication and salt-eventsd to collect the actual data. The benefit is, that the salt-master does not need to be restarted just because changes
were done for example to a reactor or a runner.
### Features
- collect events from the salt-event-bus into a different backends
- collect a configurable amount of events before pushing them into different backends
- define Prio1 events that are pushed immediately without queuing them first
- write your own backends with ease (some python knowledge required)
- use regular expressions for matching on events, very flexible and powerful
- have events send to two backends for having a command+return history as well as having the data pushed elsewhere
- create your own sql-query-templates for inserting data into the database
- fully saltstack-job-cache independant database to hold all data you want in it
- example workers are found in the doc-directory
### Testing
py.test is used to run all available tests.
To install all test dependencies, run
```
$ pip install -r dev-requirements.txt
```
It is recommended to install all dependencies inside a virtualenv for easy isolation.
To run all tests, simply run the following in the root folder
```
py.test
```
Good options to use are `-x` for pdb debugging and `-s` for showing prints and log output.
### Benchmark
There is a simple benchmark script that can be used to test the performance of the code manually.
The script setups almost all required mocking and config inside the script.
Dependencies that is required is:
- mock (pip install mock)
Copy the worker file `doc/share/doc/eventsd_workers/Bench_Worker.py` to `/etc/salt/eventsd_workers/Bench_Worker.py`
Run the script with `python benchmark.py`
| /salt-eventsd-0.9.3.tar.gz/salt-eventsd-0.9.3/README.md | 0.452052 | 0.688108 | README.md | pypi |
PACKAGE_INIT = """\
# pylint: disable=missing-module-docstring
import pathlib
PACKAGE_ROOT = pathlib.Path(__file__).resolve().parent
try:
from .version import __version__
except ImportError: # pragma: no cover
__version__ = "0.0.0.not-installed"
try:
from importlib.metadata import version, PackageNotFoundError
try:
__version__ = version(__name__)
except PackageNotFoundError:
# package is not installed
pass
except ImportError:
try:
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
pass
except ImportError:
# pkg resources isn't even available?!
pass
"""
LOADERS_TEMPLATE = '''\
"""
Define the required entry-points functions in order for Salt to know
what and from where it should load this extension's loaders
"""
from . import PACKAGE_ROOT # pylint: disable=unused-import
{% for loader in loaders %}
{%- set loader_docstring = loader.rstrip("s") %}
{%- if loader_docstring == "module" %}
{%- set loader_docstring = "execution" %}
{%- endif %}
def get_{{ loader }}_dirs():
"""
Return a list of paths from where salt should load {{ loader.rstrip("s") }} modules
"""
return [str(PACKAGE_ROOT / "{{ loader.rstrip("s") + "s" }}")]
{% endfor %}
'''
LOADER_TEMPLATE = '''\
{%- set loader_name = loader.rstrip("s") -%}
"""
Salt {{ loader_name }} module
"""
import logging
log = logging.getLogger(__name__)
__virtualname__ = "{{ package_name }}"
def __virtual__():
# To force a module not to load return something like:
# return (False, "The {{ project_name }} {{ loader_name }} module is not implemented yet")
return __virtualname__
'''
MODULE_LOADER_TEMPLATE = '''\
"""
Salt execution module
"""
import logging
log = logging.getLogger(__name__)
__virtualname__ = "{{ package_name }}"
def __virtual__():
# To force a module not to load return something like:
# return (False, "The {{ project_name }} {{ loader_name }} module is not implemented yet")
return __virtualname__
def example_function(text):
"""
This example function should be replaced
CLI Example:
.. code-block:: bash
salt '*' {{ package_name}}.example_function text="foo bar"
"""
return __salt__["test.echo"](text)
'''
STATE_LOADER_TEMPLATE = '''\
{%- set loader_name = loader.rstrip("s") %}
"""
Salt {{ loader_name }} module
"""
import logging
log = logging.getLogger(__name__)
__virtualname__ = "{{ package_name }}"
def __virtual__():
# To force a module not to load return something like:
# return (False, "The {{ project_name }} {{ loader_name }} module is not implemented yet")
# Replace this with your own logic
if "{{package_name}}.example_function" not in __salt__:
return False, "The '{{package_name}}' execution module is not available"
return __virtualname__
def exampled(name):
"""
This example function should be replaced
"""
ret = {"name": name, "changes": {}, "result": False, "comment": ""}
value = __salt__["{{package_name}}.example_function"](name)
if value == name:
ret["result"] = True
ret["comment"] = "The '{{package_name}}.example_function' returned: '{}'".format(value)
return ret
'''
# Jinja template for the unit test accompanying a generated execution module.
LOADER_MODULE_UNIT_TEST_TEMPLATE = """\
import pytest
import salt.modules.test as testmod
import saltext.{{ package_name }}.{{ loader.rstrip("s") + "s" }}.{{ package_name }}_mod as {{ package_name }}_module


@pytest.fixture
def configure_loader_modules():
    module_globals = {
        "__salt__": {"test.echo": testmod.echo},
    }
    return {
        {{ package_name }}_module: module_globals,
    }


def test_replace_this_this_with_something_meaningful():
    echo_str = "Echoed!"
    assert {{ package_name }}_module.example_function(echo_str) == echo_str
"""

# Jinja template for the unit test accompanying a generated state module; it
# wires the state module's __salt__ to the matching execution module.
LOADER_STATE_UNIT_TEST_TEMPLATE = """\
import pytest
import salt.modules.test as testmod
import saltext.{{ package_name }}.modules.{{ package_name }}_mod as {{ package_name }}_module
import saltext.{{ package_name }}.{{ loader.rstrip("s") + "s" }}.{{ package_name }}_mod as {{ package_name }}_state


@pytest.fixture
def configure_loader_modules():
    return {
        {{ package_name }}_module: {
            "__salt__": {
                "test.echo": testmod.echo,
            },
        },
        {{ package_name }}_state: {
            "__salt__": {
                "{{ package_name }}.example_function": {{ package_name }}_module.example_function,
            },
        },
    }


def test_replace_this_this_with_something_meaningful():
    echo_str = "Echoed!"
    expected = {
        "name": echo_str,
        "changes": {},
        "result": True,
        "comment": "The '{{package_name}}.example_function' returned: '{}'".format(echo_str),
    }
    assert {{ package_name }}_state.exampled(echo_str) == expected
"""

# Jinja template for the unit test of a generic (non-module, non-state) loader.
LOADER_UNIT_TEST_TEMPLATE = """\
{%- set loader_name = loader.rstrip("s") %}
import pytest
import saltext.{{ package_name }}.{{ loader.rstrip("s") + "s" }}.{{ package_name }}_mod as {{ package_name }}_{{ loader_name }}


@pytest.fixture
def configure_loader_modules():
    module_globals = {
        "__salt__": {"this_does_not_exist.please_replace_it": lambda: True},
    }
    return {
        {{ package_name }}_{{ loader_name }}: module_globals,
    }


def test_replace_this_this_with_something_meaningful():
    assert "this_does_not_exist.please_replace_it" in {{ package_name }}_{{ loader_name }}.__salt__
    assert {{ package_name }}_{{ loader_name }}.__salt__["this_does_not_exist.please_replace_it"]() is True
"""

# Jinja template for the integration test of a generated execution module; it
# exercises the module end-to-end through salt-call.
LOADER_MODULE_INTEGRATION_TEST_TEMPLATE = """\
import pytest

pytestmark = [
    pytest.mark.requires_salt_modules("{{ package_name }}.example_function"),
]


def test_replace_this_this_with_something_meaningful(salt_call_cli):
    echo_str = "Echoed!"
    ret = salt_call_cli.run("{{ package_name}}.example_function", echo_str)
    assert ret.exitcode == 0
    assert ret.json
    assert ret.json == echo_str
"""
import enum
import json
from io import BytesIO
from typing import BinaryIO
import PyPDF2
from salt_finder_charts.finder_chart import FinderChart
from salt_finder_charts.util import Metadata
class OutputFormat(enum.Enum):
    """
    Available output formats.
    """

    PDF = "PDF"
    PNG = "PNG"
    SVG = "SVG"

    def extension(self):
        """
        The extension to use in filenames of files in this format.

        Returns
        -------
        str
            The file extension.
        """
        # Table lookup instead of an if/elif chain; the ValueError branch is
        # kept for safety should a format ever be added without an extension.
        known_extensions = {
            OutputFormat.PDF: "pdf",
            OutputFormat.PNG: "png",
            OutputFormat.SVG: "svg",
        }
        if self not in known_extensions:
            raise ValueError(
                f"No file extension defined for output format {self.value}."
            )
        return known_extensions[self]

    def mime_type(self) -> str:
        """
        The MIME type for this output format.

        Returns
        -------
        str
            MIME type
        """
        known_types = {
            OutputFormat.PDF: "application/pdf",
            OutputFormat.PNG: "image/png",
            OutputFormat.SVG: "image/svg+xml",
        }
        if self not in known_types:
            raise ValueError(f"No MIME type defined for output format {self.value}")
        return known_types[self]
def output_pdf(finder_chart: FinderChart, metadata: Metadata) -> BinaryIO:
    """
    Generate a binary stream with a PDF document containing the given finder chart.

    The metadata is embedded in the PDF as an attachment named "metadata",
    serialized as UTF-8 encoded JSON.

    Parameters
    ----------
    finder_chart : FinderChart
        Finding chart.
    metadata : FinderChartMetadata
        Finding chart metadata.

    Returns
    -------
    BytesIO
        A binary stream containing a PDF document with the finder chart.
    """
    # Render the chart to an in-memory PDF first ...
    out = BytesIO()
    finder_chart.plot.save(out, format="pdf")
    # ... then copy it through PyPDF2 so the metadata attachment can be added.
    pdf = PyPDF2.PdfFileReader(out)
    writer = PyPDF2.PdfFileWriter()
    writer.addAttachment("metadata", json.dumps(metadata).encode("UTF-8"))
    writer.addPage(pdf.getPage(0))
    bytes_stream = BytesIO()
    writer.write(bytes_stream)
    # Rewind so callers can read the document from the start.
    bytes_stream.seek(0)
    return bytes_stream
def output_png(finder_chart: FinderChart, metadata: Metadata) -> BinaryIO:
    """
    Generate a binary stream with a PNG image containing the given finder chart.

    The metadata is accepted for interface consistency with output_pdf but is
    not included in the output.

    Parameters
    ----------
    finder_chart : FinderChart
        Finding chart.
    metadata : FinderChartMetadata
        Finding chart metadata (unused).

    Returns
    -------
    BytesIO
        A binary stream containing a PNG image with the finder chart.
    """
    out = BytesIO()
    finder_chart.plot.save(out, format="png")
    # Rewind the stream (as output_pdf does) so callers can read it directly.
    out.seek(0)
    return out
def output_svg(finder_chart: FinderChart, metadata: Metadata) -> BinaryIO:
    """
    Generate a binary stream with an SVG image containing the given finder chart.

    The metadata is accepted for interface consistency with output_pdf but is
    not included in the output.

    Parameters
    ----------
    finder_chart : FinderChart
        Finding chart.
    metadata : FinderChartMetadata
        Finding chart metadata (unused).

    Returns
    -------
    BytesIO
        A binary stream containing an SVG image with the finder chart.
    """
    out = BytesIO()
    finder_chart.plot.save(out, format="svg")
    # Rewind the stream (as output_pdf does) so callers can read it directly.
    out.seek(0)
    return out
from abc import ABC
import bisect
from datetime import datetime, timedelta
from typing import List, NamedTuple, Optional, Tuple
from astropy.units import Quantity
from astropy import units as u
from astroquery.jplhorizons import Horizons
from dateutil.parser import parse
from dateutil.tz import tzutc
from salt_finder_charts.util import MagnitudeRange, Metadata
class Ephemeris(NamedTuple):
    """
    An ephemeris: the position and brightness of a target at one moment in time.

    Attributes
    ----------
    dec : Quantity
        Declination, as an angle between -90 and 90 degrees.
    dec_rate : Quantity
        Proper motion (rate of change) of the declination.
    epoch : datetime
        The epoch, i.e. a datetime for which this ephemeris is valid.
    magnitude_range : Optional[MagnitudeRange]
        The range of magnitudes the target may have, plus the bandpass for which the
        magnitudes are given.
    ra : Quantity
        Right ascension, as an angle between 0 and 360 degrees.
    ra_rate : Quantity
        Proper motion (rate of change) of the right ascension.
    """

    dec: Quantity
    dec_rate: Quantity
    epoch: datetime
    magnitude_range: Optional[MagnitudeRange]
    ra: Quantity
    ra_rate: Quantity
class EphemerisService(ABC):
    """
    An abstract base class for services providing ephemerides.
    """

    def ephemerides(self, start_time: datetime, end_time: datetime) -> List[Ephemeris]:
        """
        A list of ephemeris values that covers at least a given time interval.

        The ephemeris values are sorted by time (from earliest to latest).
        The first returned ephemeris may be for a time earlier than the start time and
        the last returned ephemeris may be for a time later than the end time.

        Parameters
        ----------
        start_time : datetime
            Start time.
        end_time : datetime
            End time.

        Returns
        -------
        list of Ephemeris
            List of ephemeris values at least covering the interval.
        """
        raise NotImplementedError

    def is_sidereal_target(self) -> bool:
        """
        Whether the ephemerides are for a sidereal target.

        Returns
        -------
        bool
            Whether the ephemerides are for a sidereal target.
        """
        raise NotImplementedError

    @staticmethod
    def center_position(ephemerides: List[Ephemeris]) -> Tuple[Quantity, Quantity]:
        """
        The center of the positions defined in a list of ephemerides.

        The center position is returned as a tuple of the right ascension and the
        declination.

        If the maximum and minimum right ascension differ by more than 180 degrees, it
        is assumed that the target right ascension crosses the 360 degrees mark.

        If the center right ascension is greater than 360 degrees, the equivalent angle
        between 0 and 360 degrees is used instead.

        Parameters
        ----------
        ephemerides : list of Ephemeris
            Ephemerides.

        Returns
        -------
        Tuple[Quantity, Quantity]
            The center position.
        """
        ra_min = min(ephemerides, key=lambda e: e.ra).ra
        ra_max = max(ephemerides, key=lambda e: e.ra).ra
        # A spread of more than 180 degrees implies the target crossed the
        # 0/360 degree boundary; shift the minimum into the next revolution.
        if ra_max - ra_min > 180 * u.deg:
            ra_min += 360 * u.deg
        dec_min = min(ephemerides, key=lambda e: e.dec).dec
        dec_max = max(ephemerides, key=lambda e: e.dec).dec
        ra_center = (ra_min + ra_max) / 2.0
        dec_center = (dec_min + dec_max) / 2.0
        # Map the right ascension back into [0, 360) degrees.
        if ra_center > 360 * u.deg:
            ra_center -= 360 * u.deg
        return ra_center, dec_center

    @staticmethod
    def find_magnitude_range(ephemerides: List[Ephemeris]) -> Optional[MagnitudeRange]:
        """
        The overall magnitude range for a list of ephemerides.

        If the magnitude range of any ephemeris is None, or if the minimum or maximum
        magnitude is None, or if the bandpass is not the same for all ephemerides, or
        if the given list of ephemerides is empty, None is returned.

        Parameters
        ----------
        ephemerides : list of Ephemeris
            Ephemerides.

        Returns
        -------
        Optional[MagnitudeRange]
            Magnitude range.
        """
        if len(ephemerides) == 0:
            return None
        bandpass = None
        if ephemerides[0].magnitude_range:
            bandpass = ephemerides[0].magnitude_range.bandpass
        if bandpass is None:
            return None
        # Start from +/- infinity so that any real magnitude (including the
        # negative values of very bright targets) updates the extrema. The
        # previous initial maximum of 1e-100 silently produced a wrong result
        # whenever every maximum magnitude was below that value.
        min_magnitude = float("inf")
        max_magnitude = float("-inf")
        for ephemeris in ephemerides:
            mr = ephemeris.magnitude_range
            if (
                mr is None
                or mr.min_magnitude is None
                or mr.max_magnitude is None
                or mr.bandpass != bandpass
            ):
                return None
            if mr.min_magnitude < min_magnitude:
                min_magnitude = mr.min_magnitude
            if mr.max_magnitude > max_magnitude:
                max_magnitude = mr.max_magnitude
        return MagnitudeRange(
            min_magnitude=min_magnitude, max_magnitude=max_magnitude, bandpass=bandpass
        )

    def metadata(self) -> Metadata:
        """
        Metadata characterising this ephemeris service.

        Returns
        -------
        Metadata
            Metadata for the ephemeris service.
        """
        raise NotImplementedError
def _cover_time_interval(
ephemerides: List[Ephemeris], start_time: datetime, end_time: datetime
) -> List[Ephemeris]:
"""
The smallest possible list of ephemerides from a given list that completely covers
a given time interval.
Parameters
----------
ephemerides : list of Ephemeris
start_time : timezone-aware start time
end_time : timezone-aware end time
Returns
-------
list of Ephemeris
The list of ephemerides covering the time interval.
"""
if start_time.tzinfo is None or end_time.tzinfo is None:
raise ValueError("The start and end time must be timezone-aware.")
if start_time >= end_time:
raise ValueError("The start time must be earlier than the end time")
all_times = [e.epoch for e in ephemerides]
start_index = bisect.bisect_right(all_times, start_time)
if not start_index and all_times[0] != start_index:
raise ValueError("The start time isn't covered by the ephemerides.")
end_index = bisect.bisect(all_times, end_time)
if end_index == len(all_times) and all_times[-1] != end_time:
raise ValueError("The end time isn't covered by the ephemerides.")
if end_index > 0 and all_times[end_index - 1] == end_time:
end_index -= 1
return ephemerides[start_index - 1 : end_index + 1]
class ConstantEphemerisService(EphemerisService):
    """
    An ephemeris generator for constant ephemerides.

    Parameters
    ----------
    ra : Quantity
        Right ascension, as an angle.
    dec : Quantity
        Declination, as an angle.
    magnitude_range : MagnitudeRange
        Magnitude range (optional).
    """

    def __init__(
        self, ra: Quantity, dec: Quantity, magnitude_range: Optional[MagnitudeRange]
    ) -> None:
        self.ra = ra
        self.dec = dec
        self.magnitude_range = magnitude_range

    def ephemerides(self, start_time: datetime, end_time: datetime) -> List[Ephemeris]:
        # Reject naive datetimes up front.
        if start_time.tzinfo is None or end_time.tzinfo is None:
            raise ValueError("The start and end time must be timezone-aware.")
        # The target does not move, so one identical position per interval
        # endpoint with zero proper motion is sufficient.
        no_motion = 0 * u.deg / u.second
        return [
            Ephemeris(
                dec=self.dec,
                dec_rate=no_motion,
                epoch=epoch,
                magnitude_range=self.magnitude_range,
                ra=self.ra,
                ra_rate=no_motion,
            )
            for epoch in (start_time, end_time)
        ]

    def is_sidereal_target(self) -> bool:
        # A fixed position is by definition sidereal.
        return True

    def metadata(self) -> Metadata:
        # Nothing beyond the constant coordinates characterises this service.
        return dict()
class HorizonsEphemerisService(EphemerisService):
    """
    An ephemeris generator using the JPL Horizons service.

    In order to avoid missing ephemerides, you should choose a start time at least two
    days earlier than the start time from which you generate finder charts, and you
    should choose an end time at least two days later than the end time until which
    you generate finder charts. This is necessary as the calculated visibility windows
    are not strictly confined to the time interval for which finder charts shall be
    generated.

    Parameters
    ----------
    object_id : str
        Identifier of the object whose ephemerides are generated.
    start_time : datetime
        Time of the first ephemeris to get from Horizons (must be timezone-aware).
    end_time : datetime
        Time of the last ephemeris to get from Horizons (must be timezone-aware).
    stepsize : Quantity
        Time between ephemerides queried from Horizons (must be at least 5 minutes).
    """

    def __init__(
        self,
        object_id: str,
        start_time: datetime,
        end_time: datetime,
        stepsize: Quantity,
    ):
        # Observatory code used by Horizons for the Sutherland site.
        SALT_OBSERVATORY_ID = "B31"
        # enforce timezones
        if start_time.tzinfo is None or end_time.tzinfo is None:
            raise ValueError("The start and end time must be timezone-aware.")
        # avoid overly excessive queries
        self.stepsize = stepsize
        if self.stepsize < 5 * u.minute:
            raise ValueError("The sampling interval must be at least 5 minutes.")
        # query Horizons (times are sent as UTC strings)
        self.object_id = object_id
        start = start_time.astimezone(tzutc()).strftime("%Y-%m-%d %H:%M:%S")
        # Make sure the whole time interval is covered by the queried ephemerides
        end_time_with_margin = end_time + timedelta(seconds=stepsize.to_value(u.second))
        stop = end_time_with_margin.astimezone(tzutc()).strftime("%Y-%m-%d %H:%M:%S")
        # Horizons requires an int for the step size. As round() might call NumPy's
        # round method and thus produce a float, we have to round "manually" using
        # the int function.
        step = f"{int(0.5 + stepsize.to_value(u.minute))}m"
        obj = Horizons(
            id=self.object_id,
            location=SALT_OBSERVATORY_ID,
            epochs={"start": start, "stop": stop, "step": step},
        )
        ephemerides = obj.ephemerides()
        # store the ephemerides in the format we need
        self._ephemerides = []
        for row in range(len(ephemerides)):
            # Horizons returns UTC datetime strings.
            epoch = parse(ephemerides["datetime_str"][row]).replace(tzinfo=tzutc())
            ra = float(ephemerides["RA"][row]) * u.deg
            dec = float(ephemerides["DEC"][row]) * u.deg
            ra_rate = float(ephemerides["RA_rate"][row]) * u.arcsec / u.hour
            dec_rate = ephemerides["DEC_rate"][row] * u.arcsec / u.hour
            # The V magnitude column may be absent; 0 is then used as a placeholder.
            magnitude = ephemerides["V"][row] if "V" in ephemerides.keys() else 0
            magnitude_range = MagnitudeRange(
                min_magnitude=magnitude, max_magnitude=magnitude, bandpass="V"
            )
            self._ephemerides.append(
                Ephemeris(
                    ra=ra,
                    dec=dec,
                    ra_rate=ra_rate,
                    dec_rate=dec_rate,
                    magnitude_range=magnitude_range,
                    epoch=epoch,
                )
            )

    def ephemerides(self, start_time: datetime, end_time: datetime) -> List[Ephemeris]:
        """
        The subset of the queried ephemerides covering the given time interval.

        Parameters
        ----------
        start_time : datetime
            Start time (must be timezone-aware).
        end_time : datetime
            End time (must be timezone-aware).

        Returns
        -------
        list of Ephemeris
            Ephemeris values covering the interval.
        """
        if start_time.tzinfo is None or end_time.tzinfo is None:
            raise ValueError("The start and end time must be timezone-aware.")
        return _cover_time_interval(self._ephemerides, start_time, end_time)

    def is_sidereal_target(self) -> bool:
        # Horizons targets are solar-system bodies, i.e. non-sidereal.
        return False

    def metadata(self) -> Metadata:
        return {
            "horizons_id": self.object_id,
            "horizons_stepsize": f"{self.stepsize.to_value(u.min)} min",
        }
import enum
from abc import ABC
from typing import Any, Optional
import astropy.units as u
from astropy.units import Quantity
from salt_finder_charts.position_angle import estimated_position_angle
from salt_finder_charts.util import MOSMask, Metadata
class Mode(enum.Enum):
    """
    Instrument mode.
    """

    # High Resolution Spectrograph
    HRS = "hrs"
    # Imaging (includes the BVIT annotation, see ImagingModeDetails)
    IMAGING = "imaging"
    # Longslit spectroscopy
    LONGSLIT = "ls"
    # Multi-object spectroscopy with a slit mask
    MOS = "mos"
    # Slot mode (high time resolution) observations
    SLOT = "slot"
class ModeDetails(ABC):
    """
    Observation mode details.

    Parameters
    ----------
    mode : Mode
        Observation mode.
    """

    def __init__(self, mode: Mode):
        self._mode = mode

    @property
    def mode(self) -> Mode:
        """The observation mode."""
        return self._mode

    def position_angle(self) -> Quantity:
        """
        The position angle.

        Returns
        -------
        Quantity
            The position angle.
        """
        raise NotImplementedError

    def automated_position_angle(self) -> bool:
        """
        Whether the position angle has been calculated automatically rather than having
        been supplied.

        Returns
        -------
        bool
            Whether the position angle has been calculated.
        """
        raise NotImplementedError

    def metadata(self) -> Metadata:
        """
        Metadata characterising these mode details.

        Returns
        -------
        Metadata
            Metadata for these mode details.
        """
        raise NotImplementedError

    def annotate_finder_chart(self, finder_chart: Any) -> None:
        """
        Add the mode specific content to a finder chart.

        Parameters
        ----------
        finder_chart : FindingChart
            Finding chart.
        """
        raise NotImplementedError
class ImagingModeDetails(ModeDetails):
    """
    Details for the imaging mode.

    Parameters
    ----------
    pa : Optional[Quantity]
        Position angle.
    """

    def __init__(self, pa: Optional[Quantity]):
        super().__init__(Mode.IMAGING)
        # Fall back to a zero position angle when none was supplied, and
        # remember whether the value was chosen automatically.
        self.automated_pa = pa is None
        self.pa = pa if pa is not None else 0 * u.deg

    def position_angle(self) -> Quantity:
        return self.pa

    def automated_position_angle(self) -> bool:
        return self.automated_pa

    def metadata(self) -> Metadata:
        return {}

    def annotate_finder_chart(self, finder_chart: Any) -> None:
        # Mark the BVIT field of view with a green circle around the center.
        bvit_fov_radius = 0.8 * u.arcmin
        finder_chart.draw_circle(
            finder_chart.ra, finder_chart.dec, bvit_fov_radius, color="g"
        )
        # Label the circle.
        finder_chart.plot.add_label(
            0.57,
            0.57,
            "BVIT",
            relative=True,
            style="italic",
            weight="bold",
            size="large",
            horizontalalignment="left",
            color=(0, 0, 1),
        )
class SlotModeDetails(ModeDetails):
    """
    Details for the slot mode.

    Parameters
    ----------
    pa : Optional[Quantity]
        Position angle.
    """

    def __init__(self, pa: Optional[Quantity]):
        super().__init__(Mode.SLOT)
        # Fall back to a zero position angle when none was supplied, and
        # remember whether the value was chosen automatically.
        self.automated_pa = pa is None
        self.pa = pa if pa is not None else 0 * u.deg

    def position_angle(self) -> Quantity:
        return self.pa

    def automated_position_angle(self) -> bool:
        return self.automated_pa

    def metadata(self) -> Metadata:
        return {}

    def annotate_finder_chart(self, finder_chart: Any) -> None:
        # Draw the slot as a long, narrow rectangle through the chart center,
        # rotated by 90 degrees relative to the position angle.
        slot_width = 2.0 * u.arcmin / 6.0
        slot_length = 10.0 * u.arcmin
        finder_chart.draw_centered_rectangle(
            self.pa + 90 * u.deg,
            slot_width,
            slot_length,
            finder_chart.ra,
            finder_chart.dec,
            color="r",
            linewidth=2,
            alpha=0.5,
        )
class LongslitModeDetails(ModeDetails):
    """
    Details for the longslit mode.

    Parameters
    ----------
    slitwidth : Quantity
        Slitwidth, as an angle.
    pa : Optional[Quantity]
        Position angle.
    center_ra : Quantity
        Right ascension of the slit center, as an angle
    center_dec : Quantity
        Declination of the slit center, as an angle
    """

    def __init__(
        self,
        slitwidth: Quantity,
        pa: Optional[Quantity],
        center_ra: Quantity,
        center_dec: Quantity,
    ):
        super().__init__(Mode.LONGSLIT)
        # When no position angle is supplied, estimate one from the slit
        # center position and remember that it was chosen automatically.
        self.automated_pa = pa is None
        self.pa = (
            pa if pa is not None else estimated_position_angle(center_ra, center_dec)
        )
        self.slitwidth = slitwidth

    def position_angle(self) -> Quantity:
        return self.pa

    def automated_position_angle(self) -> bool:
        return self.automated_pa

    def metadata(self) -> Metadata:
        return {"slitwidth": f"{self.slitwidth.to_value(u.arcsec)} arcsec"}

    def annotate_finder_chart(self, finder_chart: Any) -> None:
        # Draw the slit through the chart center at the position angle.
        slit_length = 8.0 * u.arcmin
        finder_chart.draw_centered_rectangle(
            self.pa,
            self.slitwidth,
            slit_length,
            finder_chart.ra,
            finder_chart.dec,
            color="r",
            linewidth=1,
            alpha=0.5,
        )
class MOSModeDetails(ModeDetails):
    """
    Details for the MOS mode.

    Parameters
    ----------
    mos_mask : MOSMask
        MOS mask.
    """

    def __init__(self, mos_mask: MOSMask):
        super().__init__(Mode.MOS)
        self.mos_mask = mos_mask

    def position_angle(self) -> Quantity:
        # The position angle is fixed by the mask definition.
        return self.mos_mask.position_angle

    def automated_position_angle(self) -> bool:
        return False

    def metadata(self) -> Metadata:
        return {"mask_xml": self.mos_mask.xml}

    def annotate_finder_chart(self, finder_chart: Any) -> None:
        mask_pa = self.mos_mask.position_angle
        # Draw every slit at its own tilt relative to the mask position angle.
        for slit in self.mos_mask.slits:
            finder_chart.draw_centered_rectangle(
                mask_pa + slit.tilt, slit.width, slit.height, slit.ra, slit.dec, color="r"
            )
        # Highlight the reference stars with larger yellow boxes.
        reference_box_size = 5.0 * u.arcsec
        for ref in self.mos_mask.reference_stars:
            finder_chart.draw_centered_rectangle(
                mask_pa,
                reference_box_size,
                reference_box_size,
                ref.ra,
                ref.dec,
                color=(1, 1, 0),
                linewidth=2,
            )
import bisect
from datetime import datetime, timedelta
from typing import cast, List, Optional, Tuple
from astropy.units import Quantity
from astropy import units as u
from dateutil.tz import tzutc
import ephem
import numpy as np
from salt_finder_charts.ephemerides import EphemerisService
# Geographic coordinates of the Sutherland observing site.
SUTH_LONGITUDE = 20.8108 * u.deg
SUTH_LATITUDE = -32.3755556 * u.deg
# Sines of the altitude limits used by _is_visible_with_salt to decide whether
# a target is observable (presumably SALT's fixed-elevation visibility
# annulus — confirm the exact limits against telescope documentation).
MIN_ALTITUDE_SINE = np.sin(46.18 * u.deg)
MAX_ALTITUDE_SINE = np.sin(59.36 * u.deg)
def visibility_windows(
    ephemeris_service: EphemerisService, start_time: datetime, end_time: datetime
) -> List[Tuple[datetime, datetime]]:
    """
    Returns the visibility windows for all the nights in the
    specified time interval.

    Both the start and end time should be outside a night.

    Parameters
    ----------
    ephemeris_service : EphemerisService
        Service for getting the required ephemerides.
    start_time : datetime
        Start time (must be timezone-aware).
    end_time : datetime
        End time (must be timezone-aware).

    Returns
    -------
    list of intervals
        The visibility windows.

    Raises
    ------
    ValueError
        If a time is naive or the start time is not earlier than the end time.
    """
    # enforce timezones
    if start_time.tzinfo is None or end_time.tzinfo is None:
        raise ValueError("The start and end time must be timezone-aware.")
    # Raise ValueError (as everywhere else in this module) rather than a bare
    # Exception, so callers can handle invalid arguments uniformly.
    if start_time >= end_time:
        raise ValueError("The start time must be earlier than the end time.")
    t = start_time
    dt = timedelta(days=1)
    windows = []
    while t < end_time:
        # Collect the windows of the night following t, clipping them to the
        # requested interval.
        windows_for_night = _visibility_windows_next_night(ephemeris_service, t)
        for window in windows_for_night:
            window_start = window[0]
            if window_start < end_time:
                window_end = min(window[1], end_time)
                windows.append((window_start, window_end))
        t += dt
    return windows
def fov_fitting_intervals(
    intervals: List[Tuple[datetime, datetime]],
    ephemeris_generator: EphemerisService,
    fov_radius: Quantity,
) -> List[Tuple[datetime, datetime]]:
    """
    Split time intervals up so that the positions within every interval fit into the
    field of view.

    Intervals that do not fit are recursively halved (at an ephemeris epoch)
    until every sub-interval fits.

    Parameters
    ----------
    intervals : list of intervals
        Time intervals to split up.
    ephemeris_generator : EphemerisService
        Generator for the required ephemerides.
    fov_radius : Quantity
        The radius of the field of view, as an angle.

    Returns
    -------
    list of tuple
        The list of time intervals.

    Raises
    ------
    ValueError
        If the ephemerides are too sparse to split an interval any further.
    """
    fitting_intervals: List[Tuple[datetime, datetime]] = []
    for interval in intervals:
        start = interval[0]
        end = interval[1]
        ephemerides = ephemeris_generator.ephemerides(start, end)
        epochs = [e.epoch for e in ephemerides]
        right_ascensions = [e.ra for e in ephemerides]
        declinations = [e.dec for e in ephemerides]
        # handle the transition from 360 to 0 degrees
        _transform_right_ascensions(right_ascensions, True)
        # find the RA, dec center
        center_ra, center_dec = EphemerisService.center_position(ephemerides)
        # are all positions located in the FOV around this center?
        all_in_fov = True
        for i in range(0, len(epochs)):
            if not is_in_fov(
                center_ra, center_dec, right_ascensions[i], declinations[i], fov_radius
            ):
                all_in_fov = False
                break
        if all_in_fov:
            fitting_intervals.append(interval)
        else:
            # split the interval in half (more or less) and recurse
            if len(epochs) <= 2:
                raise ValueError("The ephemerides are too sparse to cover the FOV.")
            # timedelta.total_seconds() replaces the previous hand-rolled
            # microsecond arithmetic; the result is identical.
            dt = (epochs[-1] - epochs[0]).total_seconds()
            dt_center = int(round(0.5 * dt))
            center_time = epochs[0] + timedelta(seconds=dt_center)
            index = bisect.bisect(epochs, center_time)
            # Never split at the very last epoch, or one half would be empty.
            if index == len(epochs) - 1:
                index -= 1
            interval1 = (epochs[0], epochs[index])
            interval2 = (epochs[index], epochs[-1])
            if interval1[0] != interval1[1]:
                fitting_intervals.extend(
                    fov_fitting_intervals([interval1], ephemeris_generator, fov_radius)
                )
            if interval2[0] != interval2[1]:
                fitting_intervals.extend(
                    fov_fitting_intervals([interval2], ephemeris_generator, fov_radius)
                )
    return fitting_intervals
def is_in_fov(
    center_ra: Quantity,
    center_dec: Quantity,
    ra: Quantity,
    dec: Quantity,
    fov_radius: Quantity,
) -> bool:
    """
    Check whether a right ascension and declination are within the field of view (FOV).

    Parameters
    ----------
    center_ra : Quantity
        Right ascension of the FOV's center, as an angle.
    center_dec : Quantity
        Declination of the FOV's center, as an angle.
    ra : Quantity
        Right ascension, as an angle.
    dec : Quantity
        Declination, as an angle
    fov_radius : Quantity
        Radius of the FOV, as an angle.

    Returns
    -------
    bool
        Whether the right ascension and declination are within the field of view.
    """
    # Planar (small-angle) approximation: scale the right ascension offset by
    # cos(dec) and compare the angular distance with the FOV radius.
    delta_ra = (ra - center_ra) * abs(np.cos(dec))
    delta_dec = dec - center_dec
    return cast(bool, np.sqrt(delta_ra ** 2 + delta_dec ** 2) <= fov_radius)
def _is_visible_with_salt(ra: Quantity, dec: Quantity, t: datetime) -> bool:
    """
    Checks whether a target is visible by SALT.

    Parameters
    ----------
    ra: Quantity
        Right ascension.
    dec: Quantity
        Declination.
    t: datetime
        Time (must be timezone-aware).
    """
    # Hour angle of the target, from the local sidereal time at Sutherland.
    observer = _salt_observer(t)
    local_sidereal_time = observer.sidereal_time() * u.radian  # PyEphem uses radians
    hour_angle = local_sidereal_time - ra
    # Standard altitude formula:
    # sin(alt) = sin(lat) sin(dec) + cos(lat) cos(dec) cos(H)
    sin_altitude = np.sin(SUTH_LATITUDE) * np.sin(dec) + np.cos(
        SUTH_LATITUDE
    ) * np.cos(dec) * np.cos(hour_angle)
    # The target is visible while its altitude lies between the two limits.
    return cast(bool, MIN_ALTITUDE_SINE < sin_altitude < MAX_ALTITUDE_SINE)
def _visibility_windows_next_night(
    ephemeris_service: EphemerisService, t: datetime
) -> List[Tuple[datetime, datetime]]:
    """
    Returns the visibility windows for the night following
    the given time.

    The night is sampled in five-minute steps; contiguous runs of visibility
    are returned as windows, padded by one step on either side.

    Parameters
    ----------
    ephemeris_service : EphemerisService
        Service to use for getting the required ephemerides.
    t: datetime
        Time (must be timezone-aware).

    Returns
    -------
    list of tuple
        The visibility windows for the next night.
    """
    # get the night data
    observer = _salt_observer(t)
    sunset = observer.next_setting(ephem.Sun()).datetime().replace(tzinfo=tzutc())
    sunrise = observer.next_rising(ephem.Sun()).datetime().replace(tzinfo=tzutc())
    night_ephemerides = ephemeris_service.ephemerides(sunset, sunrise)
    # The epochs do not change during the loop below, so compute them once
    # instead of on every five-minute sample (previously loop-invariant work).
    epochs = [e.epoch for e in night_ephemerides]
    # get the visibility windows
    windows: List[Tuple[datetime, datetime]] = []
    dt = timedelta(seconds=300)
    tp = sunset
    window_start: Optional[datetime] = None
    window_end: Optional[datetime] = None
    while tp <= sunrise:
        # Use the first ephemeris after tp; clamp the index so that a sample
        # time at or beyond the last epoch cannot cause an IndexError.
        index = min(bisect.bisect(epochs, tp), len(epochs) - 1)
        ra = night_ephemerides[index].ra
        dec = night_ephemerides[index].dec
        visible = _is_visible_with_salt(ra, dec, tp)
        if visible:
            if window_start is None:
                window_start = tp
            window_end = tp
        else:
            # The visibility run (if any) ended; record it with padding.
            if window_start is not None and window_end is not None:
                windows.append((window_start - dt, window_end + dt))
            window_start = None
            window_end = None
        tp += dt
    # Close a window still open at sunrise.
    if window_start is not None and window_end is not None:
        windows.append((window_start - dt, window_end + dt))
    return windows
def _salt_observer(t: datetime) -> ephem.Observer:
    """
    Returns a PyEphem Observer instance for the right ascension and declination of
    Sutherland.

    Parameters
    ----------
    t: datetime
        Datetime for the observer (must be timezone-aware).

    Returns
    -------
    Observer
        PyEphem Observer instance.
    """
    # Ensure timezone.
    if t.tzinfo is None:
        raise ValueError("The time must be timezone-aware.")
    salt_site = ephem.Observer()
    # PyEphem expects coordinates in radians.
    salt_site.lat = SUTH_LATITUDE.to_value(u.radian)
    salt_site.lon = SUTH_LONGITUDE.to_value(u.radian)
    salt_site.date = ephem.Date(t)
    return salt_site
def _transform_right_ascensions(
    right_ascensions: List[Quantity], continuous_at_360: bool
) -> None:
    """
    Transform the given right ascensions. If the continuous_at_360 flag is true,
    values between 0 and 1 degree are increased by 360 degrees if there are values
    between 359 and 360 degrees. Otherwise values greater than or equal to 360
    degrees are reduced by 360 degrees.

    The transformation is done in place.

    This function assumes that the difference between subsequent right ascensions does
    not exceed 1 degree and that the overall range of right ascensions does not exceed
    a few degrees.

    Parameters
    ----------
    right_ascensions : list of Quantity
        List of right ascensions to transform.
    continuous_at_360 : bool
        Whether the transformed right ascensions may extend beyond 360 degrees.
    """
    if continuous_at_360:
        # Only unwrap when values sit on both sides of the 0/360 boundary.
        near_360 = any(ra >= 359 * u.deg for ra in right_ascensions)
        near_0 = any(ra <= 1 * u.deg for ra in right_ascensions)
        if near_360 and near_0:
            for idx, ra in enumerate(right_ascensions):
                if ra <= 1 * u.deg:
                    right_ascensions[idx] = ra + 360 * u.deg
    else:
        # Wrap values back into [0, 360) degrees.
        for idx, ra in enumerate(right_ascensions):
            if ra >= 360 * u.deg:
                right_ascensions[idx] = ra - 360 * u.deg
from datetime import datetime, timedelta
from typing import BinaryIO, Generator, Optional, Tuple
import astropy.units as u
import pytz
from astropy.units import Quantity
from salt_finder_charts.image import Survey, SurveyImageService
from salt_finder_charts.mode import (
Mode,
ModeDetails,
ImagingModeDetails,
LongslitModeDetails,
SlotModeDetails,
MOSModeDetails,
)
from salt_finder_charts.output import output_pdf, output_png, output_svg, OutputFormat
from salt_finder_charts.util import (
MagnitudeRange,
MOSMask,
julian_day_start,
julian_day_end,
)
from salt_finder_charts import finder_charts
from salt_finder_charts.ephemerides import (
HorizonsEphemerisService,
ConstantEphemerisService,
EphemerisService,
)
TimeInterval = Tuple[datetime, datetime]
def standard_finder_charts(
    # arguments which are always required
    mode: Mode,
    output_format: OutputFormat,
    # time interval
    start_time: Optional[datetime] = None,
    end_time: Optional[datetime] = None,
    # ephemerides
    ra: Optional[Quantity] = None,
    dec: Optional[Quantity] = None,
    min_magnitude: Optional[float] = None,
    max_magnitude: Optional[float] = None,
    bandpass: Optional[str] = None,
    horizons_id: Optional[str] = None,
    horizons_stepsize: Optional[Quantity] = None,
    # image
    survey: Survey = Survey.POSS2UKSTU_RED,
    # instrument mode details
    position_angle: Optional[Quantity] = None,
    slitwidth: Optional[Quantity] = None,
    mos_mask_rsmt: Optional[BinaryIO] = None,
    # miscellaneous
    basic_annotations: bool = False,
    title: Optional[str] = None,
) -> Generator[BinaryIO, None, None]:
    """
    Create standard SALT finder charts.

    Some of the parameters are mutually exclusive. For example, it does not
    make sense to specify a slit width if you generate finding charts for
    imaging mode. In some cases such combinations will raise an error, but in
    others some of the parameters may just be ignored.

    If no start time is given, the beginning of the current Julian day is
    assumed. If no end time is given, the end of the current Julian day is
    assumed. Both times must be timezone-aware if supplied.

    Parameters
    ----------
    mode : Mode
        Observation mode (such as imaging or MOS).
    basic_annotations : bool
        Whether only basic annotations should be added to the finder chart.
    output_format : OutputFormat
        Output format (such as PDF) to use for the generated finder charts.
    start_time : datetime
        Start time from which to generate finder charts (timezone-aware).
    end_time : datetime
        End time until which to generate finder charts (timezone-aware).
    ra : Quantity
        Right ascension of the finder chart center.
    dec : Quantity
        Declination of the finder chart center.
    min_magnitude : float
        Minimum magnitude of the target.
    max_magnitude : float
        Maximum magnitude of the target.
    bandpass : str
        Bandpass (such as V) for the magnitudes.
    horizons_id : str
        Identifier for a target in the Horizons database.
    horizons_stepsize : Quantity
        Time between ephemerides queried from the Horizons service. The
        default is 5 minutes.
    survey : Survey
        The image survey from which the finder chart image shall be taken.
    position_angle : Quantity
        The position angle.
    slitwidth : Quantity
        The width of the longslit, as an angle.
    mos_mask_rsmt : BinaryIO
        Input stream containing an RSMT file for a MOS setup.
    title : str
        Title for the finder chart.

    Returns
    -------
    Generator of BinaryIO
        The finder charts as input streams.

    Raises
    ------
    ValueError
        If a required parameter is missing or mutually exclusive parameters
        are combined.
    """
    # time interval
    # get default start and end time if need be
    now = datetime.now(pytz.utc)
    if not start_time:
        start_time = julian_day_start(now)
    if not end_time:
        end_time = julian_day_end(now)
    # ensure there are timezones
    if start_time.tzinfo is None:
        raise ValueError("The start time must be timezone-aware.")
    if end_time.tzinfo is None:
        raise ValueError("The end time must be timezone aware.")
    # ephemerides
    mos_mask: Optional[MOSMask] = None
    if mode == Mode.MOS:
        if mos_mask_rsmt is None:
            raise ValueError(
                "A RSMT file must be supplied if a finding chart is generated for MOS mode."
            )
        # In MOS mode the pointing is fully defined by the mask file, so
        # these values must not be passed in as well.
        if ra or dec or position_angle:
            raise ValueError(
                "You must not supply a right ascension, declination or position angle in MOS mode, as they are taken from the MOS mask definition."
            )
        mos_mask = MOSMask(mos_mask_rsmt)
        ra = mos_mask.right_ascension
        dec = mos_mask.declination
        position_angle = mos_mask.position_angle
    if horizons_id:
        # get ephemerides from Horizons
        if ra is not None or dec is not None:
            raise ValueError(
                "No right ascension or declination must be supplied if a Horizons identifier is supplied."
            )
        if horizons_stepsize is None:
            horizons_stepsize = 5 * u.minute
        # The query range extends 2 days beyond the requested interval —
        # presumably so that ephemerides exist around the interval
        # boundaries; confirm in HorizonsEphemerisService before changing.
        ephemeris_service: EphemerisService = HorizonsEphemerisService(
            object_id=horizons_id,
            start_time=start_time - timedelta(days=2),
            end_time=end_time + timedelta(days=2),
            stepsize=horizons_stepsize,
        )
    else:
        # use constant ephemerides for a sidereal (fixed-coordinate) target
        if ra is None:
            raise ValueError("The right ascension is missing.")
        if dec is None:
            raise ValueError("The declination is missing.")
        # A magnitude range is all-or-nothing: either all three of minimum,
        # maximum and bandpass are given, or none of them.
        if min_magnitude is not None and (max_magnitude is None or bandpass is None):
            raise ValueError(
                "You must supply a maximum magnitude and bandpass if you supply a minimum magnitude."
            )
        if max_magnitude is not None and (min_magnitude is None or bandpass is None):
            raise ValueError(
                "You must supply a minimum magnitude and bandpass if you supply a maximum magnitude."
            )
        if bandpass is not None and (min_magnitude is None or max_magnitude is None):
            raise ValueError(
                "You must supply a minimum and maximum magnitude if you supply a bandpass."
            )
        magnitude_range: Optional[MagnitudeRange] = None
        if (
            min_magnitude is not None
            and max_magnitude is not None
            and bandpass is not None
        ):
            magnitude_range = MagnitudeRange(
                min_magnitude=min_magnitude,
                max_magnitude=max_magnitude,
                bandpass=bandpass,
            )
        ephemeris_service = ConstantEphemerisService(
            ra=ra, dec=dec, magnitude_range=magnitude_range
        )
    # image
    image_service = SurveyImageService(survey=survey)
    # mode details
    if mode is None:
        raise ValueError("You must specify an instrument mode.")
    if mode == Mode.IMAGING or mode == Mode.HRS:
        mode_details: ModeDetails = ImagingModeDetails(position_angle)
    elif mode == Mode.SLOT:
        mode_details = SlotModeDetails(pa=position_angle)
    elif mode == Mode.LONGSLIT:
        if slitwidth is None:
            raise ValueError(
                "A slit width is required if a finding chart is generated for longslit mode."
            )
        mode_details = LongslitModeDetails(
            slitwidth=slitwidth, pa=position_angle, center_ra=ra, center_dec=dec
        )
    elif mode == Mode.MOS:
        if not mos_mask:
            raise ValueError("No MOS mask has been supplied.")
        mode_details = MOSModeDetails(mos_mask)
    else:
        raise ValueError(f"Mode unsupported: {mode.value}")
    # output
    if output_format == OutputFormat.PDF:
        output = output_pdf
    elif output_format == OutputFormat.PNG:
        output = output_png
    elif output_format == OutputFormat.SVG:
        output = output_svg
    else:
        raise ValueError(f"Output format unsupported: {output_format.value}")
    # generate the finder charts
    return finder_charts(
        mode_details=mode_details,
        start_time=start_time,
        end_time=end_time,
        ephemeris_service=ephemeris_service,
        image_service=image_service,
        title=title,
        basic_annotations=basic_annotations,
        output=output,
    )
from abc import ABC, abstractmethod
import enum
import io
from typing import TextIO
import urllib.parse
import urllib.request
from astropy.coordinates.angles import Angle
import astropy.io.fits as pyfits
from astropy.units import Quantity
from astropy import units as u
class Survey(enum.Enum):
    """
    Image survey from which finder chart images can be taken.

    The enum values are human-readable survey names; they are also used as
    the survey choices on the command line.
    """

    # Served by the STScI DSS server (see SurveyImageService.STSCI_SURVEYS).
    POSS1_BLUE = "POSS1 Blue"
    POSS1_RED = "POSS1 Red"
    POSS2UKSTU_BLUE = "POSS2/UKSTU Blue"
    POSS2UKSTU_IR = "POSS2/UKSTU IR"
    POSS2UKSTU_RED = "POSS2/UKSTU Red"
    # Served by NASA's SkyView server (see SurveyImageService.SKY_VIEW_SURVEYS).
    TWO_MASS_H = "2MASS-H"
    TWO_MASS_J = "2MASS-J"
    TWO_MASS_K = "2MASS-K"
class ImageService(ABC):
    """
    A generator of FITS HDU list objects to use for the image in a finder
    chart.

    Subclasses must implement both abstract methods.
    """

    @abstractmethod
    def image(self, ra: Quantity, dec: Quantity) -> pyfits.HDUList:
        """
        Generate a HDU list to use for the image in a finder chart.

        Parameters
        ----------
        ra : Quantity
            Right ascension of the image centre.
        dec : Quantity
            Declination of the image centre.

        Returns
        -------
        HDUList
            The HDU list object for the image.
        """
        raise NotImplementedError

    @abstractmethod
    def source(self) -> str:
        """
        The name of the image source (such as a survey).

        Returns
        -------
        str
            The image source.
        """
        raise NotImplementedError
class SurveyImageService(ImageService):
    """
    Image service for getting an image using one of a set of image surveys.

    Parameters
    ----------
    survey : Survey
        Image survey.
    """

    #: Surveys served by the STScI Digitized Sky Survey server.
    STSCI_SURVEYS = [
        Survey.POSS2UKSTU_RED,
        Survey.POSS2UKSTU_BLUE,
        Survey.POSS2UKSTU_IR,
        Survey.POSS1_RED,
        Survey.POSS1_BLUE,
    ]
    #: Surveys served by NASA's SkyView server.
    SKY_VIEW_SURVEYS = [Survey.TWO_MASS_J, Survey.TWO_MASS_H, Survey.TWO_MASS_K]

    def __init__(self, survey: Survey):
        self.survey = survey

    def image(self, ra: Quantity, dec: Quantity) -> pyfits.HDUList:
        """
        Fetch a 10' x 10' FITS image centred on the given position.

        Parameters
        ----------
        ra : Quantity
            Right ascension of the image centre.
        dec : Quantity
            Declination of the image centre.

        Returns
        -------
        HDUList
            The HDU list object for the image.

        Raises
        ------
        ValueError
            If the survey is not supported.
        """
        # grab 10' x 10' image from server and pull it into pyfits
        if self.survey in SurveyImageService.STSCI_SURVEYS:
            survey_identifiers = {
                Survey.POSS2UKSTU_RED: "poss2ukstu_red",
                Survey.POSS2UKSTU_BLUE: "poss2ukstu_blue",
                Survey.POSS2UKSTU_IR: "poss2ukstu_ir",
                Survey.POSS1_RED: "poss1_red",
                Survey.POSS1_BLUE: "poss1_blue",
            }
            url = "https://archive.stsci.edu/cgi-bin/dss_search"
            params = urllib.parse.urlencode(
                {
                    "v": survey_identifiers[self.survey],
                    "r": "%f" % ra.to_value(u.deg),
                    "d": "%f" % dec.to_value(u.deg),
                    "e": "J2000",
                    "h": 10.0,
                    "w": 10.0,
                    "f": "fits",
                    "c": "none",
                }
            ).encode("utf-8")
        elif self.survey in SurveyImageService.SKY_VIEW_SURVEYS:
            survey_identifiers = {
                Survey.TWO_MASS_J: "2mass-j",
                Survey.TWO_MASS_H: "2mass-h",
                Survey.TWO_MASS_K: "2mass-k",
            }
            ra = Angle(ra)
            dec = Angle(dec)
            url = "https://skyview.gsfc.nasa.gov/current/cgi/runquery.pl"
            # NOTE(review): for declinations between -1 and 0 degrees the
            # degrees component of dms is negative zero, so the sign of the
            # declination may be lost in the formatted position — confirm.
            params = urllib.parse.urlencode(
                {
                    "Position": "'%d %d %f, %d %d %f'"
                    % (
                        round(ra.hms[0]),
                        ra.hms[1],
                        ra.hms[2],
                        round(dec.dms[0]),
                        abs(dec.dms[1]),
                        abs(dec.dms[2]),
                    ),
                    "Survey": survey_identifiers[self.survey],
                    "Coordinates": "J2000",
                    "Return": "FITS",
                    "Pixels": 700,
                    "Size": 0.1667,
                }
            ).encode("utf-8")
        else:
            # ValueError is consistent with the rest of the package and is
            # still caught by any caller that catches Exception.
            raise ValueError(f"Unsupported survey: {self.survey}")
        # POST the query and read the response into an in-memory FITS file.
        data = urllib.request.urlopen(url, params).read()
        return pyfits.open(io.BytesIO(data))

    def source(self) -> str:
        return str(self.survey.value)
class FITSImageService(ImageService):
    """
    Image service serving an image from a user-supplied FITS file.

    Use this class with care: it performs no check that the FITS file is
    consistent with the right ascension and declination passed to the image
    method, so inconsistent input easily produces completely nonsensical
    finder charts.

    Parameters
    ----------
    fits : text stream
        Text stream containing the FITS file.
    """

    def __init__(self, fits: TextIO):
        self.hdu = pyfits.open(fits)

    def image(self, ra: Quantity, dec: Quantity) -> pyfits.HDUList:
        """
        Return the HDU list object read from the FITS file.

        The right ascension and declination arguments are ignored and no
        consistency check is performed; it is the caller's responsibility to
        ensure that the FITS file matches the given position.

        Parameters
        ----------
        ra : Quantity
            Right ascension.
        dec : Quantity
            Declination.

        Returns
        -------
        HDUList
            The HDU list object for the image.
        """
        return self.hdu

    def source(self) -> str:
        return "User-supplied FITS"
from typing import Optional, Tuple
from gilmenel import gilmenel
from gilmenel.instruments import BaseInstrument, Star
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.units import Quantity
# Default search annulus (inner and outer radius) for suitable stars around
# the target.
MIN_RADIUS = 1 * u.arcmin
MAX_RADIUS = 3 * u.arcmin
# Default magnitude limits (brightest and faintest) for a suitable star.
MIN_MAG = 15
MAX_MAG = 18
# Default minimum angular distance a suitable star must have from its
# neighbours.
MIN_STAR_SEPARATION = 10 * u.arcsec
def estimated_position_angle(
    ra: Quantity,
    dec: Quantity,
    radius_range: Tuple[Quantity, Quantity] = (MIN_RADIUS, MAX_RADIUS),
    mag_range: Tuple[float, float] = (MIN_MAG, MAX_MAG),
    min_star_separation: Quantity = MIN_STAR_SEPARATION,
) -> Optional[Quantity]:
    """
    Find a suitable position angle.

    The GAIA star catalog is used to find a suitable star with which a slit
    can be properly positioned, and the position angle of that star relative
    to the target is returned.

    Parameters
    ----------
    ra : Quantity
        Right ascension of the target, as an angle.
    dec : Quantity
        Declination of the target, as an angle.
    radius_range : pair of Quantity
        The inner and outer radius (as an angle) of the annulus in which a
        suitable star may be located.
    mag_range : pair of float
        The minimum (brightest) and maximum (faintest) magnitude a suitable
        star may have.
    min_star_separation : Quantity
        The minimum angular distance a suitable star must have from
        neighbouring stars.

    Returns
    -------
    Quantity or None
        The position angle of the best matching star relative to the target,
        or None if no suitable star is found.
    """
    # set up the conditions for the Gaia star catalog search
    instr = _build_position_angle_instrument(
        radius_range=radius_range,
        mag_range=mag_range,
        min_star_separation=min_star_separation,
    )
    center = SkyCoord(ra, dec)
    instr.point_to(center)
    # search for stars matching the conditions
    gilmenel.init()
    stars = gilmenel.view_sky(instr)
    matching_stars = gilmenel.find_best_stars(instr, stars)
    # don't calculate a position angle if there is no suitable star
    if len(matching_stars) == 0:
        return None
    # Find the best of the matching stars (smallest sorting key first).
    best_star = sorted(matching_stars, key=_sorting_key(radius_range, mag_range))[0]
    best_star_coord = SkyCoord(best_star.ra, best_star.dec)
    return center.position_angle(best_star_coord)
def _build_position_angle_instrument(
    radius_range: Tuple[Quantity, Quantity],
    mag_range: Tuple[float, float],
    min_star_separation: Quantity,
) -> BaseInstrument:
    """
    Create an "instrument" encoding the conditions stars must match for use
    in positioning a slit.

    Parameters
    ----------
    radius_range : pair of Quantity
        The inner and outer radius (as an angle) of the annulus in which a
        suitable star may be located.
    mag_range : pair of float
        The minimum (brightest) and maximum (faintest) magnitude a suitable
        star may have.
    min_star_separation : Quantity
        The minimum angular distance a suitable star must have from
        neighbouring stars.

    Returns
    -------
    BaseInstrument
        The "instrument" with the search conditions.
    """

    class _PositionAngleInstrument(BaseInstrument):
        # A star qualifies as "best" if its merit value is at least 4.
        def best_stars(self, stars):
            return [star for star in stars if star.merit >= 4]

    inner_radius, outer_radius = radius_range
    bright_limit, faint_limit = mag_range
    return _PositionAngleInstrument(
        name="PositionAngle",
        instr_fov=outer_radius,
        inner_excl_distance=inner_radius,
        nearby_limit=min_star_separation,
        bright_limit=bright_limit,
        faint_limit=faint_limit,
    )
def _sorting_key(
    radius_range: Tuple[Quantity, Quantity], mag_range: Tuple[float, float]
):
    """
    A key function for sorting stars.

    The most suitable star comes first in a sorted list. A star's key is the
    deviation of its radius from the inner search radius plus a weighted
    deviation of its magnitude from the middle of the magnitude range.

    Parameters
    ----------
    radius_range : pair of Quantity
        The inner and outer radius (as an angle) of the annulus in which a
        suitable star may be located.
    mag_range : pair of float
        The minimum (brightest) and maximum (faintest) magnitude a suitable
        star may have.

    Returns
    -------
    callable
        A function mapping a star to its (float) sorting key; smaller values
        indicate more suitable stars.
    """
    # Prefer stars close to the inner radius and close to the middle of the
    # magnitude range; the magnitude deviation is weighted by 0.2.
    target_radius = radius_range[0]
    target_magnitude = 0.5 * sum(mag_range)
    return lambda star: abs(
        (star.radius - target_radius).to_value(u.arcmin)
    ) + 0.2 * abs(star.g_mag - target_magnitude)
from typing import Optional, BinaryIO
import astropy.units as u
import click
from datetime import datetime
import os
import pytz
from salt_finder_charts import standard_finder_charts, __version__
from salt_finder_charts.image import Survey
from salt_finder_charts.mode import Mode
from salt_finder_charts.output import OutputFormat
from salt_finder_charts.position_angle import (
estimated_position_angle,
MAX_MAG,
MAX_RADIUS,
MIN_MAG,
MIN_RADIUS,
MIN_STAR_SEPARATION,
)
from salt_finder_charts.util import julian_day_start, julian_day_end
@click.command()
@click.option("--bandpass", type=str, help="bandpass (such as V) for the magnitudes")
@click.option(
    "--basename",
    type=str,
    default="FinderChart",
    help="Basename for the saved finder chart files.",
)
@click.option("--basic-annotations", is_flag=True, help="add basic annotations only")
@click.option("--dec", type=float, help="declination of the finder chart center")
@click.option(
    "--end-time",
    type=click.DateTime(),
    help="end time until which to generate finder charts",
)
@click.option("--horizons-id", type=str, help="identifier for the Horizons service")
@click.option(
    "--horizons-stepsize",
    type=int,
    default=5,
    help="minutes between ephemerides queried from the Horizons service",
)
@click.option("--max-magnitude", type=float, help="maximum magnitude of the target")
@click.option("--min-magnitude", type=float, help="minimum magnitude of the target")
@click.option(
    "--mode",
    type=click.Choice([mode.value for mode in Mode], case_sensitive=False),
    required=True,
    help="observation mode",
)
@click.option(
    "--mos-mask-rsmt", type=click.File(mode="rb"), help="RSMT file defining a MOS mask"
)
@click.option(
    "--output-dir",
    type=click.Path(exists=True, file_okay=False, writable=True, resolve_path=True),
    required=True,
    help="directory where to save the generated finder chart files",
)
@click.option(
    "--output-format",
    type=click.Choice([of.value for of in OutputFormat], case_sensitive=False),
    default="PDF",
    help="output format of the generated finder chart files",
)
@click.option("--position-angle", type=float, help="position angle in degrees")
@click.option("--ra", type=float, help="right ascension of the finder chart center")
@click.option("--slitwidth", type=float, help="slit width in arcseconds")
# Use click's default DateTime formats (a superset of the single format that
# used to be accepted here) so --start-time and --end-time parse consistently.
@click.option(
    "--start-time",
    type=click.DateTime(),
    help="start time from when to generate finder charts",
)
@click.option(
    "--survey",
    type=click.Choice([survey.value for survey in Survey], case_sensitive=False),
    default="POSS2/UKSTU Red",
    help="survey to use for the finder chart image",
)
@click.option("--title", type=str, help="title for the finder chart")
@click.version_option(__version__)
def saltfc(
    bandpass: Optional[str],
    basename: str,
    basic_annotations: bool,
    dec: Optional[float],
    end_time: Optional[datetime],
    horizons_id: Optional[str],
    horizons_stepsize: Optional[int],
    max_magnitude: Optional[float],
    min_magnitude: Optional[float],
    mode: str,
    mos_mask_rsmt: Optional[BinaryIO],
    output_dir: str,
    output_format: str,
    position_angle: Optional[float],
    ra: Optional[float],
    slitwidth: Optional[float],
    start_time: Optional[datetime],
    survey: str,
    title: Optional[str],
):
    """
    Command for generating SALT finder charts.

    The finder charts are stored as files named FinderChart-1, FinderChart-2,
    ... (with the file suffix matching the chosen output format). The base
    name ("FinderChart") can be changed with the --basename parameter, and
    the target directory for the finder charts must be specified with the
    --output-dir parameter.

    See the README file for a discussion of the various parameters.
    """
    # start and end time; the datetimes parsed by click are naive and are
    # taken to be UTC
    if start_time:
        _start_time = pytz.utc.localize(start_time)
    else:
        _start_time = julian_day_start(datetime.now(pytz.utc))
    if end_time:
        _end_time = pytz.utc.localize(end_time)
    else:
        _end_time = julian_day_end(datetime.now(pytz.utc))
    # finder chart center; compare against None explicitly so that a right
    # ascension or declination of exactly 0 degrees is not discarded
    _ra = ra * u.deg if ra is not None else None
    _dec = dec * u.deg if dec is not None else None
    # position angle
    _position_angle = position_angle * u.deg if position_angle is not None else None
    # mode
    _mode = [m for m in Mode if m.value.lower() == mode.lower()][0]
    # output
    _output_format = [
        of for of in OutputFormat if of.value.lower() == output_format.lower()
    ][0]
    # survey
    _survey = [s for s in Survey if s.value.lower() == survey.lower()][0]
    # slit width
    _slitwidth = slitwidth * u.arcsec if slitwidth is not None else None
    # Horizons query stepsize
    _horizons_stepsize = (
        horizons_stepsize * u.minute if horizons_stepsize is not None else None
    )
    # generate the finder charts; a running number is always appended to the
    # basename, even if only a single finder chart is generated
    counter = 1
    for fc in standard_finder_charts(
        mode=_mode,
        output_format=_output_format,
        start_time=_start_time,
        end_time=_end_time,
        ra=_ra,
        dec=_dec,
        min_magnitude=min_magnitude,
        max_magnitude=max_magnitude,
        bandpass=bandpass,
        horizons_id=horizons_id,
        horizons_stepsize=_horizons_stepsize,
        survey=_survey,
        position_angle=_position_angle,
        slitwidth=_slitwidth,
        mos_mask_rsmt=mos_mask_rsmt,
        basic_annotations=basic_annotations,
        title=title,
    ):
        filename = f"{basename}-{counter}.{_output_format.extension()}"
        counter += 1
        filepath = os.path.join(output_dir, filename)
        with open(filepath, "wb") as f:
            f.write(fc.read())
@click.command()
@click.option(
    "--dec", type=float, required=True, help="declination of the target, in degrees"
)
@click.option("--max-mag", type=float, default=MAX_MAG, help="maximum magnitude")
@click.option(
    "--max-radius",
    type=float,
    default=MAX_RADIUS.to_value(u.arcmin),
    help="maximum radius, in arcminutes",
)
@click.option("--min-mag", type=float, default=MIN_MAG, help="minimum magnitude")
@click.option(
    "--min-radius",
    type=float,
    default=MIN_RADIUS.to_value(u.arcmin),
    help="minimum radius, in arcminutes",
)
@click.option(
    "--min-separation",
    type=float,
    default=MIN_STAR_SEPARATION.to_value(u.arcsec),
    help="minimum separation between stars, in arcseconds",
)
@click.option(
    "--ra", type=float, required=True, help="right ascension of the target, in degrees"
)
def pa(
    ra: float,
    dec: float,
    min_radius: float,
    max_radius: float,
    min_mag: float,
    max_mag: float,
    min_separation: float,
):
    """
    Calculate a suitable position angle for a target.

    The position angle is the position angle of a star within an annulus
    between a minimum and maximum radius. This star must have a magnitude
    between a minimum and maximum magnitude, and it must have a minimum
    separation from neighbouring stars.

    The position angle is output as an angle in degrees.
    """
    print(
        estimated_position_angle(
            ra=ra * u.deg,
            dec=dec * u.deg,
            # Both radii are given in arcminutes on the command line.
            radius_range=(min_radius * u.arcmin, max_radius * u.arcmin),
            # Pass the magnitude limits (not the radii) as the magnitude
            # range.
            mag_range=(min_mag, max_mag),
            min_star_separation=min_separation * u.arcsec,
        )
    )
from typing import List, Optional
import re
from saltlint.utils import get_rule_skips_from_line, get_file_type
from saltlint.linter.match import Match
from saltlint.utils import LANGUAGE_SLS, LANGUAGE_JINJA
class Rule(object):
    """Base class for salt-lint rules.

    A concrete rule provides an ``id``, a short description and either a
    ``match`` callable (checked line by line via :meth:`matchlines`) or a
    ``matchtext`` callable (checked against the full file contents via
    :meth:`matchfulltext`).
    """

    id: Optional[str] = None
    shortdesc: Optional[str] = None
    description: Optional[str] = None
    #: File types the rule applies to; an empty list means every file type.
    languages: List[str] = []
    match = None
    matchtext = None

    def __init__(self, config=None):
        self.config = config

    def __repr__(self):
        return self.id + ": " + self.shortdesc

    def verbose(self):
        """Return the rule id, short description and full description."""
        return self.id + ": " + self.shortdesc + "\n " + self.description

    @staticmethod
    def unjinja(text):
        """Replace every Jinja expression in *text* with 'JINJA_VAR'."""
        return re.sub(r"{{[^}]*}}", "JINJA_VAR", text)

    def is_valid_language(self, file):
        """
        Return True if the file type is in the supported languages or no
        language is specified for the linting rule, and False otherwise.

        The file type is determined based upon the file extension.
        """
        return (
            not self.languages
            or get_file_type(file["path"]) in self.languages
        )

    def matchlines(self, file, text):
        """Run the line-based ``match`` check on *text*.

        Comment lines and lines whose skip directives include this rule's id
        are ignored. Returns a list of Match objects.
        """
        matches = []
        if not self.match:
            return matches
        if not self.is_valid_language(file):
            return matches
        # Line numbers are 1-based, so let enumerate start counting at 1.
        for line_no, line in enumerate(text.split("\n"), start=1):
            if line.lstrip().startswith('#'):
                continue
            # Rule ids that are skipped on this line.
            rule_id_list = get_rule_skips_from_line(line)
            if self.id in rule_id_list:
                continue
            result = self.match(file, line)
            if not result:
                continue
            # A string result doubles as the match message.
            message = result if isinstance(result, str) else None
            matches.append(Match(line_no, line, file['path'], self, message))
        return matches

    def matchfulltext(self, file, text):
        """Run the full-text ``matchtext`` check on *text*.

        Returns a list of Match objects.
        """
        matches = []
        if not self.matchtext:
            return matches
        if not self.is_valid_language(file):
            return matches
        results = self.matchtext(file, text)
        for line, section, message in results:
            matches.append(Match(line, section, file['path'], self, message))
        return matches
class JinjaRule(Rule):
    """Base class for Jinja-related formatting rules.

    Escaped Jinja blocks ({% raw %} ... {% endraw %}) are blanked out before
    the line-based checks run, so that their contents are never flagged.
    """

    languages = [LANGUAGE_SLS, LANGUAGE_JINJA]
    tags = ['formatting', 'jinja']

    # Regex for matching a single escaped Jinja block in the text. The
    # quantifier is non-greedy so that two separate raw blocks are matched
    # individually instead of being merged into one match (which would also
    # escape the regular content between them).
    jinja_escape_regex = re.compile(
        r"{%[+-]?\s?raw\s?[+-]?%}.*?{%[+-]?\s?endraw\s?[+-]?%}",
        re.DOTALL | re.MULTILINE
    )

    def matchlines(self, file, text):
        """
        Match the text line by line but ignore all escaped Jinja blocks, e.g.
        content between {% raw %} and {% endraw %}.

        Returns a list of Match objects.
        """

        def _blank_out(match):
            # Replace the escaped block with the same number of newlines so
            # that the line numbers of all subsequent matches stay correct.
            return "\n" * match.group(0).count("\n")

        # re.sub processes the matches left to right on the original text,
        # so there is no risk of offsets going stale between replacements.
        escaped_text = self.jinja_escape_regex.sub(_blank_out, text)
        # Call matchlines() on the parent class with the escaped text.
        return super().matchlines(file, escaped_text)
class DeprecationRule(Rule):
    """Base class for rules flagging the use of deprecated states."""

    id: Optional[str] = None
    #: Fully qualified name of the deprecated state, e.g. "module.run".
    state: Optional[str] = None
    #: SaltStack version in which the state was deprecated.
    deprecated_since: Optional[str] = None
    severity = 'HIGH'
    languages = [LANGUAGE_SLS]
    tags = ['deprecation']

    @property
    def shortdesc(self):
        return (
            f"State '{self.state}' is deprecated since SaltStack "
            f"version '{self.deprecated_since}'"
        )

    @property
    def description(self):
        return self.shortdesc

    @property
    def regex(self):
        # The state name at the expected two-space indentation, followed by
        # a colon or the end of the line.
        escaped_state = self.state.replace(".", r"\.")
        return re.compile(r"^\s{2}" + escaped_state + "(?=:|$)")

    def match(self, file, line):
        return self.regex.search(line)
class TypographicalErrorRule(Rule):
    """Base rule for catching common typographical errors.

    Subclasses are expected to provide a compiled ``regex`` attribute
    matching the typo being checked for.
    """
    severity = 'LOW'
    languages = [LANGUAGE_SLS]
    tags = ['formatting']

    def match(self, file, line):
        # Delegate to the subclass-supplied pattern; a truthy match object
        # flags the line.
        return self.regex.search(line)
from dataclasses import dataclass, field
import itertools
from typing import Callable, Dict, List, Sequence, TypedDict, cast
from pygls.lsp import types
from salt_lsp.parser import (
AstNode,
Tree,
Optional,
IncludeNode,
IncludesNode,
StateParameterNode,
StateCallNode,
StateNode,
RequisiteNode,
RequisitesNode,
ExtendNode,
)
from salt_lsp.base_types import CompletionsDict
from salt_lsp.utils import ast_node_to_range
class DocumentSymbolKWArgs(TypedDict):
    """
    Keyword arguments for the DocumentSymbol constructor that can be extracted
    for every AstNode.
    """

    # Display name of the symbol.
    name: str
    # Range covering the whole node.
    range: types.Range
    # Range covering just the identifier within the node.
    selection_range: types.Range
    # LSP symbol kind (String for include entries, Object otherwise).
    kind: types.SymbolKind
#: Dictionary containing functions that convert the AstNode subclasses to
#: their string identifier (keyed by the subclass name); the functions
#: return None when a node has no usable identifier.
NODE_IDENTIFIERS: Dict[str, Callable[[AstNode], Optional[str]]] = {
    IncludeNode.__name__: lambda node: cast(IncludeNode, node).value,
    IncludesNode.__name__: lambda node: "includes",
    StateParameterNode.__name__: lambda node: cast(
        StateParameterNode, node
    ).name,
    StateCallNode.__name__: lambda node: cast(StateCallNode, node).name,
    StateNode.__name__: lambda node: cast(StateNode, node).identifier,
    RequisiteNode.__name__: lambda node: cast(RequisiteNode, node).module,
    RequisitesNode.__name__: lambda node: cast(RequisitesNode, node).kind,
    ExtendNode.__name__: lambda node: "extend",
}
def _get_doc_from_module_name(
    node: AstNode,
    state_completions: CompletionsDict,
) -> Optional[str]:
    """
    Return the documentation of a StateCallNode or a RequisiteNode, looked up
    by its module name in the state_completions dictionary.

    A name of the form "module.submodule" yields the documentation of the
    submodule parameters; a bare module name yields the module docs. None is
    returned when the name is missing or unknown.

    This function must not be used for other types of AstNode subclasses.
    """
    assert isinstance(node, (RequisiteNode, StateCallNode))
    mod_name = NODE_IDENTIFIERS[type(node).__name__](node)
    if mod_name is None:
        return None
    if "." in mod_name:
        # Split only on the first dot so that an unexpected extra dot does
        # not raise a ValueError.
        mod_base_name, submod_name = mod_name.split(".", 1)
        completer = state_completions.get(mod_base_name)
        if completer is None:
            return None
        submod_params = completer.state_params.get(submod_name)
        return (
            submod_params.documentation if submod_params is not None else None
        )
    completer = state_completions.get(mod_name)
    return completer.state_docs if completer is not None else None
#: Dictionary of functions that return the documentation ("detail") of an
#: AstNode given the AstNode and the CompletionsDict, keyed by the subclass
#: name. Node types without an entry get no detail text.
DETAIL_OF_NODE_CONSTRUCTOR: Dict[
    str, Callable[[AstNode, CompletionsDict], Optional[str]]
] = {
    RequisiteNode.__name__: _get_doc_from_module_name,
    StateCallNode.__name__: _get_doc_from_module_name,
    ExtendNode.__name__: lambda n, c: """Extension of external SLS data.
See: https://docs.saltproject.io/en/latest/ref/states/extend.html
""",
    IncludesNode.__name__: lambda n, c: """A list of included SLS files.
See also https://docs.saltproject.io/en/latest/ref/states/include.html
""",
    RequisitesNode.__name__: lambda n, c: """List of requisites.
See also: https://docs.saltproject.io/en/latest/ref/states/requisites.html
""",
}
def get_children(
    node: AstNode, state_completions: CompletionsDict
) -> List[types.DocumentSymbol]:
    """Return the document symbols of the direct children of *node*.

    Node types without symbol-bearing children yield an empty list.
    """
    if isinstance(node, IncludesNode):
        child_nodes: Sequence[AstNode] = node.includes
    elif isinstance(
        node, (ExtendNode, RequisitesNode, StateCallNode, StateNode)
    ):
        child_nodes = node.get_children()
    else:
        return []
    # Visit each child without recursing so only one level is collected.
    child_visitor = Visitor(state_completions=state_completions, recurse=False)
    for child_node in child_nodes:
        child_node.visit(child_visitor)
    assert isinstance(child_visitor.document_symbols, list)
    return child_visitor.document_symbols
def _document_symbol_init_kwargs(
    node: AstNode,
) -> Optional[DocumentSymbolKWArgs]:
    """Extract the common DocumentSymbol constructor arguments for *node*.

    Returns None when the node has no string identifier or lacks position
    information.
    """
    identifier_of = NODE_IDENTIFIERS.get(type(node).__name__, lambda node: None)
    name = identifier_of(node)
    kind = (
        types.SymbolKind.String
        if isinstance(node, IncludeNode)
        else types.SymbolKind.Object
    )
    if name is None or node.start is None or node.end is None:
        return None
    full_range = ast_node_to_range(node)
    assert full_range is not None
    # The selection range covers just the identifier at the node's start.
    selection = types.Range(
        start=node.start.to_lsp_pos(),
        end=types.Position(
            line=node.start.line,
            character=node.start.col + len(name),
        ),
    )
    return {
        "name": name,
        "range": full_range,
        "selection_range": selection,
        "kind": kind,
    }
@dataclass
class Visitor:
    """
    Stateful visitor that builds document symbols from AstNodes when invoked
    via node.visit().
    """

    #: The document symbols created from the visited nodes accumulate here.
    document_symbols: List[types.DocumentSymbol] = field(default_factory=list)
    #: Used to look up the documentation of certain AstNodes.
    state_completions: CompletionsDict = field(default_factory=dict)
    #: Whether the visitor is also invoked on the children of an AstNode.
    #: By default this is off and should stay off, as LSP clients will not
    #: be able to use the resulting document symbols list otherwise.
    recurse: bool = False

    def __call__(self, node: AstNode) -> bool:
        kwargs = _document_symbol_init_kwargs(node)
        if kwargs is None:
            return self.recurse
        detail_of = DETAIL_OF_NODE_CONSTRUCTOR.get(
            type(node).__name__, lambda n, c: None
        )
        detail = detail_of(node, self.state_completions) or ""
        self.document_symbols.append(
            types.DocumentSymbol(
                detail=detail,
                children=get_children(node, self.state_completions),
                **kwargs,
            )
        )
        return self.recurse
def tree_to_document_symbols(
    tree: Tree, state_completions: CompletionsDict
) -> List[types.DocumentSymbol]:
    """Convert a parsed SLS tree into a list of LSP document symbols.

    The includes block (if any) comes first, followed by the extend block
    (if any) and then the states.
    """
    top_level_nodes: List[AstNode] = []
    if tree.includes:
        top_level_nodes.append(tree.includes)
    if tree.extend:
        top_level_nodes.append(tree.extend)
    top_level_nodes.extend(tree.states)
    symbols: List[types.DocumentSymbol] = []
    for node in top_level_nodes:
        visitor = Visitor(state_completions=state_completions, recurse=False)
        node.visit(visitor)
        symbols += visitor.document_symbols
    return symbols
import sys
from logging import getLogger, Logger, DEBUG
from pathlib import Path
from platform import python_version_tuple
from typing import List, Optional, Union
from pygls.lsp import types, InitializeResult
from pygls.protocol import LanguageServerProtocol
from pygls.workspace import Workspace
from salt_lsp.base_types import CompletionsDict, SLS_LANGUAGE_ID
from salt_lsp.utils import UriDict, FileUri, get_top
from salt_lsp.parser import parse, Tree
from salt_lsp.document_symbols import tree_to_document_symbols
# Path.is_relative_to() only exists from Python 3.9 on; provide a backport
# for older interpreters. Compare sys.version_info instead of the minor
# version string alone, which would misbehave for any non-3.x interpreter.
if sys.version_info < (3, 9):

    def is_relative_to(p1: Path, p2: Path) -> bool:
        # stolen from CPython's source
        """Return True if the path is relative to another path or False."""
        try:
            p1.relative_to(p2)
            return True
        except ValueError:
            return False

else:

    def is_relative_to(p1: Path, p2: Path) -> bool:
        """Return True if the path is relative to another path or False."""
        return p1.is_relative_to(p2)
class SlsFileWorkspace(Workspace):
    """An extension of pygl's :ref:`Workspace` class that has additional
    properties that are collected from the workspace.

    It hooks into the :ref:`Workspace`'s update function to automatically keep
    all properties up to date.
    """

    def __init__(
        self, state_name_completions: CompletionsDict, *args, **kwargs
    ) -> None:
        """Create the workspace.

        :param state_name_completions: completion data of the known salt
            state modules; forwarded to the document symbol generation
        All remaining arguments are passed through to pygls' ``Workspace``.
        """
        #: dictionary containing the parsed contents of all tracked documents
        self._trees: UriDict[Tree] = UriDict()
        #: document symbols of all tracked documents
        self._document_symbols: UriDict[List[types.DocumentSymbol]] = UriDict()
        #: included FileUris of every tracked document
        self._includes: UriDict[List[FileUri]] = UriDict()
        #: top path corresponding to every workspace folder
        self._top_paths: UriDict[Optional[FileUri]] = UriDict()
        self._state_name_completions = state_name_completions
        self.logger: Logger = getLogger(self.__class__.__name__)
        # FIXME: make this configurable
        self.logger.setLevel(DEBUG)
        # super().__init__ may already call add_folder/put_document, so the
        # attributes above have to exist before it runs
        super().__init__(*args, **kwargs)

    @property
    def trees(self) -> UriDict[Tree]:
        """A dictionary which contains the parsed :ref:`Tree` for each document
        tracked by the workspace.
        """
        return self._trees

    @property
    def document_symbols(self) -> UriDict[List[types.DocumentSymbol]]:
        """The document symbols of each SLS files in the workspace."""
        return self._document_symbols

    @property
    def includes(self) -> UriDict[List[FileUri]]:
        """The list of includes of each SLS file in the workspace."""
        return self._includes

    def _resolve_includes(
        self, text_document_uri: Union[str, FileUri]
    ) -> None:
        """Recompute the (transitive) includes of *text_document_uri*.

        Included documents that are not tracked yet are read from disk and
        registered via :meth:`put_document`.

        NOTE(review): there is no guard against circular includes -- two
        files including each other would recurse forever; confirm that this
        cannot happen in practice.
        """
        if (
            (tree := self._trees[text_document_uri]) is None
            or tree.includes is None
            or len(tree.includes.includes) == 0
        ):
            # nothing to resolve: no parse tree or no include statements
            return
        ws_folder = self._get_workspace_of_document(text_document_uri)
        # prefer the folder's cached top.sls location, fall back to the
        # workspace folder itself
        if (
            ws_folder in self._top_paths
            and self._top_paths[ws_folder] is not None
        ):
            top_path = self._top_paths[ws_folder]
        else:
            top_path = ws_folder
        assert top_path is not None
        self._includes[text_document_uri] = [
            FileUri(f)
            for incl in tree.includes.includes
            if (f := incl.get_file(FileUri(top_path).path)) is not None
        ]
        # NOTE(review): new_includes aliases the list stored in
        # self._includes, so the += below mutates the stored value in place
        new_includes = self._includes[text_document_uri]
        # now try to re-read all the trees if they are not present:
        for inc in self._includes[text_document_uri]:
            if inc not in self._trees:
                self.logger.debug(
                    "Adding file '%s' via includes of '%s'",
                    inc,
                    text_document_uri,
                )
                with open(inc.path, "r") as inc_file:
                    self.put_document(
                        types.TextDocumentItem(
                            uri=str(inc),
                            language_id=SLS_LANGUAGE_ID,
                            version=0,
                            text=inc_file.read(-1),
                        )
                    )
                self._resolve_includes(inc)
            # merge the transitive includes of the included document
            if inc in self._trees and self._includes.get(inc):
                new_includes += self._includes[inc]
        assert len(new_includes) >= len(self._includes[text_document_uri])
        self._includes[text_document_uri] = new_includes

    def _update_document(
        self,
        text_document: Union[
            types.TextDocumentItem, types.VersionedTextDocumentIdentifier
        ],
    ) -> None:
        """Re-parse *text_document* and refresh tree, symbols and includes."""
        self.logger.debug("updating document '%s'", text_document.uri)
        uri = text_document.uri
        tree = parse(self.get_document(uri).source)
        self._trees[uri] = tree
        self._document_symbols[uri] = tree_to_document_symbols(
            tree, self._state_name_completions
        )
        self._resolve_includes(text_document.uri)

    def _get_workspace_of_document(self, uri: Union[str, FileUri]) -> FileUri:
        """Return the uri of the workspace folder containing *uri*.

        Falls back to the workspace root when no folder matches.
        """
        # NOTE(review): if pygls stores _folders as a dict of uri -> folder,
        # this iterates over it; confirm that `workspace.uri` is valid here
        for workspace in self._folders:
            workspace_uri = workspace.uri
            if is_relative_to(
                Path(FileUri(uri).path), Path(FileUri(workspace_uri).path)
            ):
                return workspace_uri
        return self.root_uri

    def add_folder(self, folder: types.WorkspaceFolder) -> None:
        """Track *folder* and cache the location of its top.sls, if any."""
        super().add_folder(folder)
        top_path = get_top(FileUri(folder.uri).path)
        self._top_paths[FileUri(folder.uri)] = (
            FileUri(top_path) if top_path is not None else None
        )

    def remove_folder(self, folder_uri: Union[str, FileUri]) -> None:
        """Stop tracking *folder_uri* and drop its cached top path."""
        super().remove_folder(str(folder_uri))
        # NOTE(review): pop without default raises KeyError when the folder
        # was never added -- confirm callers guarantee this
        self._top_paths.pop(FileUri(folder_uri))

    def update_document(
        self,
        text_document: types.VersionedTextDocumentIdentifier,
        change: types.TextDocumentContentChangeEvent,
    ) -> None:
        """Apply *change*, then refresh the cached derived data."""
        super().update_document(text_document, change)
        self._update_document(text_document)

    def remove_document(self, doc_uri: str) -> None:
        """Forget the document along with its cached tree and symbols."""
        # NOTE(review): the _includes entry of the document is not removed
        # here -- confirm whether that is intentional
        super().remove_document(doc_uri)
        self._document_symbols.pop(FileUri(doc_uri))
        self._trees.pop(FileUri(doc_uri))

    def put_document(self, text_document: types.TextDocumentItem) -> None:
        """Start tracking *text_document* and compute its derived data."""
        super().put_document(text_document)
        self._update_document(text_document)
class SaltLspProto(LanguageServerProtocol):
    """Custom protocol that replaces the workspace with a SlsFileWorkspace
    instance.
    """

    workspace: SlsFileWorkspace

    def bf_initialize(self, *args, **kwargs) -> InitializeResult:
        """Handle the LSP ``initialize`` request.

        Runs pygls' default initialization first (which creates a plain
        ``Workspace``) and then swaps it out for a :class:`SlsFileWorkspace`
        carrying over the same root uri, sync kind and folders.
        """
        res = super().bf_initialize(*args, **kwargs)
        ws = self.workspace
        self.workspace = SlsFileWorkspace(
            # the completion data lives on the server, see
            # SaltServer.post_init
            self._server._state_name_completions,
            ws.root_uri,
            self._server.sync_kind,
            ws.folders.values(),
        )
        return res
from __future__ import annotations
from collections.abc import MutableMapping
import os
import os.path
import shlex
import subprocess
from typing import (
Dict,
Generic,
Iterator,
List,
NewType,
Optional,
TypeVar,
Union,
)
from urllib.parse import urlparse, ParseResult
from pygls.lsp.types import Position, Range
import salt_lsp.parser as parser
from salt_lsp.parser import AstNode, Tree
def get_git_root(path: str) -> Optional[str]:
    """Get the root of the git repository to which `path` belongs.

    If git is not installed or `path` is not in a git repository, then `None`
    is returned.
    """
    workdir = path if os.path.isdir(path) else os.path.dirname(path)
    proc = subprocess.run(
        shlex.split("git rev-parse --show-toplevel"),
        cwd=workdir,
        check=False,
        capture_output=True,
    )
    if proc.returncode != 0:
        # git missing, or path not inside a repository
        return None
    return str(proc.stdout.strip(), encoding="utf-8")
def get_top(path: str) -> Optional[str]:
    """Find the closest ancestor directory of *path* containing a ``top.sls``.

    :param path: file or directory path from which to start the search
    :return: the directory holding ``top.sls``, or ``None`` when no ancestor
        contains one
    """
    if os.path.isdir(path) and os.path.isfile(os.path.join(path, "top.sls")):
        return path
    current = path
    while True:
        parent, tail = os.path.split(current)
        # Stop at the filesystem root: either the POSIX root ("/"), an empty
        # parent, or any root where splitting makes no further progress
        # (e.g. Windows drive roots like "C:\\" -- the recursive original
        # never terminated there).
        if (tail == "" and parent == "/") or not parent or parent == current:
            return None
        if os.path.isfile(os.path.join(parent, "top.sls")):
            return parent
        current = parent
def get_root(path: str) -> Optional[str]:
    """Return the states top directory of *path*, falling back to the root of
    the enclosing git repository (or None when neither exists)."""
    top_dir = get_top(path)
    return top_dir if top_dir else get_git_root(path)
def get_sls_includes(path: str) -> List[str]:
    """Return the dotted names of all SLS files reachable from *path*.

    The names are relative to the top directory (see :func:`get_root`) and
    use Salt's dotted include notation: ``foo.bar`` for ``<top>/foo/bar.sls``
    and ``foo`` for ``<top>/foo/init.sls``.

    :param path: a path inside the states tree
    :return: the list of dotted include names, or ``[]`` when no top
        directory can be found
    """
    top = get_root(path)
    if not top:
        return []
    sls_files: List[str] = []
    for root, _, files in os.walk(top):
        # dotted package prefix of the current directory, "" at the top
        base = root[len(top) + 1 :].replace(os.path.sep, ".")
        for file in files:
            if not file.endswith(".sls"):
                continue
            if file == "init.sls":
                # init.sls is addressed by its directory name alone
                name = base
            else:
                stem = file[: -len(".sls")]
                # BUG FIX: the prefix and the stem must be joined with a
                # "." (the original concatenated them, yielding "ab"
                # instead of "a.b" for <top>/a/b.sls)
                name = f"{base}.{stem}" if base else stem
            sls_files.append(name)
    return sls_files
def construct_path_to_position(tree: Tree, pos: Position) -> List[AstNode]:
    """Return the chain of AST nodes from the root of *tree* down to the
    innermost node containing *pos* (empty when no node contains it)."""
    target = parser.Position(line=pos.line, col=pos.character)
    deepest: Optional[AstNode] = None

    def visit(node: AstNode) -> bool:
        nonlocal deepest
        if target >= node.start and target < node.end:
            # remember the match and keep descending into its children
            deepest = node
            return True
        # the position lies outside this node: prune its subtree
        return False

    tree.visit(visit)
    if deepest is None:
        return []

    chain: List[AstNode] = []
    node: Optional[AstNode] = deepest
    while node:
        chain.append(node)
        node = node.parent
    chain.reverse()
    return chain
def position_to_index(text: str, line: int, column: int) -> int:
    """Translate a (line, column) position into a character offset in *text*."""
    lines = text.splitlines(keepends=True)
    # all characters of the preceding lines plus the column offset
    return sum(len(part) for part in lines[:line]) + column
T = TypeVar("T")
def get_last_element_of_iterator(iterator: Iterator[T]) -> Optional[T]:
    """
    Returns the last element of from an iterator or None if the iterator is
    empty.
    """
    last: Optional[T] = None
    for element in iterator:
        last = element
    # an empty iterator leaves the initial None in place
    return last
#: Type for URIs
Uri = NewType("Uri", str)
class FileUri:
    """Simple class for handling file:// URIs.

    Plain filesystem paths are accepted as well and converted into
    ``file://`` URIs.
    """

    def __init__(self, uri: Union[str, Uri, FileUri]) -> None:
        if isinstance(uri, FileUri):
            self._parse_res: ParseResult = uri._parse_res
        else:
            self._parse_res = urlparse(uri)
        scheme = self._parse_res.scheme
        if scheme not in ("", "file"):
            raise ValueError(f"Invalid uri scheme {scheme}")
        if scheme == "":
            # a bare path was given: re-parse with an explicit file scheme
            self._parse_res = urlparse("file://" + self._parse_res.path)

    @property
    def path(self) -> str:
        """The filesystem path portion of the URI."""
        return self._parse_res.path

    def __str__(self) -> str:
        return self._parse_res.geturl()
U = Union[Uri, FileUri, str]
class UriDict(Generic[T], MutableMapping):
    """Dictionary that stores elements assigned to paths which are then
    transparently accessible via their Uri or the path or the FileUri.
    """

    def __init__(self, *args, **kwargs):
        self._data: Dict[str, T] = {}
        self.update(dict(*args, **kwargs))

    def _key_gen(self, key: U) -> str:
        # normalize every accepted key form to its canonical file:// string
        return str(FileUri(key))

    def __getitem__(self, key: U) -> T:
        return self._data[self._key_gen(key)]

    def __setitem__(self, key: U, value: T) -> None:
        self._data[self._key_gen(key)] = value

    def __delitem__(self, key: U) -> None:
        del self._data[self._key_gen(key)]

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)
def is_valid_file_uri(uri: str) -> bool:
    """Returns True if uri is a valid file:// URI"""
    try:
        FileUri(uri)
    except ValueError:
        return False
    return True
def ast_node_to_range(node: AstNode) -> Optional[Range]:
    """
    Converts a AstNode to a Range spanning from the node's starts to its end.

    If the node's start or end are None, then None is returned.
    """
    start, end = node.start, node.end
    if start is None or end is None:
        return None
    return Range(start=start.to_lsp_pos(), end=end.to_lsp_pos())
from __future__ import annotations
import logging
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from os.path import abspath, dirname, exists, isdir, join
from typing import Any, Callable, List, Optional, Sequence, Tuple, Union, cast
from pygls.lsp import types
import yaml
from yaml.tokens import BlockEndToken, ScalarToken
log = logging.getLogger(__name__)
@dataclass
class Position:
    """
    Describes a position in the document
    """

    line: int
    col: int

    def _as_tuple(self) -> Tuple[int, int]:
        # positions order first by line, then by column
        return (self.line, self.col)

    def __lt__(self, other):
        if not isinstance(other, Position):
            return NotImplemented
        return self._as_tuple() < other._as_tuple()

    def __gt__(self, other):
        if not isinstance(other, Position):
            return NotImplemented
        return self._as_tuple() > other._as_tuple()

    def __le__(self, other):
        return self < other or self == other

    def __ge__(self, other):
        return self > other or self == other

    def to_lsp_pos(self) -> types.Position:
        """Convert this position to pygls' native Position type."""
        return types.Position(line=self.line, character=self.col)
@dataclass
class AstNode(ABC):
    """
    Base class for all nodes of the Abstract Syntax Tree
    """

    #: position of the first character of the node, None when unknown
    start: Optional[Position] = None
    #: position just past the last character of the node, None when unknown
    end: Optional[Position] = None
    #: the enclosing node; excluded from comparisons and repr
    parent: Optional[AstNode] = field(compare=False, default=None, repr=False)

    def visit(self: AstNode, visitor: Callable[[AstNode], bool]) -> None:
        """
        Apply a visitor function to the node and apply it on children if the
        function returns True.
        """
        # leaf nodes have no children, so the return value is ignored here;
        # AstMapNode overrides this method to recurse
        visitor(self)
class AstMapNode(AstNode, ABC):
    """
    Base class for all nodes that are mappings
    """

    @abstractmethod
    def add(self: AstMapNode) -> AstNode:
        """
        Abstract function to add an item
        """
        raise NotImplementedError()

    @abstractmethod
    def get_children(self: AstMapNode) -> Sequence[AstNode]:
        """
        Returns all the children nodes
        """
        raise NotImplementedError()

    def visit(self, visitor: Callable[[AstNode], bool]) -> None:
        """
        Apply a visitor function to the node and apply it on children if the
        function returns True.
        """
        # only descend into the children when the visitor asks for it
        if visitor(self):
            for child in self.get_children():
                child.visit(visitor)
@dataclass
class IncludeNode(AstNode):
    """
    Represents an item in the includes node
    """

    #: the dotted include target, e.g. ``foo.bar``
    value: Optional[str] = None

    def get_file(self, top_path: str) -> Optional[str]:
        """
        Convert the dotted value of the include into a proper file path
        based on the path of the top of the states folder.

        :param top_path: the path to the top states folder
        """
        if self.value is None:
            return None
        abs_top = abspath(top_path)
        if not isdir(abs_top):
            abs_top = dirname(abs_top)
        relative = join(*self.value.split("."))
        # "foo.bar" may refer to either foo/bar/init.sls or foo/bar.sls
        for candidate in (
            join(abs_top, relative, "init.sls"),
            join(abs_top, f"{relative}.sls"),
        ):
            if exists(candidate):
                return candidate
        return None
@dataclass
class IncludesNode(AstNode):
    """
    Node representing the list of includes
    """

    includes: List[IncludeNode] = field(default_factory=list)

    def add(self) -> IncludeNode:
        """
        Add a child node and return it.
        """
        node = IncludeNode()
        self.includes.append(node)
        return node
@dataclass
class StateParameterNode(AstNode):
    """
    Node representing a parameter of the state definition.
    """

    name: Optional[str] = None
    value: Any = None

    def set_key(self, key: str) -> AstNode:
        """
        Set the name of the parameter. If getting a requisites, tell the parent
        to handle it and return the newly created node.

        :return: the node that finally got the name
        """
        base_requisites = (
            "require",
            "onchanges",
            "watch",
            "listen",
            "prereq",
            "onfail",
            "use",
        )
        # every requisite also exists in an "_any" and an "_in" variant
        all_requisites = {
            variant
            for base in base_requisites
            for variant in (base, base + "_any", base + "_in")
        }
        if key in all_requisites and isinstance(
            self.parent, StateCallNode
        ):
            # not a parameter at all: the enclosing state call converts this
            # node into a RequisitesNode
            return self.parent.convert(self, key)
        self.name = key
        return self
@dataclass
class RequisiteNode(AstNode):
    """
    Node representing one requisite
    """

    #: the salt module of the requisite target (e.g. ``file``)
    module: Optional[str] = None
    #: the referenced state id or name
    reference: Optional[str] = None

    def set_key(self: RequisiteNode, key: str) -> AstNode:
        """
        Set the module of the requisite

        :param key: the module to set
        :return: the node that was updated
        """
        self.module = key
        return self
@dataclass
class RequisitesNode(AstMapNode):
    """
    Node Representing the list of requisites of a state
    """

    kind: Optional[str] = None
    requisites: List[RequisiteNode] = field(default_factory=list)

    def set_key(self, key: str) -> AstNode:
        """
        Set the kind of the requisite

        :param key: the kind to set
        :return: the node that was updated
        """
        self.kind = key
        return self

    def add(self) -> AstNode:
        """
        Add a requisite entry to the tree, the key and value will come later

        :return: the added node
        """
        entry = RequisiteNode(parent=self)
        self.requisites.append(entry)
        return entry

    def get_children(self) -> Sequence[AstNode]:
        """
        Returns all the children nodes
        """
        return self.requisites
@dataclass
class StateCallNode(AstMapNode):
    """
    Node representing the state call part of the state definition.

    For instance it represents the following part:

    .. code-block:: yaml

        file.managed:
          - name: /etc/libvirt/libvirtd.conf
          - source: salt://libvirt/libvirtd.conf

    from this complete state definition:

    .. code-block:: yaml

        libvirt_config:
          file.managed:
            - name: /etc/libvirt/libvirtd.conf
            - source: salt://libvirt/libvirtd.conf
    """

    name: Optional[str] = None
    parameters: List[StateParameterNode] = field(default_factory=list)
    requisites: List[RequisitesNode] = field(default_factory=list)

    def add(self) -> AstNode:
        """
        Add an entry to the tree, the key and value will come later

        :return: the added node
        """
        param = StateParameterNode(parent=self)
        self.parameters.append(param)
        return param

    def set_key(self, key: str) -> AstNode:
        """
        Set the name
        """
        self.name = key
        return self

    def convert(self, param: StateParameterNode, name: str) -> AstNode:
        """
        Convert a parameter entry to a requisite one
        """
        self.parameters.remove(param)
        requisites = RequisitesNode(kind=name, parent=self)
        # the new node covers the same source location as the old parameter
        requisites.start = param.start
        self.requisites.append(requisites)
        return requisites

    def get_children(self) -> Sequence[AstNode]:
        """
        Returns all the children nodes
        """
        return [*self.parameters, *self.requisites]
@dataclass
class StateNode(AstMapNode):
    """
    Node representing a state definition like the following.

    .. code-block:: yaml

        libvirt_config:
          file.managed:
            - name: /etc/libvirt/libvirtd.conf
            - source: salt://libvirt/libvirtd.conf
    """

    identifier: Optional[str] = None
    states: List[StateCallNode] = field(default_factory=list)

    def add(self) -> AstNode:
        """
        Add a key token to the tree, the value will come later

        :return: the added node
        """
        call = StateCallNode(parent=self)
        self.states.append(call)
        return call

    def set_key(self, key: str) -> AstNode:
        """
        Set the identifier of the node. If the ikey is one of include or
        extend, tell the parent to handle it.

        :return: the node where the key has been set.
        """
        if key in ("include", "extend") and isinstance(self.parent, Tree):
            # not a state at all: the tree replaces this node with an
            # IncludesNode or an ExtendNode
            return self.parent.convert(self, key)
        self.identifier = key
        return self

    def get_children(self) -> Sequence[AstNode]:
        """
        Returns all the children nodes
        """
        return self.states
@dataclass
class ExtendNode(AstMapNode):
    """
    Node representing an ``extend`` declaration
    """

    states: List[StateNode] = field(default_factory=list)

    def add(self) -> AstNode:
        """
        Add a key token to the tree, the value will come later

        :return: the added node
        """
        state = StateNode(parent=self)
        self.states.append(state)
        return state

    def get_children(self) -> Sequence[AstNode]:
        """
        Returns all the children nodes
        """
        return self.states
@dataclass
class Tree(AstMapNode):
    """
    Node representing the whole SLS file
    """

    includes: Optional[IncludesNode] = None
    extend: Optional[ExtendNode] = None
    states: List[StateNode] = field(default_factory=list)

    def add(self) -> AstNode:
        """
        Add a key token to the tree, the value will come later

        :return: the added node
        """
        state = StateNode(parent=self)
        self.states.append(state)
        return state

    def convert(self, state: StateNode, name: str) -> AstNode:
        """
        Convert a child state node into the proper node type depending on the
        name.

        :param state: the state node to change
        :param name: the name of the state node
        :return: the state node if no change was needed or the newly created
                 node
        """
        self.states.remove(state)
        if name == "include":
            self.includes = IncludesNode(parent=self)
            self.includes.start = state.start
            return self.includes
        if name == "extend":
            self.extend = ExtendNode(parent=self)
            self.extend.start = state.start
            return self.extend
        # NOTE(review): in this branch *state* has already been removed and
        # nothing replaces it; callers only pass "include"/"extend" though
        return self

    def get_children(self) -> Sequence[AstNode]:
        """
        Returns all the children nodes
        """
        children: List[AstNode] = []
        if self.includes:
            children.append(self.includes)
        if self.extend:
            children.append(self.extend)
        children.extend(self.states)
        return children
@dataclass(init=False, eq=False)
class TokenNode(AstNode):
    """
    Wrapper node for unprocessed yaml tokens
    """

    token: yaml.Token = field(default_factory=lambda: yaml.Token(0, 0))

    def __init__(self, token: yaml.Token) -> None:
        start_mark, end_mark = token.start_mark, token.end_mark
        super().__init__(
            start=Position(line=start_mark.line, col=start_mark.column),
            end=Position(line=end_mark.line, col=end_mark.column),
        )
        self.token = token

    def __eq__(self, other):
        # equal when the wrapped tokens have the same type (and, for
        # scalars, the same value) at the same location
        if not isinstance(other, TokenNode):
            return False
        if not isinstance(self.token, type(other.token)):
            return False
        if isinstance(self.token, yaml.ScalarToken):
            if self.token.value != other.token.value:
                return False
        return super().__eq__(other)
class Parser:
    """
    SLS file parser class
    """

    def __init__(self: Parser, document: str) -> None:
        """
        Create a parser object for an SLS file.

        :param document: the content of the SLS file to parse
        """
        self.document = document
        # the tree being built; also the root of the breadcrumbs stack
        self._tree = Tree()
        # stack of the nodes currently being built (innermost last)
        self._breadcrumbs: List[AstNode] = [self._tree]
        # block-start tokens paired with the breadcrumb that was on top when
        # the block started; used to unwind on the matching end token
        self._block_starts: List[
            Tuple[
                Union[
                    yaml.BlockMappingStartToken,
                    yaml.BlockSequenceStartToken,
                    yaml.FlowSequenceStartToken,
                    yaml.FlowMappingStartToken,
                ],
                AstNode,
            ],
        ] = []
        # set by a KeyToken: the next scalar is a mapping key
        self._next_scalar_as_key = False
        #: flag for _process_token that the preceding token was a ValueToken
        #: => if applicable, the next token will be a value, unless a block is
        #: started
        self._next_token_is_value = False
        # when not None, raw tokens of a parameter value are collected here
        self._unprocessed_tokens: Optional[List[TokenNode]] = None
        # deferred start position for the next created node
        self._last_start: Optional[Position] = None

    def _process_token(self: Parser, token: yaml.Token) -> None:
        """
        Process one token
        """
        token_start = Position(
            line=token.start_mark.line, col=token.start_mark.column
        )
        token_end = Position(
            line=token.end_mark.line, col=token.end_mark.column
        )
        if isinstance(token, yaml.StreamStartToken):
            self._tree.start = token_start
        if isinstance(token, yaml.StreamEndToken):
            self._tree.end = token_end
        if isinstance(
            token,
            (
                yaml.BlockMappingStartToken,
                yaml.BlockSequenceStartToken,
                yaml.FlowSequenceStartToken,
                yaml.FlowMappingStartToken,
            ),
        ):
            # Store which block start corresponds to what breadcrumb to help
            # handling end block tokens
            self._block_starts.append((token, self._breadcrumbs[-1]))
            # a block is starting, so the next token cannot be a value, it
            # will be a complex type instead
            self._next_token_is_value = False
        if isinstance(token, yaml.ValueToken):
            self._next_token_is_value = True
        if isinstance(token, yaml.ValueToken) and isinstance(
            self._breadcrumbs[-1], StateParameterNode
        ):
            # parameter values are kept as raw tokens: enter collection mode
            if not self._unprocessed_tokens:
                self._unprocessed_tokens = []
            # We don't need to do anything else with this token,
            # just flag the next tokens to be simply collected
            return
        if self._unprocessed_tokens is not None:
            # collection mode: stash the token instead of interpreting it
            if not isinstance(
                self._breadcrumbs[-1], StateParameterNode
            ) or not isinstance(token, yaml.BlockEndToken):
                self._unprocessed_tokens.append(TokenNode(token=token))
            if isinstance(
                token,
                (
                    yaml.BlockMappingStartToken,
                    yaml.BlockSequenceStartToken,
                    yaml.FlowSequenceStartToken,
                    yaml.FlowMappingStartToken,
                ),
            ):
                # nested structure inside the raw value: track it on the
                # breadcrumbs so end tokens unwind correctly
                self._breadcrumbs.append(self._unprocessed_tokens[-1])
                # a block is starting, so the next token cannot be a value,
                # it will be a complex type instead
                self._next_token_is_value = False
        if isinstance(
            token,
            (
                yaml.BlockEndToken,
                yaml.FlowSequenceEndToken,
                yaml.FlowMappingEndToken,
            ),
        ):
            if len(self._block_starts) == 0 or len(self._breadcrumbs) == 0:
                log.error(
                    "Reached a %s but either no block starts "
                    "(len(self._block_starts) = %d) or no breadcrumbs "
                    "(len(self._breadcrumbs) = %d) are present",
                    type(token).__name__,
                    len(self._block_starts),
                    len(self._breadcrumbs),
                )
                return
            last_start = self._block_starts.pop()
            last = self._breadcrumbs.pop()
            # pop breadcrumbs until we match the block starts
            closed = last
            while len(self._breadcrumbs) > 0 and closed != last_start[1]:
                closed = self._breadcrumbs.pop()
                closed.end = token_end
            if not isinstance(last, TokenNode):
                last.end = token_end
            if (
                isinstance(last, StateParameterNode)
                and self._unprocessed_tokens is not None
            ):
                # flush the collected raw tokens into the parameter value
                if len(self._unprocessed_tokens) == 1 and isinstance(
                    self._unprocessed_tokens[0].token, yaml.ScalarToken
                ):
                    # a single scalar: store its plain string value
                    last.value = self._unprocessed_tokens[0].token.value
                else:
                    for unprocessed in self._unprocessed_tokens:
                        unprocessed.parent = last
                    last.value = self._unprocessed_tokens
                self._unprocessed_tokens = None
        if self._unprocessed_tokens is not None:
            # If self._unprocessed_tokens is set then we don't have
            # Salt-specific data token to process
            # reset the flag that the next token is a value, as the current
            # token has now been put into self._unprocessed_tokens and will
            # be taken care of in the next sweep
            self._next_token_is_value = False
            return
        if isinstance(token, yaml.KeyToken):
            self._next_scalar_as_key = True
            if isinstance(
                self._breadcrumbs[-1], AstMapNode
            ) and not isinstance(
                self._breadcrumbs[-1], (RequisiteNode, StateParameterNode)
            ):
                # a mapping key starts a new child node
                self._breadcrumbs.append(self._breadcrumbs[-1].add())
                if self._last_start:
                    self._breadcrumbs[-1].start = self._last_start
                    self._last_start = None
                else:
                    self._breadcrumbs[-1].start = token_start
        if isinstance(token, yaml.BlockEntryToken):
            # Create the state parameter, include and requisite before the
            # dict since those are dicts in lists
            same_level = (
                len(self._breadcrumbs) > 0
                and self._breadcrumbs[-1].start
                and self._breadcrumbs[-1].start.col == token.start_mark.column
            )
            if same_level:
                # a sibling list entry starts: close the previous one
                self._breadcrumbs.pop().end = token_start
            if isinstance(
                self._breadcrumbs[-1],
                (StateCallNode, IncludesNode, RequisitesNode),
            ):
                self._breadcrumbs.append(self._breadcrumbs[-1].add())
                self._breadcrumbs[-1].start = token_start
        if isinstance(token, yaml.ScalarToken):
            if self._next_scalar_as_key and getattr(
                self._breadcrumbs[-1], "set_key"
            ):
                # NOTE(review): getattr without a default raises
                # AttributeError when the node has no set_key; presumably all
                # nodes reachable here define it -- confirm
                changed = getattr(self._breadcrumbs[-1], "set_key")(
                    token.value
                )
                # If the changed node isn't the same than the one we called
                # the function on, that means that the node had to be
                # converted and we need to update the breadcrumbs too.
                if changed != self._breadcrumbs[-1]:
                    old = self._breadcrumbs.pop()
                    self._breadcrumbs.append(changed)
                    self._block_starts = [
                        (block[0], changed) if block[1] == old else block
                        for block in self._block_starts
                    ]
                self._next_scalar_as_key = False
            else:
                if isinstance(self._breadcrumbs[-1], IncludeNode):
                    self._breadcrumbs[-1].value = token.value
                    self._breadcrumbs[-1].end = token_end
                    self._breadcrumbs.pop()
                if isinstance(self._breadcrumbs[-1], RequisiteNode):
                    self._breadcrumbs[-1].reference = token.value
                # If the user hasn't typed the ':' yet, then the state
                # parameter will come as a scalar
                if (
                    isinstance(self._breadcrumbs[-1], StateParameterNode)
                    and self._breadcrumbs[-1].name is None
                ):
                    self._breadcrumbs[-1].name = token.value
                if isinstance(self._breadcrumbs[-1], (StateNode, Tree)):
                    new_node = self._breadcrumbs[-1].add()
                    new_node.start = token_start
                    new_node.end = token_end
                    if getattr(new_node, "set_key"):
                        getattr(new_node, "set_key")(token.value)
            # this scalar token is actually the plain value of the
            # previous key and "a new thing" starts with the next token
            # => pop the current breadcrumb as it is now processed
            if self._next_token_is_value:
                last = self._breadcrumbs.pop()
                if last.end is None:
                    last.end = token_end
                self._next_token_is_value = False

    def parse(self) -> Tree:
        """
        Generate the Abstract Syntax Tree for a ``jinja|yaml`` rendered SLS
        file.

        :return: the generated AST
        :raises ValueException: for any other renderer but ``jinja|yaml``
        """
        tokens = yaml.scan(self.document)
        token = None
        try:
            for token in tokens:
                log.debug(token)
                self._process_token(token)
        except yaml.scanner.ScannerError as err:
            log.debug(err)
            if token:
                # The document is not valid YAML (e.g. the user is still
                # typing): close the still-open blocks so that the partial
                # tree remains usable for completion.
                for node in reversed(self._breadcrumbs):
                    if node.start and err.context_mark.column < node.start.col:
                        self._process_token(
                            BlockEndToken(
                                start_mark=err.context_mark,
                                end_mark=err.context_mark,
                            )
                        )
                    elif (
                        node.start
                        and err.context_mark.column == node.start.col
                    ):
                        self._process_token(
                            BlockEndToken(
                                start_mark=err.context_mark,
                                end_mark=err.context_mark,
                            )
                        )
                        # feed the text between the error context and the
                        # error position back in as a scalar so it is not
                        # lost
                        value = self.document[
                            err.context_mark.index : err.problem_mark.index
                        ].strip("\r\n")
                        error_token = ScalarToken(
                            value=value,
                            start_mark=err.context_mark,
                            end_mark=err.problem_mark,
                            plain=True,
                            style=None,
                        )
                        self._process_token(error_token)
                    else:
                        node.end = Position(
                            line=err.problem_mark.line,
                            col=err.problem_mark.column,
                        )
            return self._tree
        return self._tree
def parse(document: str) -> Tree:
    """
    Generate the Abstract Syntax Tree for a ``jinja|yaml`` rendered SLS file.

    :param document: the content of the SLS file to parse
    :return: the generated AST
    :raises ValueException: for any other renderer but ``jinja|yaml``
    """
    sls_parser = Parser(document)
    return sls_parser.parse()
import logging
import re
from os.path import basename
from typing import Dict, List, Optional, Sequence, Tuple, Union, cast
from pygls.capabilities import COMPLETION
from pygls.lsp import types
from pygls.lsp.methods import (
TEXT_DOCUMENT_DID_CHANGE,
TEXT_DOCUMENT_DID_CLOSE,
TEXT_DOCUMENT_DID_OPEN,
DEFINITION,
DOCUMENT_SYMBOL,
)
from pygls.lsp.types import (
CompletionItem,
CompletionList,
CompletionOptions,
CompletionParams,
)
from pygls.server import LanguageServer
import salt_lsp.utils as utils
from salt_lsp.base_types import StateNameCompletion, SLS_LANGUAGE_ID
from salt_lsp.workspace import SaltLspProto, SlsFileWorkspace
from salt_lsp.parser import (
IncludesNode,
RequisiteNode,
StateParameterNode,
Tree,
)
class SaltServer(LanguageServer):
    """Experimental language server for salt states"""

    #: matches the leading whitespace of a line up to the first word
    #: boundary; used to find where the state name before the cursor begins
    LINE_START_REGEX = re.compile(r"^(\s*)\b", re.MULTILINE)

    def __init__(self) -> None:
        # SaltLspProto installs a SlsFileWorkspace on initialize
        super().__init__(protocol_cls=SaltLspProto)
        #: completion data for every known salt state module;
        #: populated by post_init()
        self._state_name_completions: Dict[str, StateNameCompletion] = {}
        # replaced with a class-specific logger in post_init()
        self.logger: logging.Logger = logging.getLogger()
        #: names of all known salt state modules
        self._state_names: List[str] = []

    @property
    def workspace(self) -> SlsFileWorkspace:
        """The workspace, guaranteed to be a :class:`SlsFileWorkspace`
        (installed by ``SaltLspProto.bf_initialize``)."""
        assert isinstance(super().workspace, SlsFileWorkspace), (
            "expected to get a 'SlsFileWorkspace', but got a "
            f"'{super().workspace.__class__.__name__}' instead"
        )
        return cast(SlsFileWorkspace, super().workspace)

    def post_init(
        self,
        state_name_completions: Dict[str, StateNameCompletion],
        log_level=logging.DEBUG,
    ) -> None:
        """Finish the server setup with data not available at __init__ time.

        :param state_name_completions: completion data per state module name
        :param log_level: log level for the server's logger
        """
        self._state_name_completions = state_name_completions
        self._state_names = list(state_name_completions.keys())
        self.logger = logging.getLogger(self.__class__.__name__)
        self.logger.setLevel(log_level)

    def complete_state_name(
        self, params: types.CompletionParams
    ) -> List[Tuple[str, Optional[str]]]:
        """Return (submodule name, documentation) completions for the state
        module written directly before the "." trigger character.

        Must only be called for completions triggered by ".".
        """
        assert (
            params.context is not None
            and params.context.trigger_character == "."
        )
        doc = self.workspace.get_document(params.text_document.uri)
        contents = doc.source
        ind = doc.offset_at_position(params.position)
        # find the start of the word the cursor is in: the last line start
        # before the cursor offset
        last_match = utils.get_last_element_of_iterator(
            SaltServer.LINE_START_REGEX.finditer(contents, 0, ind)
        )
        if last_match is None:
            self.logger.debug(
                "expected to find whitespace before the position (%d, %d) "
                "but got no regex match for the document: %s",
                params.position.line,
                params.position.character,
                contents,
            )
            return []
        # the text between the word start and the "." is the module name
        state_name = contents[last_match.span()[1] : ind - 1]
        if state_name in self._state_name_completions:
            completer = self._state_name_completions[state_name]
            return completer.provide_subname_completion()
        return []

    def find_id_in_doc_and_includes(
        self, id_to_find: str, starting_uri: str
    ) -> Optional[types.Location]:
        """Finds the first matching location of the given id in the document or
        in its includes.

        This function searches for the `id_to_find` starting in
        `starting_uri`. If it does not find it in there, then it will continue
        to search in the includes and returns the first match that it finds.
        """
        self.logger.debug(
            "Request to find id '%s' starting in uri '%s'",
            id_to_find,
            starting_uri,
        )
        if (tree := self.workspace.trees.get(starting_uri)) is None:
            self.logger.error(
                "Cannot search in '%s', no tree present", starting_uri
            )
            return None
        inc_of_uri = self.workspace.includes.get(starting_uri, [])
        # FIXME: need to take ordering into account:
        # https://docs.saltproject.io/en/latest/ref/states/compiler_ordering.html#the-include-statement
        # search the starting document first, then every included document
        trees_and_uris_to_search: Sequence[
            Tuple[Tree, Union[str, utils.FileUri]]
        ] = [(tree, starting_uri)] + [
            (t, inc)
            for inc in inc_of_uri
            if (t := self.workspace.trees.get(inc)) is not None
        ]
        for tree, uri in trees_and_uris_to_search:
            self.logger.debug("Searching in '%s'", uri)
            matching_states = [
                state
                for state in tree.states
                if state.identifier == id_to_find
            ]
            # only accept an unambiguous match in this document
            if len(matching_states) != 1:
                continue
            if (
                lsp_range := utils.ast_node_to_range(matching_states[0])
            ) is not None:
                self.logger.debug(
                    "found match at '%s', '%s", lsp_range.start, lsp_range.end
                )
                return types.Location(
                    uri=str(uri),
                    range=lsp_range,
                )
        return None
def setup_salt_server_capabilities(server: SaltServer) -> None:
    """Adds the completion, goto definition and document symbol capabilities to
    the provided server.

    Each capability is registered as a nested handler via ``server.feature``;
    the handlers close over nothing and receive the server instance as their
    first argument.
    """

    @server.feature(
        COMPLETION, CompletionOptions(trigger_characters=["-", "."])
    )
    def completions(
        salt_server: SaltServer, params: CompletionParams
    ) -> Optional[CompletionList]:
        """Returns completion items."""
        # A '.' trigger means the user is completing a state's sub-name
        # (e.g. "pkg." -> "pkg.installed").
        if (
            params.context is not None
            and params.context.trigger_character == "."
        ):
            return CompletionList(
                is_incomplete=False,
                items=[
                    CompletionItem(label=sub_name, documentation=docs)
                    for sub_name, docs in salt_server.complete_state_name(
                        params
                    )
                ],
            )

        if (
            tree := salt_server.workspace.trees.get(params.text_document.uri)
        ) is None:
            return None

        path = utils.construct_path_to_position(tree, params.position)
        # FIX: require a non-empty path before indexing. The previous
        # expression `path and A or B and C` evaluated `path[-1]` in the
        # top.sls branch even when `path` was empty, raising IndexError.
        # Parentheses make the intended grouping explicit.
        if path and (
            isinstance(path[-1], IncludesNode)
            or (
                basename(params.text_document.uri) == "top.sls"
                and isinstance(path[-1], StateParameterNode)
            )
        ):
            file_path = utils.FileUri(params.text_document.uri).path
            includes = utils.get_sls_includes(file_path)
            return CompletionList(
                is_incomplete=False,
                items=[
                    CompletionItem(label=f" {include}") for include in includes
                ],
            )
        return None

    @server.feature(DEFINITION)
    def goto_definition(
        salt_server: SaltServer, params: types.DeclarationParams
    ) -> Optional[types.Location]:
        """Resolve the definition of the requisite id under the cursor."""
        uri = params.text_document.uri
        if (tree := salt_server.workspace.trees.get(uri)) is None:
            return None
        path = utils.construct_path_to_position(tree, params.position)
        # Going to definition is only handled on requisites ids.
        # FIX: guard against an empty path before indexing path[-1].
        if not path or not isinstance(path[-1], RequisiteNode):
            return None
        if (id_to_find := cast(RequisiteNode, path[-1]).reference) is None:
            return None
        return salt_server.find_id_in_doc_and_includes(id_to_find, uri)

    @server.feature(TEXT_DOCUMENT_DID_CHANGE)
    def on_did_change(
        salt_server: SaltServer, params: types.DidChangeTextDocumentParams
    ):
        """Apply incremental document changes to the workspace copy."""
        for change in params.content_changes:
            # check that this is a types.TextDocumentContentChangeEvent
            if hasattr(change, "range"):
                assert isinstance(change, types.TextDocumentContentChangeEvent)
                salt_server.workspace.update_document(
                    params.text_document, change
                )

    @server.feature(TEXT_DOCUMENT_DID_CLOSE)
    def did_close(
        salt_server: SaltServer, params: types.DidCloseTextDocumentParams
    ):
        """Text document did close notification."""
        salt_server.workspace.remove_document(params.text_document.uri)

    @server.feature(TEXT_DOCUMENT_DID_OPEN)
    def did_open(
        salt_server: SaltServer, params: types.DidOpenTextDocumentParams
    ) -> Optional[types.TextDocumentItem]:
        """Text document did open notification.
        This function registers the newly opened file with the salt server.
        """
        salt_server.logger.debug(
            "adding text document '%s' to the workspace",
            params.text_document.uri,
        )
        salt_server.workspace.put_document(params.text_document)
        # Re-fetch the document so the returned item carries the workspace's
        # version counter rather than the client-sent one.
        doc = salt_server.workspace.get_document(params.text_document.uri)
        return types.TextDocumentItem(
            uri=params.text_document.uri,
            language_id=SLS_LANGUAGE_ID,
            text=params.text_document.text or "",
            version=doc.version,
        )

    @server.feature(DOCUMENT_SYMBOL)
    def document_symbol(
        salt_server: SaltServer, params: types.DocumentSymbolParams
    ) -> Optional[
        Union[List[types.DocumentSymbol], List[types.SymbolInformation]]
    ]:
        """Return the cached document symbols for the requested document."""
        return salt_server.workspace._document_symbols.get(
            params.text_document.uri, []
        )
import datetime # pragma: nocover
import os # pragma: nocover
import sys # pragma: nocover
import subprocess # pragma: nocover
import platform # pragma: nocover
# (major, minor, micro, release level, serial) — consumed by cement_get_version().
VERSION = (0, 2, 7, "beta", 1)
def get_git_changeset():  # pragma: nocover
    """Returns a numeric identifier of the latest git changeset.
    The result is the UTC timestamp of the changeset in YYYYMMDDHHMMSS format.
    This value isn't guaranteed to be unique, but collisions are very
    unlikely, so it's sufficient for generating the development version
    numbers.
    """
    # The repository root is two directories above this module.
    repo_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    process = subprocess.Popen(
        "git log --pretty=format:%ct --quiet -1 HEAD",
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=True,
        cwd=repo_dir,
        universal_newlines=True,
    )
    raw_timestamp, _ = process.communicate()
    try:
        changeset = datetime.datetime.utcfromtimestamp(int(raw_timestamp))
    except ValueError:  # pragma: nocover
        # Not a git checkout (or git failed): no changeset id available.
        return None  # pragma: nocover
    return changeset.strftime("%Y%m%d%H%M%S")
def cement_get_version(version):  # pragma: nocover
    "Returns a PEP 386-compliant version number from VERSION."
    assert len(version) == 5
    assert version[3] in ("alpha", "beta", "rc", "final")

    release, serial = version[3], version[4]

    # main is always the full X.Y.Z triple.
    main = ".".join([str(part) for part in version[:3]])

    # sub is .devN for pre-alpha snapshots, {a|b|c}N for pre-releases,
    # and empty for final releases.
    suffix = ""
    if release == "alpha" and serial == 0:
        git_changeset = get_git_changeset()
        if git_changeset:
            suffix = ".dev%s" % git_changeset
    elif release != "final":
        suffix = {"alpha": "a", "beta": "b", "rc": "c"}[release] + str(serial)

    return main + suffix
def get_version(version=VERSION):
    """Return the PEP 386-compliant version string for *version*.

    Defaults to the module-level VERSION tuple.
    (Also strips a dataset-extraction artifact that was fused onto this line.)
    """
    return cement_get_version(version)
class PepperRetcode(object):
    '''
    Validation container.

    Maps the ``--fail-*`` CLI options onto retcode-inspection strategies
    over the result structure returned by a Saltstack master
    (``[{minion_name: {..., 'retcode': int}, ...}]``).
    '''

    def validate(self, options, result):
        '''
        Validate result dictionary retcode values.
        :param options: optparse options
        :param result: dictionary from Saltstack master
        :return: exit code
        '''
        if options.fail_any:
            return self.validate_fail_any(result)
        if options.fail_any_none:
            return self.validate_fail_any_none(result)
        if options.fail_all:
            return self.validate_fail_all(result)
        if options.fail_all_none:
            return self.validate_fail_all_none(result)
        return 0

    @staticmethod
    def _get_retcodes(result):
        '''
        Extract all retcode values from a master result.

        :param result: structure from the Saltstack master
        :return: list of retcodes (possibly empty), or None when the
            structure is not ``[dict, ...]`` (including an empty list,
            which previously raised IndexError).
        '''
        if not isinstance(result, list) or not result:
            return None
        if not isinstance(result[0], dict):
            return None
        minion = result[0]
        return [value.get('retcode')
                for value in minion.values()
                if isinstance(value, dict) and value.get('retcode') is not None]

    @staticmethod
    def validate_fail_any(result):
        '''
        Validate result dictionary retcode values.
        Returns 0 if no retcode keys.
        Returns first non zero retcode if any of recodes is non zero.
        :param result: dictionary from Saltstack master
        :return: exit code
        '''
        retcodes = PepperRetcode._get_retcodes(result)
        if retcodes is None:
            return 0
        return next((r for r in retcodes if r != 0), 0)

    @staticmethod
    def validate_fail_any_none(result):
        '''
        Validate result dictionary retcode values.
        Returns -1 if no retcode keys.
        Returns first non zero retcode if any of recodes is non zero.
        :param result: dictionary from Saltstack master
        :return: exit code
        '''
        retcodes = PepperRetcode._get_retcodes(result)
        if not retcodes:  # invalid structure or no retcodes at all
            return -1
        return next((r for r in retcodes if r != 0), 0)

    @staticmethod
    def validate_fail_all(result):
        '''
        Validate result dictionary retcode values.
        Returns 0 if no retcode keys.
        Returns first non zero retcode if all recodes are non zero.
        :param result: dictionary from Saltstack master
        :return: exit code
        '''
        retcodes = PepperRetcode._get_retcodes(result)
        if retcodes is None:
            return 0
        if all(r != 0 for r in retcodes):
            return next((r for r in retcodes if r != 0), 0)
        return 0

    @staticmethod
    def validate_fail_all_none(result):
        '''
        Validate result dictionary retcode values.
        Returns -1 if no retcode keys.
        Returns first non zero retcode if all recodes are non zero.
        :param result: dictionary from Saltstack master
        :return: exit code
        '''
        retcodes = PepperRetcode._get_retcodes(result)
        if not retcodes:  # invalid structure or no retcodes at all
            return -1
        if all(r != 0 for r in retcodes):
            return next((r for r in retcodes if r != 0), 0)
        return 0
from bowler import Query
from bowler import SYMBOL
from bowler import TOKEN
from fissix.fixer_util import find_indentation
from fissix.fixer_util import Name
from fissix.pytree import Leaf
from fissix.pytree import Node
def rewrite(paths, interactive=False, silent=False):
    """
    Rewrite the passed in paths
    """
    # Build the bowler query step by step instead of one long chain;
    # each method returns the query object, so this is equivalent.
    query = Query(paths)
    query = query.select_module("tornado")
    query = query.filter(filter_tornado_imports)
    query = query.rename("salt.ext.tornado")
    query = query.select_root()
    query = query.select("classdef|funcdef")
    query = query.filter(filter_not_decorated)
    query = query.modify(replace_decorators)
    query.execute(write=True, interactive=interactive, silent=silent)
def filter_tornado_imports(node, capture, filename):
    """
    Filter tornado imports.

    Returns True when any leaf of the captured node is the name
    ``tornado``, False otherwise (the original returned an implicit
    ``None`` in the negative case; both are falsy for bowler).
    """
    return any(leaf.value == "tornado" for leaf in capture["node"].leaves())
def _get_decorator(node):
    """
    Return the ``@tornado...`` decorator attached to *node*, or None.

    Only nodes wrapped in a ``decorated`` parent are considered, so plain
    classes and test methods are left untouched.
    """
    if node.parent.type != SYMBOL.decorated:
        return None

    child = node.parent.children[0]
    if child.type == SYMBOL.decorator:
        candidates = [child]
    elif child.type == SYMBOL.decorators:
        candidates = child.children
    else:
        raise NotImplementedError

    for candidate in candidates:
        name = candidate.children[1]
        assert name.type in {TOKEN.NAME, SYMBOL.dotted_name}
        if str(name).startswith("tornado."):
            return candidate
    return None
def filter_not_decorated(node, capture, filename):
    """
    Keep only nodes that carry a ``@tornado...`` decorator.
    """
    decorator = _get_decorator(node)
    return bool(decorator)
def get_decorator_name(decorator):
    """
    Return the (possibly dotted) name of *decorator* as a string.
    """
    name_node = decorator.children[1]
    assert name_node.type in {TOKEN.NAME, SYMBOL.dotted_name}
    return str(name_node)
def replace_decorators(node, capture, filename):
    """
    Replaces usage of ``@tornado.<etc>`` with ``@salt.ext.tornado.<etc>``

    (Code unchanged apart from removing a dataset-extraction artifact that
    was fused onto the final line.)
    """
    indent = find_indentation(node)
    decorator = _get_decorator(node)
    # Detach the old decorator, then rebuild a fresh `decorated` node whose
    # decorator name is prefixed with "salt.ext.".
    decorator.remove()
    decorated = Node(
        SYMBOL.decorated,
        [
            Node(
                SYMBOL.decorator,
                [
                    Leaf(TOKEN.AT, "@"),
                    Name(f"salt.ext.{get_decorator_name(decorator)}"),
                    Leaf(TOKEN.NEWLINE, "\n"),
                ],
            )
        ],
        prefix=decorator.prefix,
    )
    node.replace(decorated)
    decorated.append_child(node)
    # Re-apply the original indentation to the wrapped node so the output
    # file keeps its formatting.
    if indent is not None:
        node.prefix = indent
    else:
        node.prefix = ""
import logging
import os
import sys
import warnings
from shaker.libs import logger
from shaker.libs import metadata
from shaker.libs import pygit2_utils
from shaker_metadata import ShakerMetadata
from shaker_remote import ShakerRemote
from shaker.libs.errors import ShakerRequirementsUpdateException
class Shaker(object):
    """
    Shaker takes in a metadata yaml file and uses this to resolve a set
    of dependencies into a pinned and versioned set in a
    formula-requirements.txt file. This can then be used to synchronise
    a set of salt-formulas with remote versions pinned to the specified
    versions.
    Starting from a root formula and calculate all necessary dependencies,
    based on metadata stored in each formula.
    -
    Salt Shaker works by creating an extra file root that must be copied up to
    your salt server and added to the master config.
    The formula-requirements.txt file
    ---------------------------------
    The format of the file is simply a list of git-cloneable urls with an
    optional revision specified on the end. At the moment the only form a
    version comparison accepted is `==`. The version can be a tag, a branch
    name or anything that ``git rev-parse`` understands (i.e. a plain sha or
    the output of ``git describe`` such as ``v0.2.2-1-g1b520c5``).
    Example::
        git@github.com:ministryofjustice/ntp-formula.git==v1.2.3
        git@github.com:ministryofjustice/repos-formula.git==my_branch
        git@github.com:ministryofjustice/php-fpm-formula.git
        git@github.com:ministryofjustice/utils-formula.git
        git@github.com:ministryofjustice/java-formula.git
        git@github.com:ministryofjustice/redis-formula.git==v0.2.2-1-g1b520c5
        git@github.com:ministryofjustice/logstash-formula.git
        git@github.com:ministryofjustice/sensu-formula.git
        git@github.com:ministryofjustice/rabbitmq-formula.git
        git@github.com:saltstack-formulas/users-formula.git
    """
    def __init__(self, root_dir, salt_root_path='vendor',
                 clone_path='formula-repos', salt_root='_root'):
        """
        Initialise application paths and collect together the
        metadata
        Args:
            root_dir(string): The root directory to use
            salt_root_dir(string): The directory to use for the salt
                root
            clone_path(string): The directory to put formula into
            salt_root(string): The directory to link formula into
        """
        # Run sanity checks on pygit2
        pygit2_utils.pygit2_check()
        self.roots_dir = os.path.join(root_dir, salt_root_path, salt_root)
        self.repos_dir = os.path.join(root_dir, salt_root_path, clone_path)
        self._root_dir = root_dir
        # ShakerMetadata parses the metadata yaml under root_dir.
        self._shaker_metadata = ShakerMetadata(root_dir)
    def install_requirements(self,
                             simulate=False,
                             enable_remote_check=False):
        """
        Install the dependencies pinned in the stored formula requirements.
        Args:
            simulate(bool): True to only simulate the run,
                false to carry it through for real
            enable_remote_check(bool): True to enable remote
                checks when installing pinned versions
        """
        logger.Logger().info("Shaker::install_requirements: "
                             "Installing pinned requirements..."
                             "dependencies will be installed "
                             "from the stored formula requirements")
        self._load_local_requirements()
        self._install_versioned_requirements(overwrite=False,
                                             simulate=simulate,
                                             enable_remote_check=enable_remote_check)
    def update_requirements(self,
                            simulate=False):
        """
        Update the formula-requirements from the metadata,
        then install them
        Args:
            simulate(bool): True to only simulate the run,
                false to carry it through for real
        """
        logger.Logger().info("Shaker::install_requirements: "
                             "Updating and Installing requirements..."
                             "all dependencies will be "
                             "re-calculated from the metadata")
        self._update_local_requirements()
        # Updates always overwrite the pinned file and re-check remotes.
        self._install_versioned_requirements(overwrite=True,
                                             simulate=simulate,
                                             enable_remote_check=True)
    def check_requirements(self):
        """
        Check the current formula-requirements against those that
        would be generated from the metadata,

        Returns:
            list: pairs of (current_entry, new_entry) differences; an
                empty string in either position marks a new or
                deprecated entry. Empty list when nothing changed.
        """
        logger.Logger().info("Shaker::check_requirements: "
                             "Checking the current requirements "
                             "against an update")
        self._load_local_requirements(enable_remote_check=True)
        current_requirements = self._shaker_remote.get_requirements()
        self._update_local_requirements()
        new_requirements = self._shaker_remote.get_requirements()
        requirements_diff = metadata.compare_requirements(current_requirements,
                                                          new_requirements)
        if len(requirements_diff) == 0:
            logger.Logger().info("Shaker::check_requirements: "
                                 "No formula requirements changes found")
        else:
            # Log each difference: a missing side denotes add/remove,
            # both sides present denotes a version change.
            for requirement_pair in requirements_diff:
                first_entry = requirement_pair[0]
                second_entry = requirement_pair[1]
                if len(first_entry) == 0:
                    logger.Logger().info("Shaker::check_requirements: "
                                         "New entry %s"
                                         % (second_entry))
                elif len(second_entry) == 0:
                    logger.Logger().info("Shaker::check_requirements: "
                                         "Deprecated entry %s"
                                         % (first_entry))
                else:
                    logger.Logger().info("Shaker::check_requirements: "
                                         "Unequal entries %s != %s"
                                         % (first_entry,
                                            second_entry))
        return requirements_diff
    def _load_local_requirements(self,
                                 enable_remote_check=False):
        """
        Load the requirements file and update the remote dependencies
        Args:
            enable_remote_check(bool): False to use current formula without checking
                remotely for updates. True to use remote repository API to
                recalculate shas
        """
        logger.Logger().info("Shaker: Loading the current formula requirements...")
        self._shaker_remote = ShakerRemote(self._shaker_metadata.local_requirements)
        if enable_remote_check:
            logger.Logger().info("Shaker: Updating the current formula requirements "
                                 "dependencies...")
            self._shaker_remote.update_dependencies()
    def _update_local_requirements(self):
        """
        Update the requirements from metadata entries, overriding the
        current formula requirements
        """
        logger.Logger().info("Shaker: Updating the formula requirements...")
        self._shaker_metadata.update_dependencies(ignore_local_requirements=True)
        self._shaker_remote = ShakerRemote(self._shaker_metadata.dependencies)
        self._shaker_remote.update_dependencies()
    def _install_versioned_requirements(self,
                                        overwrite=False,
                                        simulate=False,
                                        enable_remote_check=False
                                        ):
        """
        Install all of the versioned requirements found
        Args:
            overwrite(bool): True to overwrite dependencies,
                false otherwise
            simulate(bool): True to only simulate the run,
                false to carry it through for real
            enable_remote_check(bool): False to use current formula without checking
                remotely for updates. True to use remote repository API to
                recalculate shas
        Raises:
            ShakerRequirementsUpdateException: if any dependency failed
                to install (raised before the requirements file is written)
        """
        if not simulate:
            if enable_remote_check:
                logger.Logger().info("Shaker::install_requirements: Updating requirements tag target shas")
                self._shaker_remote.update_dependencies()
            else:
                logger.Logger().info("Shaker::install_requirements: No remote check, not updating tag target shas")
            logger.Logger().info("Shaker::install_requirements: Installing requirements...")
            successful, unsuccessful = self._shaker_remote.install_dependencies(overwrite=overwrite,
                                                                               enable_remote_check=enable_remote_check)
            # If we have unsuccessful updates, then we should fail before writing the requirements file
            if unsuccessful > 0:
                msg = ("Shaker::install_requirements: %s successful, %s failed"
                       % (successful, unsuccessful))
                raise ShakerRequirementsUpdateException(msg)
            if enable_remote_check:
                logger.Logger().info("Shaker: Writing requirements file...")
                self._shaker_remote.write_requirements(overwrite=True, backup=False)
        else:
            # Simulation: only report what would be written, change nothing.
            requirements = '\n'.join(self._shaker_remote.get_requirements())
            logger.Logger().warning("Shaker: Simulation mode enabled, "
                                    "no changes will be made...\n%s\n\n"
                                    % (requirements))
def _setup_logging(level):
    """
    Initialise the default application logging
    Args:
        level(logging.LEVEL): The level to set
            logging at
    """
    # First call names the shared application logger; subsequent
    # no-arg calls return the same instance.
    logger.Logger('salt-shaker')
    logger.Logger().setLevel(level)
def shaker(root_dir='.',
           debug=False,
           verbose=False,
           pinned=False,
           simulate=False,
           check_requirements=False,
           enable_remote_check=False):
    """
    Utility task to initiate Shaker, setting up logging and
    running the neccessary commands to install requirements
    Args:
        root_dir(string): The root directory to use
        debug(bool): Enable/disable debugging output
        verbose(bool): Enable/disable verbose output
        pinned(bool): True to use pinned requirements,
            False to use metadata to recalculate
            requirements
        simulate(bool): True to only simulate the run,
            false to carry it through for real
        check_requirements(bool): True to compare
            a remote dependency check with the current
            formula requirements
        enable_remote_check(bool): True to enable remote
            checks when installing pinned versions
    """
    # The original `elif verbose:` and `else:` branches were identical
    # (both INFO), so debug is the only flag that changes the level.
    # NOTE(review): `verbose` therefore has no effect today — confirm
    # whether it was meant to lower the default level.
    _setup_logging(logging.DEBUG if debug else logging.INFO)
    if not os.path.exists(root_dir):
        # FIX: bare `0755` is a SyntaxError on Python 3; `0o755` is valid
        # on Python 2.6+ and 3.x and has the same value.
        os.makedirs(root_dir, 0o755)
    shaker_instance = Shaker(root_dir=root_dir)
    if check_requirements:
        shaker_instance.check_requirements()
    elif pinned:
        shaker_instance.install_requirements(simulate=simulate,
                                             enable_remote_check=enable_remote_check)
    else:
        shaker_instance.update_requirements(simulate=simulate)
def get_deps(root_dir, root_formula=None, constraint=None, force=False):
    """
    (DEPRECATED) Update the formula-requirements from the metadata.yaml,
    then install them
    Args:
        root_dir(string): The root directory to use
        root_formula: unused, kept for backwards compatibility
        constraint: unused, kept for backwards compatibility
        force(bool): True to recalculate requirements from metadata,
            False to use the pinned requirements
    """
    # This filterwarning makes sure we always see *this* log message.
    # FIX: raw string — '\.' in a non-raw literal is an invalid escape
    # sequence (DeprecationWarning on py3.6+); the runtime value is the same.
    warnings.filterwarnings("once", r"shaker\.salt_shaker\.get_deps.*", DeprecationWarning)
    # Then issue a warning from the caller's perspective
    warnings.warn("shaker.salt_shaker.get_deps has been deprecated. Use `shaker.salt_shaker.shaker(root_dir=...)` instead", DeprecationWarning, stacklevel=2)
    return shaker(root_dir, pinned=not force, enable_remote_check=True)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.