code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import json
import math
import requests
import pandas as pd
def fetch_data(ids):
'''
A function to fetch data from the API.
Parameters:
ids (list): A list of ids (integrs) to fetch
Returns:
text (dict): A dictionary where keys are the ids and values are the text
'''
results = {}
# We'll loop over the ids to fetch the text data
# We'll split ids into 1000 because of the limit of the API
# Futrue work:
# we can handle if the connection timed out or any other problem that would happen
# we can add some assertion to make sure that ids are valid
for i in range(math.ceil(len(ids)/1000)):
sub_ids = json.dumps(ids[i*1000:1000*(i+1)])
while True:
r = requests.post("https://recruitment.aimtechnologies.co/ai-tasks", sub_ids)
# print(r.status_code)
if r.status_code == 200:
results.update(json.loads(r.text))
break;
print(f"We managed to fetch {len(results)} samples of text.")
return results
if __name__ == '__main__':
#Read the ids' file, then fetch data, and write the file to a csv
source_data = pd.read_csv("files/dialect_dataset.csv")
text_dict = fetch_data(list(source_data.loc[:,"id"].astype(str)))
#We'll make sure that we managed to fetch all the ids
if len(source_data) == len(text_dict):
source_data.loc[:,"text"] = text_dict.values()
source_data.to_csv("data/full_dialect_dataset.csv",encoding='utf-8-sig') | [
"requests.post",
"json.loads",
"json.dumps",
"pandas.read_csv"
] | [((1216, 1256), 'pandas.read_csv', 'pd.read_csv', (['"""files/dialect_dataset.csv"""'], {}), "('files/dialect_dataset.csv')\n", (1227, 1256), True, 'import pandas as pd\n'), ((720, 760), 'json.dumps', 'json.dumps', (['ids[i * 1000:1000 * (i + 1)]'], {}), '(ids[i * 1000:1000 * (i + 1)])\n', (730, 760), False, 'import json\n'), ((791, 864), 'requests.post', 'requests.post', (['"""https://recruitment.aimtechnologies.co/ai-tasks"""', 'sub_ids'], {}), "('https://recruitment.aimtechnologies.co/ai-tasks', sub_ids)\n", (804, 864), False, 'import requests\n'), ((968, 986), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (978, 986), False, 'import json\n')] |
from booking.models import Schedule, ParkingSpace
from datetime import datetime as dt
from django import forms
class ReservingForm(forms.ModelForm):
reserving_dates = forms.ModelMultipleChoiceField(
queryset=Schedule.objects.filter(reserving_date__gte=dt.today()),
widget=forms.CheckboxSelectMultiple(),
)
class Meta:
model = Schedule
fields = ('reserving_dates',)
class EditReservingForm(forms.ModelForm):
reserving_dates = forms.ModelMultipleChoiceField(
queryset=Schedule.objects.all(),
widget=forms.CheckboxSelectMultiple(),
)
class Meta:
model = Schedule
fields = ('reserving_dates',)
class CreationScheduleForm(forms.ModelForm):
class Meta:
model = Schedule
fields = ('space', 'reserving_date',)
class DeletionScheduleForm(forms.ModelForm):
deleting_dates = forms.ModelMultipleChoiceField(
queryset=Schedule.objects.all(),
widget=forms.CheckboxSelectMultiple(),
)
class Meta:
model = Schedule
fields = ('deleting_dates',)
class CreationSpaceForm(forms.ModelForm):
class Meta:
model = ParkingSpace
fields = ('title', 'slug',)
class DeletionSpaceForm(forms.ModelForm):
deleting_spaces = forms.ModelMultipleChoiceField(
queryset=ParkingSpace.objects.all(),
widget=forms.CheckboxSelectMultiple(),
)
class Meta:
model = ParkingSpace
fields = ('deleting_spaces',) | [
"datetime.datetime.today",
"booking.models.Schedule.objects.all",
"booking.models.ParkingSpace.objects.all",
"django.forms.CheckboxSelectMultiple"
] | [((296, 326), 'django.forms.CheckboxSelectMultiple', 'forms.CheckboxSelectMultiple', ([], {}), '()\n', (324, 326), False, 'from django import forms\n'), ((529, 551), 'booking.models.Schedule.objects.all', 'Schedule.objects.all', ([], {}), '()\n', (549, 551), False, 'from booking.models import Schedule, ParkingSpace\n'), ((568, 598), 'django.forms.CheckboxSelectMultiple', 'forms.CheckboxSelectMultiple', ([], {}), '()\n', (596, 598), False, 'from django import forms\n'), ((937, 959), 'booking.models.Schedule.objects.all', 'Schedule.objects.all', ([], {}), '()\n', (957, 959), False, 'from booking.models import Schedule, ParkingSpace\n'), ((976, 1006), 'django.forms.CheckboxSelectMultiple', 'forms.CheckboxSelectMultiple', ([], {}), '()\n', (1004, 1006), False, 'from django import forms\n'), ((1333, 1359), 'booking.models.ParkingSpace.objects.all', 'ParkingSpace.objects.all', ([], {}), '()\n', (1357, 1359), False, 'from booking.models import Schedule, ParkingSpace\n'), ((1376, 1406), 'django.forms.CheckboxSelectMultiple', 'forms.CheckboxSelectMultiple', ([], {}), '()\n', (1404, 1406), False, 'from django import forms\n'), ((268, 278), 'datetime.datetime.today', 'dt.today', ([], {}), '()\n', (276, 278), True, 'from datetime import datetime as dt\n')] |
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import uuid
from importlib import reload
from unittest import mock
from unittest.mock import patch
from google.api_core import operation
from google.cloud import aiplatform
from google.cloud.aiplatform import base
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform.compat.types import (
matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref,
index_endpoint as gca_index_endpoint,
index as gca_index,
)
from google.cloud.aiplatform.compat.services import (
index_endpoint_service_client,
index_service_client,
)
from google.protobuf import field_mask_pb2
import pytest
# project
_TEST_PROJECT = "test-project"
_TEST_LOCATION = "us-central1"
_TEST_PARENT = f"projects/{_TEST_PROJECT}/locations/{_TEST_LOCATION}"
# index
_TEST_INDEX_ID = "index_id"
_TEST_INDEX_NAME = f"{_TEST_PARENT}/indexes/{_TEST_INDEX_ID}"
_TEST_INDEX_DISPLAY_NAME = "index_display_name"
# index_endpoint
_TEST_INDEX_ENDPOINT_ID = "index_endpoint_id"
_TEST_INDEX_ENDPOINT_NAME = f"{_TEST_PARENT}/indexEndpoints/{_TEST_INDEX_ENDPOINT_ID}"
_TEST_INDEX_ENDPOINT_DISPLAY_NAME = "index_endpoint_display_name"
_TEST_INDEX_ENDPOINT_DESCRIPTION = "index_endpoint_description"
_TEST_INDEX_DESCRIPTION = "index_description"
_TEST_INDEX_ENDPOINT_VPC_NETWORK = "projects/{}/global/networks/{}".format(
"12345", "network"
)
_TEST_LABELS = {"my_key": "my_value"}
_TEST_DISPLAY_NAME_UPDATE = "my new display name"
_TEST_DESCRIPTION_UPDATE = "my description update"
_TEST_LABELS_UPDATE = {"my_key_update": "my_value_update"}
# deployment
_TEST_DEPLOYED_INDEX_ID = "deployed_index_id"
_TEST_DEPLOYED_INDEX_DISPLAY_NAME = "deployed_index_display_name"
_TEST_MIN_REPLICA_COUNT = 2
_TEST_MAX_REPLICA_COUNT = 2
_TEST_ENABLE_ACCESS_LOGGING = False
_TEST_RESERVED_IP_RANGES = ["vertex-ai-ip-range-1", "vertex-ai-ip-range-2"]
_TEST_DEPLOYMENT_GROUP = "prod"
_TEST_AUTH_CONFIG_AUDIENCES = ["a", "b"]
_TEST_AUTH_CONFIG_ALLOWED_ISSUERS = [
"<EMAIL>",
"<EMAIL>",
]
# deployment_updated
_TEST_MIN_REPLICA_COUNT_UPDATED = 4
_TEST_MAX_REPLICA_COUNT_UPDATED = 4
# request_metadata
_TEST_REQUEST_METADATA = ()
# Lists
_TEST_INDEX_ENDPOINT_LIST = [
gca_index_endpoint.IndexEndpoint(
name=_TEST_INDEX_ENDPOINT_NAME,
display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
),
gca_index_endpoint.IndexEndpoint(
name=_TEST_INDEX_ENDPOINT_NAME,
display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
),
gca_index_endpoint.IndexEndpoint(
name=_TEST_INDEX_ENDPOINT_NAME,
display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
),
]
# Match
_TEST_QUERIES = [
[
-0.11333,
0.48402,
0.090771,
-0.22439,
0.034206,
-0.55831,
0.041849,
-0.53573,
0.18809,
-0.58722,
0.015313,
-0.014555,
0.80842,
-0.038519,
0.75348,
0.70502,
-0.17863,
0.3222,
0.67575,
0.67198,
0.26044,
0.4187,
-0.34122,
0.2286,
-0.53529,
1.2582,
-0.091543,
0.19716,
-0.037454,
-0.3336,
0.31399,
0.36488,
0.71263,
0.1307,
-0.24654,
-0.52445,
-0.036091,
0.55068,
0.10017,
0.48095,
0.71104,
-0.053462,
0.22325,
0.30917,
-0.39926,
0.036634,
-0.35431,
-0.42795,
0.46444,
0.25586,
0.68257,
-0.20821,
0.38433,
0.055773,
-0.2539,
-0.20804,
0.52522,
-0.11399,
-0.3253,
-0.44104,
0.17528,
0.62255,
0.50237,
-0.7607,
-0.071786,
0.0080131,
-0.13286,
0.50097,
0.18824,
-0.54722,
-0.42664,
0.4292,
0.14877,
-0.0072514,
-0.16484,
-0.059798,
0.9895,
-0.61738,
0.054169,
0.48424,
-0.35084,
-0.27053,
0.37829,
0.11503,
-0.39613,
0.24266,
0.39147,
-0.075256,
0.65093,
-0.20822,
-0.17456,
0.53571,
-0.16537,
0.13582,
-0.56016,
0.016964,
0.1277,
0.94071,
-0.22608,
-0.021106,
]
]
_TEST_NUM_NEIGHBOURS = 1
def uuid_mock():
return uuid.UUID(int=1)
# All index mocks
@pytest.fixture
def get_index_mock():
with patch.object(
index_service_client.IndexServiceClient, "get_index"
) as get_index_mock:
index = gca_index.Index(
name=_TEST_INDEX_NAME,
display_name=_TEST_INDEX_DISPLAY_NAME,
description=_TEST_INDEX_DESCRIPTION,
)
index.deployed_indexes = [
gca_matching_engine_deployed_index_ref.DeployedIndexRef(
index_endpoint=index.name,
deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
)
]
get_index_mock.return_value = index
yield get_index_mock
# All index_endpoint mocks
@pytest.fixture
def get_index_endpoint_mock():
with patch.object(
index_endpoint_service_client.IndexEndpointServiceClient, "get_index_endpoint"
) as get_index_endpoint_mock:
index_endpoint = gca_index_endpoint.IndexEndpoint(
name=_TEST_INDEX_ENDPOINT_NAME,
display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
)
index_endpoint.deployed_indexes = [
gca_index_endpoint.DeployedIndex(
id=_TEST_DEPLOYED_INDEX_ID,
index=_TEST_INDEX_NAME,
display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
deployment_group=_TEST_DEPLOYMENT_GROUP,
automatic_resources={
"min_replica_count": _TEST_MIN_REPLICA_COUNT,
"max_replica_count": _TEST_MAX_REPLICA_COUNT,
},
deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
audiences=_TEST_AUTH_CONFIG_AUDIENCES,
allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
)
),
),
gca_index_endpoint.DeployedIndex(
id=f"{_TEST_DEPLOYED_INDEX_ID}_2",
index=f"{_TEST_INDEX_NAME}_2",
display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
deployment_group=_TEST_DEPLOYMENT_GROUP,
automatic_resources={
"min_replica_count": _TEST_MIN_REPLICA_COUNT,
"max_replica_count": _TEST_MAX_REPLICA_COUNT,
},
deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
audiences=_TEST_AUTH_CONFIG_AUDIENCES,
allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
)
),
),
]
get_index_endpoint_mock.return_value = index_endpoint
yield get_index_endpoint_mock
@pytest.fixture
def deploy_index_mock():
with patch.object(
index_endpoint_service_client.IndexEndpointServiceClient,
"deploy_index",
) as deploy_index_mock:
deploy_index_lro_mock = mock.Mock(operation.Operation)
deploy_index_mock.return_value = deploy_index_lro_mock
yield deploy_index_mock
@pytest.fixture
def undeploy_index_mock():
with patch.object(
index_endpoint_service_client.IndexEndpointServiceClient,
"undeploy_index",
) as undeploy_index_mock:
undeploy_index_lro_mock = mock.Mock(operation.Operation)
undeploy_index_mock.return_value = undeploy_index_lro_mock
yield undeploy_index_mock
@pytest.fixture
def update_index_endpoint_mock():
with patch.object(
index_endpoint_service_client.IndexEndpointServiceClient,
"update_index_endpoint",
) as index_endpoint_mock:
index_endpoint_mock.return_value = gca_index_endpoint.IndexEndpoint(
name=_TEST_INDEX_ENDPOINT_NAME,
display_name=_TEST_DISPLAY_NAME_UPDATE,
description=_TEST_DESCRIPTION_UPDATE,
labels=_TEST_LABELS_UPDATE,
)
yield index_endpoint_mock
@pytest.fixture
def mutate_deployed_index_mock():
with patch.object(
index_endpoint_service_client.IndexEndpointServiceClient,
"mutate_deployed_index",
) as mutate_deployed_index_mock:
mutate_deployed_index_lro_mock = mock.Mock(operation.Operation)
update_index_endpoint_mock.return_value = mutate_deployed_index_lro_mock
yield mutate_deployed_index_mock
@pytest.fixture
def list_index_endpoints_mock():
with patch.object(
index_endpoint_service_client.IndexEndpointServiceClient, "list_index_endpoints"
) as list_index_endpoints_mock:
list_index_endpoints_mock.return_value = _TEST_INDEX_ENDPOINT_LIST
yield list_index_endpoints_mock
@pytest.fixture
def delete_index_endpoint_mock():
with patch.object(
index_endpoint_service_client.IndexEndpointServiceClient,
"delete_index_endpoint",
) as delete_index_endpoint_mock:
delete_index_endpoint_lro_mock = mock.Mock(operation.Operation)
delete_index_endpoint_mock.return_value = delete_index_endpoint_lro_mock
yield delete_index_endpoint_mock
@pytest.fixture
def create_index_endpoint_mock():
with patch.object(
index_endpoint_service_client.IndexEndpointServiceClient,
"create_index_endpoint",
) as create_index_endpoint_mock:
create_index_endpoint_lro_mock = mock.Mock(operation.Operation)
create_index_endpoint_lro_mock.result.return_value = (
gca_index_endpoint.IndexEndpoint(
name=_TEST_INDEX_ENDPOINT_NAME,
display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
)
)
create_index_endpoint_mock.return_value = create_index_endpoint_lro_mock
yield create_index_endpoint_mock
@pytest.mark.usefixtures("google_auth_mock")
class TestMatchingEngineIndexEndpoint:
def setup_method(self):
reload(initializer)
reload(aiplatform)
def teardown_method(self):
initializer.global_pool.shutdown(wait=True)
@pytest.mark.parametrize(
"index_endpoint_name", [_TEST_INDEX_ENDPOINT_ID, _TEST_INDEX_ENDPOINT_NAME]
)
def test_init_index_endpoint(self, index_endpoint_name, get_index_endpoint_mock):
aiplatform.init(project=_TEST_PROJECT)
my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=index_endpoint_name
)
get_index_endpoint_mock.assert_called_once_with(
name=my_index_endpoint.resource_name, retry=base._DEFAULT_RETRY
)
@pytest.mark.usefixtures("get_index_endpoint_mock")
def test_update_index_endpoint(self, update_index_endpoint_mock):
aiplatform.init(project=_TEST_PROJECT)
my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
)
updated_endpoint = my_index_endpoint.update(
display_name=_TEST_DISPLAY_NAME_UPDATE,
description=_TEST_DESCRIPTION_UPDATE,
labels=_TEST_LABELS_UPDATE,
request_metadata=_TEST_REQUEST_METADATA,
)
expected = gca_index_endpoint.IndexEndpoint(
name=_TEST_INDEX_ENDPOINT_NAME,
display_name=_TEST_DISPLAY_NAME_UPDATE,
description=_TEST_DESCRIPTION_UPDATE,
labels=_TEST_LABELS_UPDATE,
)
update_index_endpoint_mock.assert_called_once_with(
index_endpoint=expected,
update_mask=field_mask_pb2.FieldMask(
paths=["labels", "display_name", "description"]
),
metadata=_TEST_REQUEST_METADATA,
)
assert updated_endpoint.gca_resource == expected
def test_list_index_endpoints(self, list_index_endpoints_mock):
aiplatform.init(project=_TEST_PROJECT)
my_index_endpoints_list = aiplatform.MatchingEngineIndexEndpoint.list()
list_index_endpoints_mock.assert_called_once_with(
request={"parent": _TEST_PARENT, "filter": None}
)
assert len(my_index_endpoints_list) == len(_TEST_INDEX_ENDPOINT_LIST)
for my_index_endpoint in my_index_endpoints_list:
assert type(my_index_endpoint) == aiplatform.MatchingEngineIndexEndpoint
@pytest.mark.parametrize("sync", [True, False])
@pytest.mark.usefixtures("get_index_endpoint_mock")
def test_delete_index_endpoint(self, delete_index_endpoint_mock, sync):
aiplatform.init(project=_TEST_PROJECT)
my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
)
my_index_endpoint.delete(sync=sync)
if not sync:
my_index_endpoint.wait()
delete_index_endpoint_mock.assert_called_once_with(
name=my_index_endpoint.resource_name
)
@pytest.mark.usefixtures("get_index_endpoint_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_create_index_endpoint(self, create_index_endpoint_mock, sync):
aiplatform.init(project=_TEST_PROJECT)
my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint.create(
display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
network=_TEST_INDEX_ENDPOINT_VPC_NETWORK,
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
labels=_TEST_LABELS,
)
if not sync:
my_index_endpoint.wait()
expected = gca_index_endpoint.IndexEndpoint(
display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME,
network=_TEST_INDEX_ENDPOINT_VPC_NETWORK,
description=_TEST_INDEX_ENDPOINT_DESCRIPTION,
labels=_TEST_LABELS,
)
create_index_endpoint_mock.assert_called_once_with(
parent=_TEST_PARENT,
index_endpoint=expected,
metadata=_TEST_REQUEST_METADATA,
)
@pytest.mark.usefixtures("get_index_endpoint_mock", "get_index_mock")
def test_deploy_index(self, deploy_index_mock, undeploy_index_mock):
aiplatform.init(project=_TEST_PROJECT)
my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
)
# Get index
my_index = aiplatform.MatchingEngineIndex(index_name=_TEST_INDEX_NAME)
my_index_endpoint = my_index_endpoint.deploy_index(
index=my_index,
deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
min_replica_count=_TEST_MIN_REPLICA_COUNT,
max_replica_count=_TEST_MAX_REPLICA_COUNT,
enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
deployment_group=_TEST_DEPLOYMENT_GROUP,
auth_config_audiences=_TEST_AUTH_CONFIG_AUDIENCES,
auth_config_allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
request_metadata=_TEST_REQUEST_METADATA,
)
deploy_index_mock.assert_called_once_with(
index_endpoint=my_index_endpoint.resource_name,
deployed_index=gca_index_endpoint.DeployedIndex(
id=_TEST_DEPLOYED_INDEX_ID,
index=my_index.resource_name,
display_name=_TEST_DEPLOYED_INDEX_DISPLAY_NAME,
enable_access_logging=_TEST_ENABLE_ACCESS_LOGGING,
reserved_ip_ranges=_TEST_RESERVED_IP_RANGES,
deployment_group=_TEST_DEPLOYMENT_GROUP,
automatic_resources={
"min_replica_count": _TEST_MIN_REPLICA_COUNT,
"max_replica_count": _TEST_MAX_REPLICA_COUNT,
},
deployed_index_auth_config=gca_index_endpoint.DeployedIndexAuthConfig(
auth_provider=gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider(
audiences=_TEST_AUTH_CONFIG_AUDIENCES,
allowed_issuers=_TEST_AUTH_CONFIG_ALLOWED_ISSUERS,
)
),
),
metadata=_TEST_REQUEST_METADATA,
)
my_index_endpoint = my_index_endpoint.undeploy_index(
deployed_index_id=_TEST_DEPLOYED_INDEX_ID
)
undeploy_index_mock.assert_called_once_with(
index_endpoint=my_index_endpoint.resource_name,
deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
metadata=_TEST_REQUEST_METADATA,
)
@pytest.mark.usefixtures("get_index_endpoint_mock", "get_index_mock")
def test_mutate_deployed_index(self, mutate_deployed_index_mock):
aiplatform.init(project=_TEST_PROJECT)
my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=_TEST_INDEX_ENDPOINT_ID
)
my_index_endpoint.mutate_deployed_index(
deployed_index_id=_TEST_DEPLOYED_INDEX_ID,
min_replica_count=_TEST_MIN_REPLICA_COUNT_UPDATED,
max_replica_count=_TEST_MAX_REPLICA_COUNT_UPDATED,
request_metadata=_TEST_REQUEST_METADATA,
)
mutate_deployed_index_mock.assert_called_once_with(
index_endpoint=_TEST_INDEX_ENDPOINT_NAME,
deployed_index=gca_index_endpoint.DeployedIndex(
id=_TEST_DEPLOYED_INDEX_ID,
automatic_resources={
"min_replica_count": _TEST_MIN_REPLICA_COUNT_UPDATED,
"max_replica_count": _TEST_MAX_REPLICA_COUNT_UPDATED,
},
),
metadata=_TEST_REQUEST_METADATA,
)
@pytest.mark.usefixtures("get_index_endpoint_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_delete_index_endpoint_without_force(
self, undeploy_index_mock, delete_index_endpoint_mock, sync
):
my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=_TEST_INDEX_ENDPOINT_NAME
)
my_index_endpoint.delete(sync=sync)
if not sync:
my_index_endpoint.wait()
# undeploy_index_mock should not be called unless force is set to True
undeploy_index_mock.assert_not_called()
delete_index_endpoint_mock.assert_called_once_with(
name=_TEST_INDEX_ENDPOINT_NAME
)
@pytest.mark.usefixtures("get_index_endpoint_mock")
@pytest.mark.parametrize("sync", [True, False])
def test_delete_index_endpoint_with_force(
self, undeploy_index_mock, delete_index_endpoint_mock, sync
):
my_index_endpoint = aiplatform.MatchingEngineIndexEndpoint(
index_endpoint_name=_TEST_INDEX_ENDPOINT_NAME
)
my_index_endpoint.delete(force=True, sync=sync)
if not sync:
my_index_endpoint.wait()
# undeploy_index_mock should be called if force is set to True
assert undeploy_index_mock.call_count == 2
delete_index_endpoint_mock.assert_called_once_with(
name=_TEST_INDEX_ENDPOINT_NAME
)
| [
"google.protobuf.field_mask_pb2.FieldMask",
"uuid.UUID",
"unittest.mock.Mock",
"google.cloud.aiplatform.MatchingEngineIndexEndpoint.list",
"google.cloud.aiplatform.MatchingEngineIndexEndpoint",
"google.cloud.aiplatform.MatchingEngineIndex",
"google.cloud.aiplatform.compat.types.index_endpoint.DeployedIn... | [((11432, 11475), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""google_auth_mock"""'], {}), "('google_auth_mock')\n", (11455, 11475), False, 'import pytest\n'), ((2782, 2949), 'google.cloud.aiplatform.compat.types.index_endpoint.IndexEndpoint', 'gca_index_endpoint.IndexEndpoint', ([], {'name': '_TEST_INDEX_ENDPOINT_NAME', 'display_name': '_TEST_INDEX_ENDPOINT_DISPLAY_NAME', 'description': '_TEST_INDEX_ENDPOINT_DESCRIPTION'}), '(name=_TEST_INDEX_ENDPOINT_NAME,\n display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME, description=\n _TEST_INDEX_ENDPOINT_DESCRIPTION)\n', (2814, 2949), True, 'from google.cloud.aiplatform.compat.types import matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref, index_endpoint as gca_index_endpoint, index as gca_index\n'), ((2977, 3144), 'google.cloud.aiplatform.compat.types.index_endpoint.IndexEndpoint', 'gca_index_endpoint.IndexEndpoint', ([], {'name': '_TEST_INDEX_ENDPOINT_NAME', 'display_name': '_TEST_INDEX_ENDPOINT_DISPLAY_NAME', 'description': '_TEST_INDEX_ENDPOINT_DESCRIPTION'}), '(name=_TEST_INDEX_ENDPOINT_NAME,\n display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME, description=\n _TEST_INDEX_ENDPOINT_DESCRIPTION)\n', (3009, 3144), True, 'from google.cloud.aiplatform.compat.types import matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref, index_endpoint as gca_index_endpoint, index as gca_index\n'), ((3172, 3339), 'google.cloud.aiplatform.compat.types.index_endpoint.IndexEndpoint', 'gca_index_endpoint.IndexEndpoint', ([], {'name': '_TEST_INDEX_ENDPOINT_NAME', 'display_name': '_TEST_INDEX_ENDPOINT_DISPLAY_NAME', 'description': '_TEST_INDEX_ENDPOINT_DESCRIPTION'}), '(name=_TEST_INDEX_ENDPOINT_NAME,\n display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME, description=\n _TEST_INDEX_ENDPOINT_DESCRIPTION)\n', (3204, 3339), True, 'from google.cloud.aiplatform.compat.types import matching_engine_deployed_index_ref as 
gca_matching_engine_deployed_index_ref, index_endpoint as gca_index_endpoint, index as gca_index\n'), ((5216, 5232), 'uuid.UUID', 'uuid.UUID', ([], {'int': '(1)'}), '(int=1)\n', (5225, 5232), False, 'import uuid\n'), ((11688, 11792), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""index_endpoint_name"""', '[_TEST_INDEX_ENDPOINT_ID, _TEST_INDEX_ENDPOINT_NAME]'], {}), "('index_endpoint_name', [_TEST_INDEX_ENDPOINT_ID,\n _TEST_INDEX_ENDPOINT_NAME])\n", (11711, 11792), False, 'import pytest\n'), ((12217, 12267), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""get_index_endpoint_mock"""'], {}), "('get_index_endpoint_mock')\n", (12240, 12267), False, 'import pytest\n'), ((13923, 13969), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sync"""', '[True, False]'], {}), "('sync', [True, False])\n", (13946, 13969), False, 'import pytest\n'), ((13975, 14025), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""get_index_endpoint_mock"""'], {}), "('get_index_endpoint_mock')\n", (13998, 14025), False, 'import pytest\n'), ((14513, 14563), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""get_index_endpoint_mock"""'], {}), "('get_index_endpoint_mock')\n", (14536, 14563), False, 'import pytest\n'), ((14569, 14615), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sync"""', '[True, False]'], {}), "('sync', [True, False])\n", (14592, 14615), False, 'import pytest\n'), ((15549, 15617), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""get_index_endpoint_mock"""', '"""get_index_mock"""'], {}), "('get_index_endpoint_mock', 'get_index_mock')\n", (15572, 15617), False, 'import pytest\n'), ((18146, 18214), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""get_index_endpoint_mock"""', '"""get_index_mock"""'], {}), "('get_index_endpoint_mock', 'get_index_mock')\n", (18169, 18214), False, 'import pytest\n'), ((19262, 19312), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', 
(['"""get_index_endpoint_mock"""'], {}), "('get_index_endpoint_mock')\n", (19285, 19312), False, 'import pytest\n'), ((19318, 19364), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sync"""', '[True, False]'], {}), "('sync', [True, False])\n", (19341, 19364), False, 'import pytest\n'), ((19979, 20029), 'pytest.mark.usefixtures', 'pytest.mark.usefixtures', (['"""get_index_endpoint_mock"""'], {}), "('get_index_endpoint_mock')\n", (20002, 20029), False, 'import pytest\n'), ((20035, 20081), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""sync"""', '[True, False]'], {}), "('sync', [True, False])\n", (20058, 20081), False, 'import pytest\n'), ((5300, 5366), 'unittest.mock.patch.object', 'patch.object', (['index_service_client.IndexServiceClient', '"""get_index"""'], {}), "(index_service_client.IndexServiceClient, 'get_index')\n", (5312, 5366), False, 'from unittest.mock import patch\n'), ((5416, 5535), 'google.cloud.aiplatform.compat.types.index.Index', 'gca_index.Index', ([], {'name': '_TEST_INDEX_NAME', 'display_name': '_TEST_INDEX_DISPLAY_NAME', 'description': '_TEST_INDEX_DESCRIPTION'}), '(name=_TEST_INDEX_NAME, display_name=\n _TEST_INDEX_DISPLAY_NAME, description=_TEST_INDEX_DESCRIPTION)\n', (5431, 5535), True, 'from google.cloud.aiplatform.compat.types import matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref, index_endpoint as gca_index_endpoint, index as gca_index\n'), ((5968, 6064), 'unittest.mock.patch.object', 'patch.object', (['index_endpoint_service_client.IndexEndpointServiceClient', '"""get_index_endpoint"""'], {}), "(index_endpoint_service_client.IndexEndpointServiceClient,\n 'get_index_endpoint')\n", (5980, 6064), False, 'from unittest.mock import patch\n'), ((6128, 6295), 'google.cloud.aiplatform.compat.types.index_endpoint.IndexEndpoint', 'gca_index_endpoint.IndexEndpoint', ([], {'name': '_TEST_INDEX_ENDPOINT_NAME', 'display_name': '_TEST_INDEX_ENDPOINT_DISPLAY_NAME', 'description': 
'_TEST_INDEX_ENDPOINT_DESCRIPTION'}), '(name=_TEST_INDEX_ENDPOINT_NAME,\n display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME, description=\n _TEST_INDEX_ENDPOINT_DESCRIPTION)\n', (6160, 6295), True, 'from google.cloud.aiplatform.compat.types import matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref, index_endpoint as gca_index_endpoint, index as gca_index\n'), ((8435, 8525), 'unittest.mock.patch.object', 'patch.object', (['index_endpoint_service_client.IndexEndpointServiceClient', '"""deploy_index"""'], {}), "(index_endpoint_service_client.IndexEndpointServiceClient,\n 'deploy_index')\n", (8447, 8525), False, 'from unittest.mock import patch\n'), ((8599, 8629), 'unittest.mock.Mock', 'mock.Mock', (['operation.Operation'], {}), '(operation.Operation)\n', (8608, 8629), False, 'from unittest import mock\n'), ((8779, 8871), 'unittest.mock.patch.object', 'patch.object', (['index_endpoint_service_client.IndexEndpointServiceClient', '"""undeploy_index"""'], {}), "(index_endpoint_service_client.IndexEndpointServiceClient,\n 'undeploy_index')\n", (8791, 8871), False, 'from unittest.mock import patch\n'), ((8949, 8979), 'unittest.mock.Mock', 'mock.Mock', (['operation.Operation'], {}), '(operation.Operation)\n', (8958, 8979), False, 'from unittest import mock\n'), ((9142, 9241), 'unittest.mock.patch.object', 'patch.object', (['index_endpoint_service_client.IndexEndpointServiceClient', '"""update_index_endpoint"""'], {}), "(index_endpoint_service_client.IndexEndpointServiceClient,\n 'update_index_endpoint')\n", (9154, 9241), False, 'from unittest.mock import patch\n'), ((9328, 9507), 'google.cloud.aiplatform.compat.types.index_endpoint.IndexEndpoint', 'gca_index_endpoint.IndexEndpoint', ([], {'name': '_TEST_INDEX_ENDPOINT_NAME', 'display_name': '_TEST_DISPLAY_NAME_UPDATE', 'description': '_TEST_DESCRIPTION_UPDATE', 'labels': '_TEST_LABELS_UPDATE'}), '(name=_TEST_INDEX_ENDPOINT_NAME,\n display_name=_TEST_DISPLAY_NAME_UPDATE, description=\n 
_TEST_DESCRIPTION_UPDATE, labels=_TEST_LABELS_UPDATE)\n', (9360, 9507), True, 'from google.cloud.aiplatform.compat.types import matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref, index_endpoint as gca_index_endpoint, index as gca_index\n'), ((9654, 9753), 'unittest.mock.patch.object', 'patch.object', (['index_endpoint_service_client.IndexEndpointServiceClient', '"""mutate_deployed_index"""'], {}), "(index_endpoint_service_client.IndexEndpointServiceClient,\n 'mutate_deployed_index')\n", (9666, 9753), False, 'from unittest.mock import patch\n'), ((9845, 9875), 'unittest.mock.Mock', 'mock.Mock', (['operation.Operation'], {}), '(operation.Operation)\n', (9854, 9875), False, 'from unittest import mock\n'), ((10058, 10156), 'unittest.mock.patch.object', 'patch.object', (['index_endpoint_service_client.IndexEndpointServiceClient', '"""list_index_endpoints"""'], {}), "(index_endpoint_service_client.IndexEndpointServiceClient,\n 'list_index_endpoints')\n", (10070, 10156), False, 'from unittest.mock import patch\n'), ((10373, 10472), 'unittest.mock.patch.object', 'patch.object', (['index_endpoint_service_client.IndexEndpointServiceClient', '"""delete_index_endpoint"""'], {}), "(index_endpoint_service_client.IndexEndpointServiceClient,\n 'delete_index_endpoint')\n", (10385, 10472), False, 'from unittest.mock import patch\n'), ((10564, 10594), 'unittest.mock.Mock', 'mock.Mock', (['operation.Operation'], {}), '(operation.Operation)\n', (10573, 10594), False, 'from unittest import mock\n'), ((10778, 10877), 'unittest.mock.patch.object', 'patch.object', (['index_endpoint_service_client.IndexEndpointServiceClient', '"""create_index_endpoint"""'], {}), "(index_endpoint_service_client.IndexEndpointServiceClient,\n 'create_index_endpoint')\n", (10790, 10877), False, 'from unittest.mock import patch\n'), ((10969, 10999), 'unittest.mock.Mock', 'mock.Mock', (['operation.Operation'], {}), '(operation.Operation)\n', (10978, 10999), False, 'from unittest import 
mock\n'), ((11075, 11242), 'google.cloud.aiplatform.compat.types.index_endpoint.IndexEndpoint', 'gca_index_endpoint.IndexEndpoint', ([], {'name': '_TEST_INDEX_ENDPOINT_NAME', 'display_name': '_TEST_INDEX_ENDPOINT_DISPLAY_NAME', 'description': '_TEST_INDEX_ENDPOINT_DESCRIPTION'}), '(name=_TEST_INDEX_ENDPOINT_NAME,\n display_name=_TEST_INDEX_ENDPOINT_DISPLAY_NAME, description=\n _TEST_INDEX_ENDPOINT_DESCRIPTION)\n', (11107, 11242), True, 'from google.cloud.aiplatform.compat.types import matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref, index_endpoint as gca_index_endpoint, index as gca_index\n'), ((11551, 11570), 'importlib.reload', 'reload', (['initializer'], {}), '(initializer)\n', (11557, 11570), False, 'from importlib import reload\n'), ((11579, 11597), 'importlib.reload', 'reload', (['aiplatform'], {}), '(aiplatform)\n', (11585, 11597), False, 'from importlib import reload\n'), ((11638, 11681), 'google.cloud.aiplatform.initializer.global_pool.shutdown', 'initializer.global_pool.shutdown', ([], {'wait': '(True)'}), '(wait=True)\n', (11670, 11681), False, 'from google.cloud.aiplatform import initializer\n'), ((11897, 11935), 'google.cloud.aiplatform.init', 'aiplatform.init', ([], {'project': '_TEST_PROJECT'}), '(project=_TEST_PROJECT)\n', (11912, 11935), False, 'from google.cloud import aiplatform\n'), ((11965, 12044), 'google.cloud.aiplatform.MatchingEngineIndexEndpoint', 'aiplatform.MatchingEngineIndexEndpoint', ([], {'index_endpoint_name': 'index_endpoint_name'}), '(index_endpoint_name=index_endpoint_name)\n', (12003, 12044), False, 'from google.cloud import aiplatform\n'), ((12346, 12384), 'google.cloud.aiplatform.init', 'aiplatform.init', ([], {'project': '_TEST_PROJECT'}), '(project=_TEST_PROJECT)\n', (12361, 12384), False, 'from google.cloud import aiplatform\n'), ((12414, 12502), 'google.cloud.aiplatform.MatchingEngineIndexEndpoint', 'aiplatform.MatchingEngineIndexEndpoint', ([], {'index_endpoint_name': 
'_TEST_INDEX_ENDPOINT_ID'}), '(index_endpoint_name=\n _TEST_INDEX_ENDPOINT_ID)\n', (12452, 12502), False, 'from google.cloud import aiplatform\n'), ((12798, 12977), 'google.cloud.aiplatform.compat.types.index_endpoint.IndexEndpoint', 'gca_index_endpoint.IndexEndpoint', ([], {'name': '_TEST_INDEX_ENDPOINT_NAME', 'display_name': '_TEST_DISPLAY_NAME_UPDATE', 'description': '_TEST_DESCRIPTION_UPDATE', 'labels': '_TEST_LABELS_UPDATE'}), '(name=_TEST_INDEX_ENDPOINT_NAME,\n display_name=_TEST_DISPLAY_NAME_UPDATE, description=\n _TEST_DESCRIPTION_UPDATE, labels=_TEST_LABELS_UPDATE)\n', (12830, 12977), True, 'from google.cloud.aiplatform.compat.types import matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref, index_endpoint as gca_index_endpoint, index as gca_index\n'), ((13445, 13483), 'google.cloud.aiplatform.init', 'aiplatform.init', ([], {'project': '_TEST_PROJECT'}), '(project=_TEST_PROJECT)\n', (13460, 13483), False, 'from google.cloud import aiplatform\n'), ((13519, 13564), 'google.cloud.aiplatform.MatchingEngineIndexEndpoint.list', 'aiplatform.MatchingEngineIndexEndpoint.list', ([], {}), '()\n', (13562, 13564), False, 'from google.cloud import aiplatform\n'), ((14110, 14148), 'google.cloud.aiplatform.init', 'aiplatform.init', ([], {'project': '_TEST_PROJECT'}), '(project=_TEST_PROJECT)\n', (14125, 14148), False, 'from google.cloud import aiplatform\n'), ((14178, 14266), 'google.cloud.aiplatform.MatchingEngineIndexEndpoint', 'aiplatform.MatchingEngineIndexEndpoint', ([], {'index_endpoint_name': '_TEST_INDEX_ENDPOINT_ID'}), '(index_endpoint_name=\n _TEST_INDEX_ENDPOINT_ID)\n', (14216, 14266), False, 'from google.cloud import aiplatform\n'), ((14700, 14738), 'google.cloud.aiplatform.init', 'aiplatform.init', ([], {'project': '_TEST_PROJECT'}), '(project=_TEST_PROJECT)\n', (14715, 14738), False, 'from google.cloud import aiplatform\n'), ((14768, 14985), 'google.cloud.aiplatform.MatchingEngineIndexEndpoint.create', 
'aiplatform.MatchingEngineIndexEndpoint.create', ([], {'display_name': '_TEST_INDEX_ENDPOINT_DISPLAY_NAME', 'network': '_TEST_INDEX_ENDPOINT_VPC_NETWORK', 'description': '_TEST_INDEX_ENDPOINT_DESCRIPTION', 'labels': '_TEST_LABELS'}), '(display_name=\n _TEST_INDEX_ENDPOINT_DISPLAY_NAME, network=\n _TEST_INDEX_ENDPOINT_VPC_NETWORK, description=\n _TEST_INDEX_ENDPOINT_DESCRIPTION, labels=_TEST_LABELS)\n', (14813, 14985), False, 'from google.cloud import aiplatform\n'), ((15109, 15313), 'google.cloud.aiplatform.compat.types.index_endpoint.IndexEndpoint', 'gca_index_endpoint.IndexEndpoint', ([], {'display_name': '_TEST_INDEX_ENDPOINT_DISPLAY_NAME', 'network': '_TEST_INDEX_ENDPOINT_VPC_NETWORK', 'description': '_TEST_INDEX_ENDPOINT_DESCRIPTION', 'labels': '_TEST_LABELS'}), '(display_name=\n _TEST_INDEX_ENDPOINT_DISPLAY_NAME, network=\n _TEST_INDEX_ENDPOINT_VPC_NETWORK, description=\n _TEST_INDEX_ENDPOINT_DESCRIPTION, labels=_TEST_LABELS)\n', (15141, 15313), True, 'from google.cloud.aiplatform.compat.types import matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref, index_endpoint as gca_index_endpoint, index as gca_index\n'), ((15699, 15737), 'google.cloud.aiplatform.init', 'aiplatform.init', ([], {'project': '_TEST_PROJECT'}), '(project=_TEST_PROJECT)\n', (15714, 15737), False, 'from google.cloud import aiplatform\n'), ((15767, 15855), 'google.cloud.aiplatform.MatchingEngineIndexEndpoint', 'aiplatform.MatchingEngineIndexEndpoint', ([], {'index_endpoint_name': '_TEST_INDEX_ENDPOINT_ID'}), '(index_endpoint_name=\n _TEST_INDEX_ENDPOINT_ID)\n', (15805, 15855), False, 'from google.cloud import aiplatform\n'), ((15913, 15972), 'google.cloud.aiplatform.MatchingEngineIndex', 'aiplatform.MatchingEngineIndex', ([], {'index_name': '_TEST_INDEX_NAME'}), '(index_name=_TEST_INDEX_NAME)\n', (15943, 15972), False, 'from google.cloud import aiplatform\n'), ((18293, 18331), 'google.cloud.aiplatform.init', 'aiplatform.init', ([], {'project': '_TEST_PROJECT'}), 
'(project=_TEST_PROJECT)\n', (18308, 18331), False, 'from google.cloud import aiplatform\n'), ((18361, 18449), 'google.cloud.aiplatform.MatchingEngineIndexEndpoint', 'aiplatform.MatchingEngineIndexEndpoint', ([], {'index_endpoint_name': '_TEST_INDEX_ENDPOINT_ID'}), '(index_endpoint_name=\n _TEST_INDEX_ENDPOINT_ID)\n', (18399, 18449), False, 'from google.cloud import aiplatform\n'), ((19519, 19609), 'google.cloud.aiplatform.MatchingEngineIndexEndpoint', 'aiplatform.MatchingEngineIndexEndpoint', ([], {'index_endpoint_name': '_TEST_INDEX_ENDPOINT_NAME'}), '(index_endpoint_name=\n _TEST_INDEX_ENDPOINT_NAME)\n', (19557, 19609), False, 'from google.cloud import aiplatform\n'), ((20233, 20323), 'google.cloud.aiplatform.MatchingEngineIndexEndpoint', 'aiplatform.MatchingEngineIndexEndpoint', ([], {'index_endpoint_name': '_TEST_INDEX_ENDPOINT_NAME'}), '(index_endpoint_name=\n _TEST_INDEX_ENDPOINT_NAME)\n', (20271, 20323), False, 'from google.cloud import aiplatform\n'), ((5626, 5756), 'google.cloud.aiplatform.compat.types.matching_engine_deployed_index_ref.DeployedIndexRef', 'gca_matching_engine_deployed_index_ref.DeployedIndexRef', ([], {'index_endpoint': 'index.name', 'deployed_index_id': '_TEST_DEPLOYED_INDEX_ID'}), '(index_endpoint=\n index.name, deployed_index_id=_TEST_DEPLOYED_INDEX_ID)\n', (5681, 5756), True, 'from google.cloud.aiplatform.compat.types import matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref, index_endpoint as gca_index_endpoint, index as gca_index\n'), ((13150, 13223), 'google.protobuf.field_mask_pb2.FieldMask', 'field_mask_pb2.FieldMask', ([], {'paths': "['labels', 'display_name', 'description']"}), "(paths=['labels', 'display_name', 'description'])\n", (13174, 13223), False, 'from google.protobuf import field_mask_pb2\n'), ((18903, 19105), 'google.cloud.aiplatform.compat.types.index_endpoint.DeployedIndex', 'gca_index_endpoint.DeployedIndex', ([], {'id': '_TEST_DEPLOYED_INDEX_ID', 'automatic_resources': 
"{'min_replica_count': _TEST_MIN_REPLICA_COUNT_UPDATED, 'max_replica_count':\n _TEST_MAX_REPLICA_COUNT_UPDATED}"}), "(id=_TEST_DEPLOYED_INDEX_ID,\n automatic_resources={'min_replica_count':\n _TEST_MIN_REPLICA_COUNT_UPDATED, 'max_replica_count':\n _TEST_MAX_REPLICA_COUNT_UPDATED})\n", (18935, 19105), True, 'from google.cloud.aiplatform.compat.types import matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref, index_endpoint as gca_index_endpoint, index as gca_index\n'), ((7067, 7222), 'google.cloud.aiplatform.compat.types.index_endpoint.DeployedIndexAuthConfig.AuthProvider', 'gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider', ([], {'audiences': '_TEST_AUTH_CONFIG_AUDIENCES', 'allowed_issuers': '_TEST_AUTH_CONFIG_ALLOWED_ISSUERS'}), '(audiences=\n _TEST_AUTH_CONFIG_AUDIENCES, allowed_issuers=\n _TEST_AUTH_CONFIG_ALLOWED_ISSUERS)\n', (7122, 7222), True, 'from google.cloud.aiplatform.compat.types import matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref, index_endpoint as gca_index_endpoint, index as gca_index\n'), ((8021, 8176), 'google.cloud.aiplatform.compat.types.index_endpoint.DeployedIndexAuthConfig.AuthProvider', 'gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider', ([], {'audiences': '_TEST_AUTH_CONFIG_AUDIENCES', 'allowed_issuers': '_TEST_AUTH_CONFIG_ALLOWED_ISSUERS'}), '(audiences=\n _TEST_AUTH_CONFIG_AUDIENCES, allowed_issuers=\n _TEST_AUTH_CONFIG_ALLOWED_ISSUERS)\n', (8076, 8176), True, 'from google.cloud.aiplatform.compat.types import matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref, index_endpoint as gca_index_endpoint, index as gca_index\n'), ((17483, 17638), 'google.cloud.aiplatform.compat.types.index_endpoint.DeployedIndexAuthConfig.AuthProvider', 'gca_index_endpoint.DeployedIndexAuthConfig.AuthProvider', ([], {'audiences': '_TEST_AUTH_CONFIG_AUDIENCES', 'allowed_issuers': '_TEST_AUTH_CONFIG_ALLOWED_ISSUERS'}), '(audiences=\n _TEST_AUTH_CONFIG_AUDIENCES, 
allowed_issuers=\n _TEST_AUTH_CONFIG_ALLOWED_ISSUERS)\n', (17538, 17638), True, 'from google.cloud.aiplatform.compat.types import matching_engine_deployed_index_ref as gca_matching_engine_deployed_index_ref, index_endpoint as gca_index_endpoint, index as gca_index\n')] |
#! /usr/bin/python
'''
Test for session module
'''
import unittest
import uestc_eams
from .mock_server import LoginMockServer
from .utils import HookedMethod, MakeResponse
# Single shared mock of the EAMS login server; its `Patch` attribute is used
# below as a decorator to route the test's HTTP traffic through the mock, and
# its flags (Logined, ExpireTestTiggered, ...) record what the server saw.
mock_login = LoginMockServer()
class TestSession(unittest.TestCase):
    """End-to-end checks of uestc_eams.EAMSSession against the mock server:
    login, transparent re-login on expired cookies (with and without redirect
    following, plus HTTP-200 redirects) and logout."""

    @mock_login.Patch
    def test_Session(self):
        session = uestc_eams.EAMSSession()
        self.__session = session

        # A successful login must flip the mock's state and hit the
        # index page exactly once.
        print('--> Login test <--')
        self.assertTrue(session.Login('2015070804011', '104728'))
        self.assertTrue(mock_login.Logined)
        self.assertEqual(mock_login.GetIndexCount, 1)
        print('passed.', end='\n\n')

        # The session must notice the expired cookie, re-login and retry.
        print('--> test expired cookies <--')
        url = 'http://eams.uestc.edu.cn/eams'
        mock_login.ExpireTestTiggered = False
        response = session.TryRequestGet(url)
        self.assertTrue(mock_login.ExpireTestTiggered)
        self.assertTrue(mock_login.Logined)
        self.assertIn(url, response.url)
        print('passed.', end='\n\n')

        # Same recovery path, but redirects are not followed: the 302
        # itself must come back to the caller.
        print('--> test expired cookies (no redirects following) <--')
        url = 'http://eams.uestc.edu.cn/eams/redirect_test'
        mock_login.ExpireTestTiggered = False
        response = session.TryRequestGet(url, allow_redirects=False)
        self.assertTrue(mock_login.ExpireTestTiggered)
        self.assertTrue(mock_login.Logined)
        self.assertIn(url, response.url)
        self.assertEqual(response.status_code, 302)
        print('passed.', end='\n\n')

        # Expiry signalled through an HTTP-200 redirect page.
        print('--> test expired cookies (200 redirect) <--')
        url = 'http://eams.uestc.edu.cn/eams/200redirect'
        mock_login.ExpireTestTiggered = False
        mock_login._200RedirectTiggered = False
        session.TryRequestGet(url)
        self.assertTrue(mock_login.ExpireTestTiggered)
        self.assertTrue(mock_login._200RedirectTiggered)
        print('passed.', end='\n\n')

        # Logging out must clear the mock's logged-in state.
        print('--> test logout <--')
        self.assertTrue(session.Logout())
        self.assertFalse(mock_login.Logined)
        print('passed.', end='\n\n')
| [
"uestc_eams.EAMSSession"
] | [((329, 353), 'uestc_eams.EAMSSession', 'uestc_eams.EAMSSession', ([], {}), '()\n', (351, 353), False, 'import uestc_eams\n')] |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
from absl import app
from tensorflow.python.compat import v2_compat
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import values
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.util import nest
def gather(strategy, value):
  """Gathers value from all workers.

  This is intended for tests before we implement an official all-gather API.

  Args:
    strategy: a `tf.distribute.Strategy`.
    value: a nested structure of n-dim `tf.distribute.DistributedValue` of
      `tf.Tensor`, or of a `tf.Tensor` if the strategy only has one replica.
      Cannot contain tf.sparse.SparseTensor.

  Returns:
    a (n+1)-dim `tf.Tensor`.
  """

  def _gather_one(v):
    # Gather each leaf of the structure independently.
    return _gather(strategy, v)

  return nest.map_structure(_gather_one, value)
def _gather(strategy, value):
  """Gathers a single value."""
  # pylint: disable=protected-access
  if not isinstance(value, values.DistributedValues):
    value = values.PerReplica([ops.convert_to_tensor(value)])
  extended = strategy.extended
  is_collective = isinstance(
      extended, collective_all_reduce_strategy.CollectiveAllReduceExtended)
  if not is_collective:
    # Single-client strategies: simply stack the per-replica components.
    return array_ops.stack(value._values)
  assert len(extended.worker_devices) == len(value._values)
  # Add a leading axis of size 1 per replica so `gather` concatenates them.
  expanded = [array_ops.expand_dims_v2(v, axis=0) for v in value._values]
  return strategy.gather(values.PerReplica(expanded), axis=0)
  # pylint: enable=protected-access
def set_logical_devices_to_at_least(device, num):
  """Ensure at least `num` logical devices of type `device` exist.

  Args:
    device: device type string, e.g. "CPU" or "GPU".
    num: minimum number of logical devices, at least 1.

  Raises:
    ValueError: if `num` is smaller than 1.
    RuntimeError: if no physical device of the requested type is found.
  """
  if num < 1:
    raise ValueError("`num` must be at least 1 not %r" % (num,))
  physical_devices = config.list_physical_devices(device)
  if not physical_devices:
    raise RuntimeError("No {} found".format(device))
  missing = num - len(physical_devices)
  if missing <= 0:
    return
  # Each physical device already provides one logical device, so the last
  # physical device must host `missing + 1` logical devices to reach `num`.
  if device.upper() == "GPU":
    logical_devices = [
        context.LogicalDeviceConfiguration(memory_limit=2048)
        for _ in range(missing + 1)
    ]
  else:
    logical_devices = [
        context.LogicalDeviceConfiguration() for _ in range(missing + 1)
    ]
  # Split the last device since sometimes the first GPU is the primary
  # graphic card and may have less memory available.
  config.set_logical_device_configuration(physical_devices[-1], logical_devices)
def _set_logical_devices():
  """Splits any available GPU and CPU into at least two logical devices."""
  for device_type in ("GPU", "CPU"):
    if config.list_physical_devices(device_type):
      set_logical_devices_to_at_least(device_type, 2)
def main(enable_v2_behavior=True, config_logical_devices=True):
  """All-in-one main function for tf.distribute tests.

  Args:
    enable_v2_behavior: whether to enable (otherwise explicitly disable) TF2
      behavior before running the tests.
    config_logical_devices: whether to schedule logical-device creation after
      absl app initialization.
  """
  if config_logical_devices:
    app.call_after_init(_set_logical_devices)
  toggle = (v2_compat.enable_v2_behavior
            if enable_v2_behavior else v2_compat.disable_v2_behavior)
  toggle()
  # TODO(b/131360402): configure default logical devices.
  multi_process_runner.test_main()
def _op_dependencies(op):
  """Returns the data and control dependencies of a tf.Operation combined."""

  def to_op(node):
    # Data dependencies are Tensors; resolve them to their producing op.
    op_node = node.op if isinstance(node, ops.Tensor) else node
    assert isinstance(op_node, ops.Operation)
    return op_node

  return [to_op(n) for n in itertools.chain(op.inputs, op.control_inputs)]
def topological_sort_operations(operations):
  """Topological sorts a list of operations.

  This does a topological sort of the operations in a graph. The edges include
  both data dependencies and control dependencies. Note that the edge goes from
  an operation to its dependencies.

  Args:
    operations: a list of tf.Operation in the same graph.

  Returns:
    A map from a tf.Operation to its topological order.
  """
  # Count, for every operation, how many edges point at it.
  in_degrees = {}
  for op in operations:
    in_degrees.setdefault(op, 0)
    for next_op in _op_dependencies(op):
      in_degrees[next_op] = in_degrees.get(next_op, 0) + 1
  # Kahn's algorithm: repeatedly emit nodes whose in-degree has dropped to
  # zero. Consuming the queue with an index cursor instead of repeated list
  # slicing (`nexts[0], nexts[1:]` copies the list on every pop) keeps the
  # traversal O(V + E) rather than O(V^2).
  ready = [op for op, in_degree in in_degrees.items() if in_degree == 0]
  order = {}
  cursor = 0
  while cursor < len(ready):
    op = ready[cursor]
    order[op] = cursor
    cursor += 1
    for next_op in _op_dependencies(op):
      in_degrees[next_op] -= 1
      if in_degrees[next_op] == 0:
        ready.append(next_op)
  assert len(order) == len(operations)
  return order
def _exists_dependency(start, end):
  """Returns whether there exists a dependency chain from start to end."""
  # Track visited operations so shared dependencies are expanded only once.
  # Without this, diamonds in the graph cause exponential re-expansion and a
  # cycle would never terminate.
  visited = {start}
  stack = [start]
  while stack:
    op = stack.pop()
    for next_op in _op_dependencies(op):
      if next_op == end:
        return True
      if next_op not in visited:
        visited.add(next_op)
        stack.append(next_op)
  return False
def assert_sequential_execution(order, operations):
  """Asserts there's a deterministic execution order between the operations.

  Args:
    order: a map from a tf.Operation to its topological order.
    operations: a list of operations that should be executed sequentially. It
      can be given in any order.
  """
  # In a topological ordering, a dependency path can only run from a node
  # with a smaller order to one with a larger order. It therefore suffices
  # to sort the operations by their topological order and verify that each
  # adjacent pair is connected by a path of dependencies.
  ordered = sorted(operations, key=order.__getitem__)
  for earlier, later in zip(ordered, ordered[1:]):
    if not _exists_dependency(earlier, later):
      print(earlier.graph.as_graph_def())
      raise AssertionError(
          "No dependency between {} and {}. Graph is dumped to stdout.".format(
              earlier.name, later.name))
| [
"itertools.chain",
"tensorflow.python.distribute.multi_process_runner.test_main",
"tensorflow.python.ops.array_ops.stack",
"tensorflow.python.compat.v2_compat.enable_v2_behavior",
"tensorflow.python.ops.array_ops.expand_dims_v2",
"tensorflow.python.framework.config.set_logical_device_configuration",
"te... | [((2668, 2704), 'tensorflow.python.framework.config.list_physical_devices', 'config.list_physical_devices', (['device'], {}), '(device)\n', (2696, 2704), False, 'from tensorflow.python.framework import config\n'), ((3451, 3529), 'tensorflow.python.framework.config.set_logical_device_configuration', 'config.set_logical_device_configuration', (['physical_devices[-1]', 'logical_devices'], {}), '(physical_devices[-1], logical_devices)\n', (3490, 3529), False, 'from tensorflow.python.framework import config\n'), ((3565, 3600), 'tensorflow.python.framework.config.list_physical_devices', 'config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (3593, 3600), False, 'from tensorflow.python.framework import config\n'), ((3653, 3688), 'tensorflow.python.framework.config.list_physical_devices', 'config.list_physical_devices', (['"""CPU"""'], {}), "('CPU')\n", (3681, 3688), False, 'from tensorflow.python.framework import config\n'), ((4099, 4131), 'tensorflow.python.distribute.multi_process_runner.test_main', 'multi_process_runner.test_main', ([], {}), '()\n', (4129, 4131), False, 'from tensorflow.python.distribute import multi_process_runner\n'), ((4264, 4309), 'itertools.chain', 'itertools.chain', (['op.inputs', 'op.control_inputs'], {}), '(op.inputs, op.control_inputs)\n', (4279, 4309), False, 'import itertools\n'), ((1796, 1832), 'functools.partial', 'functools.partial', (['_gather', 'strategy'], {}), '(_gather, strategy)\n', (1813, 1832), False, 'import functools\n'), ((2189, 2219), 'tensorflow.python.ops.array_ops.stack', 'array_ops.stack', (['value._values'], {}), '(value._values)\n', (2204, 2219), False, 'from tensorflow.python.ops import array_ops\n'), ((2301, 2336), 'tensorflow.python.ops.array_ops.expand_dims_v2', 'array_ops.expand_dims_v2', (['v'], {'axis': '(0)'}), '(v, axis=0)\n', (2325, 2336), False, 'from tensorflow.python.ops import array_ops\n'), ((2386, 2411), 'tensorflow.python.distribute.values.PerReplica', 'values.PerReplica', 
(['inputs'], {}), '(inputs)\n', (2403, 2411), False, 'from tensorflow.python.distribute import values\n'), ((3893, 3934), 'absl.app.call_after_init', 'app.call_after_init', (['_set_logical_devices'], {}), '(_set_logical_devices)\n', (3912, 3934), False, 'from absl import app\n'), ((3964, 3994), 'tensorflow.python.compat.v2_compat.enable_v2_behavior', 'v2_compat.enable_v2_behavior', ([], {}), '()\n', (3992, 3994), False, 'from tensorflow.python.compat import v2_compat\n'), ((4007, 4038), 'tensorflow.python.compat.v2_compat.disable_v2_behavior', 'v2_compat.disable_v2_behavior', ([], {}), '()\n', (4036, 4038), False, 'from tensorflow.python.compat import v2_compat\n'), ((2027, 2055), 'tensorflow.python.framework.ops.convert_to_tensor', 'ops.convert_to_tensor', (['value'], {}), '(value)\n', (2048, 2055), False, 'from tensorflow.python.framework import ops\n'), ((3171, 3224), 'tensorflow.python.eager.context.LogicalDeviceConfiguration', 'context.LogicalDeviceConfiguration', ([], {'memory_limit': '(2048)'}), '(memory_limit=2048)\n', (3205, 3224), False, 'from tensorflow.python.eager import context\n'), ((3265, 3301), 'tensorflow.python.eager.context.LogicalDeviceConfiguration', 'context.LogicalDeviceConfiguration', ([], {}), '()\n', (3299, 3301), False, 'from tensorflow.python.eager import context\n')] |
import csv
class ExperimentDataCannotBeParsedError(Exception):
    """
    Custom exception type that is thrown when the experimental data
    cannot be processed (missing file, wrong columns, bad values).
    """

    def __init__(self, msg: str = "ERROR"):
        # Forward the message to Exception so `args`, `repr()` and pickling
        # behave normally (the original implementation left args empty).
        super().__init__(msg)
        self.message = msg

    def __str__(self):
        """
        Defines what to show when exception is printed - giving some useful info
        """
        return """
            Experiment data cannot be processed. Please take a look at them.
            Column names should be:
                - Time
                - Temperature
                - HeatFlux
                - T_amb
            Issue: {}
            """.format(self.message)
class Material:
    """
    Container for the thermal properties of a single material.

    Attributes:
        rho ..... mass density
        cp ...... specific heat capacity
        lmbd .... heat conductivity
    """

    def __init__(self, rho, cp, lmbd):
        """
        Args:
            rho ..... mass density
            cp ...... specific heat capacity
            lmbd .... heat conductivity
        """
        self.rho = rho
        self.cp = cp
        self.lmbd = lmbd
class ExperimentalData:
    """
    Class responsible for initializing and storing experimental data.

    The CSV file must contain a "Time" and a "T_amb" column, plus at least
    one of "Temperature" / "HeatFlux". Values missing in a row default to 0.
    TODO: really agree on the parsing logic (indexes vs named columns)
    """

    def __init__(self, csv_file_path="DATA.csv"):
        """
        Args:
            csv_file_path ... path of the CSV file with the experimental data

        Raises:
            ExperimentDataCannotBeParsedError ... when the file is missing,
                malformed, or lacks the required columns
        """
        # Preparing lists for storing all the data
        self.t_data = []  # experimental data of time points
        self.T_data = []  # experimental data of temperature
        self.q_data = []  # experimental data of Measured HeatFluxes (might be missing)
        self.T_amb_data = []  # experimental data of ambient temperature

        # Defining the column names we are expecting
        self.t_column_name = "Time"
        self.T_column_name = "Temperature"
        self.q_column_name = "HeatFlux"
        self.T_amb_column_name = "T_amb"

        # Trying to parse the file with experimental data, in case of any error
        # relay it with our custom name
        try:
            with open(csv_file_path) as csv_file:
                # NOTE: when using DictReader, skipping first row is not necessary,
                # on the contrary, we would be losing one row of data by it
                csv_reader = csv.DictReader(csv_file)

                # Validating the correctness of CSV file
                self.check_CSV_file_correctness(csv_reader.fieldnames)

                # Filling the data row by row
                for row in csv_reader:
                    self.t_data.append(float(row.get(self.t_column_name, 0)))
                    self.T_data.append(float(row.get(self.T_column_name, 0)))
                    self.q_data.append(float(row.get(self.q_column_name, 0)))
                    self.T_amb_data.append(float(row.get(self.T_amb_column_name, 0)))
        except ExperimentDataCannotBeParsedError:
            # Already carries a helpful message - just propagate it
            raise
        except Exception as e:
            # Chain the original exception (`from e`) so the root cause stays
            # visible in the traceback instead of being swallowed.
            raise ExperimentDataCannotBeParsedError(e) from e

    def check_CSV_file_correctness(self, column_names: list) -> None:
        """
        Making sure the CSV file contains the right columns.
        We need the time and ambient temperatures to be there all the time,
        and then at least one of the temperature and flux.
        In case of some problem throw our custom error.

        Args:
            column_names ... list of columns from the CSV file
        """
        if self.t_column_name not in column_names:
            msg = "Time data is empty, please use 'Time' column for this data"
            raise ExperimentDataCannotBeParsedError(msg)
        if self.T_amb_column_name not in column_names:
            msg = "Ambient temperature data is empty, please use 'T_amb' column for this data"
            raise ExperimentDataCannotBeParsedError(msg)
        if self.T_column_name not in column_names and self.q_column_name not in column_names:
            msg = "Temperature and flux data are empty, please use 'Temperature' and 'HeatFlux' columns for this data"
            raise ExperimentDataCannotBeParsedError(msg)
| [
"csv.DictReader"
] | [((2122, 2146), 'csv.DictReader', 'csv.DictReader', (['csv_file'], {}), '(csv_file)\n', (2136, 2146), False, 'import csv\n')] |
import sys
import numpy as np
import scipy.integrate
import scipy.special
from ._dblquad import dblquad
# Prefer the GSL bindings (pygsl) for quadrature and Bessel evaluation when
# they are installed; code further down checks HAVE_PYGSL and falls back to
# scipy routines when it stays False.
HAVE_PYGSL = False
try:
    import pygsl.integrate
    import pygsl.sf
    HAVE_PYGSL = True
except ImportError:
    pass
class BinEB(object):
def __init__(
self, tmin, tmax, Nb, windows=None, linear=False, useArcmin=True, fname=None
):
if fname is not None:
self.read_data(fname)
else:
# set basic params
if useArcmin:
am2r = np.pi / 180.0 / 60.0
else:
am2r = 1.0
self.Nb = Nb
self.L = tmin * am2r
self.H = tmax * am2r
if linear:
self.Lb = (self.H - self.L) / Nb * np.arange(Nb) + self.L
self.Hb = (self.H - self.L) / Nb * (np.arange(Nb) + 1.0) + self.L
else:
self.Lb = np.exp(np.log(self.H / self.L) / Nb * np.arange(Nb)) * self.L
self.Hb = (
np.exp(np.log(self.H / self.L) / Nb * (np.arange(Nb) + 1.0))
* self.L
)
self.have_ell_win = False
# make the bin window functions
if windows is None:
def _make_geomwin(L, H):
return lambda x: 2.0 * x / (H * H - L * L)
self.windows = []
for i in range(self.Nb):
self.windows.append(_make_geomwin(self.Lb[i], self.Hb[i]))
else:
def _make_normwin(winf, norm):
return lambda x: winf(x / am2r) / norm
self.windows = []
assert (
len(windows) == Nb
), "binEB requires as many windows as angular bins!"
for i in range(self.Nb):
twin = _make_normwin(windows[i], 1.0)
norm, err = scipy.integrate.quad(twin, self.Lb[i], self.Hb[i])
self.windows.append(_make_normwin(windows[i], norm))
# get fa and fb
self.fa = np.zeros(self.Nb)
self.fa[:] = 1.0
if HAVE_PYGSL:
limit = 10
epsabs = 1e-8
epsrel = 1e-8
w = pygsl.integrate.workspace(limit)
def fb_int(x, args):
win = args[0]
return win(x) * x * x
self.fb = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(fb_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
self.fb[i] = val
else:
def fb_int(x, win):
return win(x) * x * x
self.fb = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
fb_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
self.fb[i] = val
self.fa_on = self.fa / np.sqrt(np.sum(self.fa * self.fa))
self.fb_on = self.fb - self.fa * np.sum(self.fa * self.fb) / np.sum(
self.fa * self.fa
)
self.fb_on = self.fb_on / np.sqrt(np.sum(self.fb_on * self.fb_on))
# get Mplus matrix
if HAVE_PYGSL:
limit = 10
epsabs = 1e-8
epsrel = 1e-8
w = pygsl.integrate.workspace(limit)
def knorm_int(x, args):
win = args[0]
return win(x) * win(x) / x
knorm = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(knorm_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
knorm[i] = val
self.invnorm = knorm
def inv2_int(x, args):
win = args[0]
return win(x) / x / x
inv2 = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(inv2_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
inv2[i] = val
def inv4_int(x, args):
win = args[0]
return win(x) / x / x / x / x
inv4 = np.zeros(self.Nb)
for i in range(self.Nb):
args = [self.windows[i]]
f = pygsl.integrate.gsl_function(inv4_int, args)
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
inv4[i] = val
else:
def knorm_int(x, win):
return win(x) * win(x) / x
knorm = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
knorm_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
knorm[i] = val
self.invnorm = knorm
def inv2_int(x, win):
return win(x) / x / x
inv2 = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
inv2_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
inv2[i] = val
def inv4_int(x, win):
return win(x) / x / x / x / x
inv4 = np.zeros(self.Nb)
for i in range(self.Nb):
val, err = scipy.integrate.quad(
inv4_int, self.Lb[i], self.Hb[i], args=(self.windows[i],)
)
inv4[i] = val
if HAVE_PYGSL:
def _mp_int(p, args):
t = args[0]
k = args[1]
i = args[2]
if p > t:
val = (
(4.0 / p / p - 12.0 * t * t / p / p / p / p)
* self.windows[k](p)
* self.windows[i](t)
)
else:
val = 0.0
return val
else:
def _mp_int(p, t, k, i):
if p > t:
return (
(4.0 / p / p - 12.0 * t * t / p / p / p / p)
* self.windows[k](p)
* self.windows[i](t)
)
else:
return 0.0
self.mp = np.zeros((self.Nb, self.Nb))
for k in range(self.Nb):
# sys.stdout.write("|")
for i in range(self.Nb):
if windows is None:
if i < k:
self.mp[k, i] += (
2.0
/ (self.Hb[i] * self.Hb[i] - self.Lb[i] * self.Lb[i])
* (
2.0
* (
self.Hb[i] * self.Hb[i]
- self.Lb[i] * self.Lb[i]
)
* np.log(self.Hb[k] / self.Lb[k])
+ 3.0
/ 2.0
* (
np.power(self.Hb[i], 4.0)
- np.power(self.Lb[i], 4.0)
)
* (
1.0 / self.Hb[k] / self.Hb[k]
- 1.0 / self.Lb[k] / self.Lb[k]
)
)
)
if k == i:
self.mp[k, i] += 1.0
self.mp[k, i] += (
2.0
/ (self.Hb[i] * self.Hb[i] - self.Lb[i] * self.Lb[i])
* (
-0.5
* (
self.Hb[k] * self.Hb[k]
- self.Lb[k] * self.Lb[k]
)
- 2.0
* self.Lb[i]
* self.Lb[i]
* np.log(self.Hb[k] / self.Lb[k])
- 3.0
/ 2.0
* np.power(self.Lb[i], 4.0)
* (
1.0 / self.Hb[k] / self.Hb[k]
- 1.0 / self.Lb[k] / self.Lb[k]
)
)
)
else:
if k == i:
self.mp[k, i] += 1.0
val = dblquad(
_mp_int,
self.Lb[i],
self.Hb[i],
lambda x: self.Lb[k],
lambda x: self.Hb[k],
args=(k, i),
)
self.mp[k, i] += val / knorm[k]
if i < k:
self.mp[k, i] = (
4.0 * inv2[k] - 12.0 * inv4[k] * self.fb[i]
) / knorm[k]
# sys.stdout.write("\n")
if HAVE_PYGSL:
def _mm_int(p, args):
t = args[0]
k = args[1]
i = args[2]
if t > p:
val = (
(4.0 / t / t - 12.0 * p * p / t / t / t / t)
* self.windows[k](p)
* self.windows[i](t)
)
else:
val = 0.0
return val
else:
def _mm_int(p, t, k, i):
if t > p:
return (
(4.0 / t / t - 12.0 * p * p / t / t / t / t)
* self.windows[k](p)
* self.windows[i](t)
)
else:
return 0.0
self.mm = np.zeros((self.Nb, self.Nb))
for k in range(self.Nb):
# sys.stdout.write("|")
for i in range(self.Nb):
if windows is None:
if i > k:
self.mm[k, i] += (
2.0
/ (self.Hb[i] * self.Hb[i] - self.Lb[i] * self.Lb[i])
* (
2.0
* (
self.Hb[k] * self.Hb[k]
- self.Lb[k] * self.Lb[k]
)
* np.log(self.Hb[i] / self.Lb[i])
+ 3.0
/ 2.0
* (
np.power(self.Hb[k], 4.0)
- np.power(self.Lb[k], 4.0)
)
* (
1.0 / self.Hb[i] / self.Hb[i]
- 1.0 / self.Lb[i] / self.Lb[i]
)
)
)
if k == i:
self.mm[k, i] += 1.0
self.mm[k, i] += (
2.0
/ (self.Hb[i] * self.Hb[i] - self.Lb[i] * self.Lb[i])
* (
0.5
* (
-1.0 * self.Hb[k] * self.Hb[k]
+ self.Lb[k]
* self.Lb[k]
* (
4.0
- 3.0
* self.Lb[k]
* self.Lb[k]
/ self.Hb[i]
/ self.Hb[i]
- 4.0 * np.log(self.Hb[i] / self.Lb[k])
)
)
)
)
else:
if k == i:
self.mm[k, i] += 1.0
val = dblquad(
_mm_int,
self.Lb[i],
self.Hb[i],
lambda x: self.Lb[k],
lambda x: self.Hb[k],
args=(k, i),
)
self.mm[k, i] += val / knorm[k]
if i > k:
self.mm[k, i] = (
4.0 * inv2[i] - 12.0 * inv4[i] * self.fb[k]
) / knorm[k]
# sys.stdout.write("\n")
# compute the ell windows
self.comp_ell_windows()
def comp_ell_windows(self):
# get the windows in ell
self.have_ell_win = True
if HAVE_PYGSL:
def ellwin_int(theta, args):
ell = args[0]
win = args[1]
n = args[2]
return (pygsl.sf.bessel_Jn(n, ell * theta))[0] * win(theta)
else:
def ellwin_int(theta, ell, win, n):
return scipy.special.jn(n, ell * theta) * win(theta)
self.ellv = np.logspace(0.0, 5.5, 1500)
self.ellwindowsJ0 = np.zeros((self.Nb, len(self.ellv)))
self.ellwindowsJ4 = np.zeros((self.Nb, len(self.ellv)))
for i in range(self.Nb):
sys.stdout.write("|")
sys.stdout.flush()
if HAVE_PYGSL:
epsabs = 1e-6
epsrel = 1e-6
limit = 1000
w = pygsl.integrate.workspace(limit)
for j, ell in enumerate(self.ellv):
args = [ell, self.windows[i], 0]
f = pygsl.integrate.gsl_function(ellwin_int, args)
# code,val,err = pygsl.integrate.qag(
# f,self.Lb[i],self.Hb[i],epsabs,epsrel,
# limit,pygsl.integrate.GAUSS61,w
# )
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
self.ellwindowsJ0[i, j] = val
for j, ell in enumerate(self.ellv):
args = [ell, self.windows[i], 4]
f = pygsl.integrate.gsl_function(ellwin_int, args)
# code,val,err = pygsl.integrate.qag(
# f,self.Lb[i],self.Hb[i],epsabs,epsrel,limit,
# pygsl.integrate.GAUSS61,w
# )
code, val, err = pygsl.integrate.qags(
f, self.Lb[i], self.Hb[i], epsabs, epsrel, limit, w
)
self.ellwindowsJ4[i, j] = val
else:
win0 = np.array(
[
(
scipy.integrate.quad(
ellwin_int,
self.Lb[i],
self.Hb[i],
args=(ell, self.windows[i], 0),
)
)[0]
for ell in self.ellv
]
)
win4 = np.array(
[
(
scipy.integrate.quad(
ellwin_int,
self.Lb[i],
self.Hb[i],
args=(ell, self.windows[i], 4),
)
)[0]
for ell in self.ellv
]
)
self.ellwindowsJ0[i, :] = win0
self.ellwindowsJ4[i, :] = win4
sys.stdout.write("\n")
def write_data(self, fname):
"""
writes a simple text file with object info
# N L H
100 1.0 400.0
# Lb
1.0 1.2 ... 398.0
# Hb
1.2 1.4 ... 400.0
# fa
1.0 1.0 .... 1.0
# fb
blah blah ... blah
# fa_on
blah blah ... blah
# fb_on
blah blah ... blah
# invnorm
blah blah ... blah
# Mplus
blah blah ... blah
blah blah ... blah
.
.
.
blah blah ... blah
# Mminus
blah blah ... blah
blah blah ... blah
.
.
.
blah blah ... blah
# ellv
blah blah ... blah
# ellwinJ0
blah blah ... blah
blah blah ... blah
.
.
.
blah blah ... blah
# ellwinJ4
blah blah ... blah
blah blah ... blah
.
.
.
blah blah ... blah
"""
def write_vec(fp, vec):
for val in vec:
fp.write("%.20lg " % val)
fp.write("\n#\n")
def write_mat(fp, mat):
shape = mat.shape
for i in range(shape[0]):
for val in mat[i, :]:
fp.write("%.20lg " % val)
fp.write("\n")
fp.write("#\n")
fp = open(fname, "w")
fp.write("# N L H\n")
fp.write("%ld %.20lg %.20lg\n" % (self.Nb, self.L, self.H))
fp.write("# Lb\n")
write_vec(fp, self.Lb)
fp.write("# Hb\n")
write_vec(fp, self.Hb)
fp.write("# fa\n")
write_vec(fp, self.fa)
fp.write("# fb\n")
write_vec(fp, self.fb)
fp.write("# fa_on\n")
write_vec(fp, self.fa_on)
fp.write("# fb_on\n")
write_vec(fp, self.fb_on)
fp.write("# invnorm\n")
write_vec(fp, self.invnorm)
fp.write("# Mplus\n")
write_mat(fp, self.mp)
fp.write("# Mminus\n")
write_mat(fp, self.mm)
fp.write("# ellv\n")
write_vec(fp, self.ellv)
fp.write("# ellwinJ0\n")
write_mat(fp, self.ellwindowsJ0)
fp.write("# ellwinJ4\n")
write_mat(fp, self.ellwindowsJ4)
fp.close()
def read_data(self, fname):
def read_vec(fp):
line = fp.readline()
line = line.strip()
val = np.array([float(tag) for tag in line.split()])
line = fp.readline()
return val
def read_mat(fp):
mat = []
line = fp.readline()
while line[0] != "#":
line = line.strip()
mat.append([float(tag) for tag in line.split()])
line = fp.readline()
mat = np.array(mat)
return mat
fp = open(fname, "r")
line = fp.readline()
line = fp.readline()
line = line.strip()
line = line.split()
self.Nb = int(line[0])
self.L = float(line[1])
self.H = float(line[2])
line = fp.readline()
self.Lb = read_vec(fp)
line = fp.readline()
self.Hb = read_vec(fp)
line = fp.readline()
self.fa = read_vec(fp)
line = fp.readline()
self.fb = read_vec(fp)
line = fp.readline()
self.fa_on = read_vec(fp)
line = fp.readline()
self.fb_on = read_vec(fp)
line = fp.readline()
self.invnorm = read_vec(fp)
line = fp.readline()
self.mp = read_mat(fp)
line = fp.readline()
self.mm = read_mat(fp)
line = fp.readline()
self.ellv = read_vec(fp)
line = fp.readline()
self.ellwindowsJ0 = read_mat(fp)
line = fp.readline()
self.ellwindowsJ4 = read_mat(fp)
self.have_ell_win = True
fp.close()
def fplusminus(self, fptest):
fp = fptest - np.sum(fptest * self.fa_on) * self.fa_on
fp = fp - np.sum(fp * self.fb_on) * self.fb_on
fm = np.dot(self.mp, fp)
"""
code to test
fm = np.zeros(len(fp))
for i in range(len(fp)):
for j in range(len(fp)):
fm[i] += self.mp[i,j]*fp[j]
print fm-np.dot(self.mp,fp)
"""
return fp, fm
def wplus(self, fp, fm):
if not self.have_ell_win:
self.comp_ell_windows()
psum = np.array(
[np.sum(self.ellwindowsJ0[:, i] * fp) for i in range(len(self.ellv))]
)
msum = np.array(
[np.sum(self.ellwindowsJ4[:, i] * fm) for i in range(len(self.ellv))]
)
return self.ellv.copy(), (psum + msum) * 0.5
def wminus(self, fp, fm):
if not self.have_ell_win:
self.comp_ell_windows()
psum = np.array(
[np.sum(self.ellwindowsJ0[:, i] * fp) for i in range(len(self.ellv))]
)
msum = np.array(
[np.sum(self.ellwindowsJ4[:, i] * fm) for i in range(len(self.ellv))]
)
return self.ellv.copy(), (psum - msum) * 0.5
def wplusminus(self, fp, fm):
if not self.have_ell_win:
self.comp_ell_windows()
psum = np.array(
[np.sum(self.ellwindowsJ0[:, i] * fp) for i in range(len(self.ellv))]
)
msum = np.array(
[np.sum(self.ellwindowsJ4[:, i] * fm) for i in range(len(self.ellv))]
)
return self.ellv.copy(), (psum + msum) * 0.5, (psum - msum) * 0.5
| [
"numpy.power",
"numpy.log",
"numpy.array",
"numpy.dot",
"numpy.zeros",
"numpy.sum",
"sys.stdout.flush",
"numpy.logspace",
"numpy.arange",
"sys.stdout.write"
] | [((15257, 15284), 'numpy.logspace', 'np.logspace', (['(0.0)', '(5.5)', '(1500)'], {}), '(0.0, 5.5, 1500)\n', (15268, 15284), True, 'import numpy as np\n'), ((17918, 17940), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (17934, 17940), False, 'import sys\n'), ((22001, 22020), 'numpy.dot', 'np.dot', (['self.mp', 'fp'], {}), '(self.mp, fp)\n', (22007, 22020), True, 'import numpy as np\n'), ((2093, 2110), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (2101, 2110), True, 'import numpy as np\n'), ((7288, 7316), 'numpy.zeros', 'np.zeros', (['(self.Nb, self.Nb)'], {}), '((self.Nb, self.Nb))\n', (7296, 7316), True, 'import numpy as np\n'), ((11470, 11498), 'numpy.zeros', 'np.zeros', (['(self.Nb, self.Nb)'], {}), '((self.Nb, self.Nb))\n', (11478, 11498), True, 'import numpy as np\n'), ((15458, 15479), 'sys.stdout.write', 'sys.stdout.write', (['"""|"""'], {}), "('|')\n", (15474, 15479), False, 'import sys\n'), ((15492, 15510), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (15508, 15510), False, 'import sys\n'), ((20743, 20756), 'numpy.array', 'np.array', (['mat'], {}), '(mat)\n', (20751, 20756), True, 'import numpy as np\n'), ((2449, 2466), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (2457, 2466), True, 'import numpy as np\n'), ((2938, 2955), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (2946, 2955), True, 'import numpy as np\n'), ((3814, 3831), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (3822, 3831), True, 'import numpy as np\n'), ((4357, 4374), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (4365, 4374), True, 'import numpy as np\n'), ((4869, 4886), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (4877, 4886), True, 'import numpy as np\n'), ((5363, 5380), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (5371, 5380), True, 'import numpy as np\n'), ((5757, 5774), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (5765, 
5774), True, 'import numpy as np\n'), ((6120, 6137), 'numpy.zeros', 'np.zeros', (['self.Nb'], {}), '(self.Nb)\n', (6128, 6137), True, 'import numpy as np\n'), ((21892, 21919), 'numpy.sum', 'np.sum', (['(fptest * self.fa_on)'], {}), '(fptest * self.fa_on)\n', (21898, 21919), True, 'import numpy as np\n'), ((21951, 21974), 'numpy.sum', 'np.sum', (['(fp * self.fb_on)'], {}), '(fp * self.fb_on)\n', (21957, 21974), True, 'import numpy as np\n'), ((22407, 22443), 'numpy.sum', 'np.sum', (['(self.ellwindowsJ0[:, i] * fp)'], {}), '(self.ellwindowsJ0[:, i] * fp)\n', (22413, 22443), True, 'import numpy as np\n'), ((22524, 22560), 'numpy.sum', 'np.sum', (['(self.ellwindowsJ4[:, i] * fm)'], {}), '(self.ellwindowsJ4[:, i] * fm)\n', (22530, 22560), True, 'import numpy as np\n'), ((22795, 22831), 'numpy.sum', 'np.sum', (['(self.ellwindowsJ0[:, i] * fp)'], {}), '(self.ellwindowsJ0[:, i] * fp)\n', (22801, 22831), True, 'import numpy as np\n'), ((22912, 22948), 'numpy.sum', 'np.sum', (['(self.ellwindowsJ4[:, i] * fm)'], {}), '(self.ellwindowsJ4[:, i] * fm)\n', (22918, 22948), True, 'import numpy as np\n'), ((23187, 23223), 'numpy.sum', 'np.sum', (['(self.ellwindowsJ0[:, i] * fp)'], {}), '(self.ellwindowsJ0[:, i] * fp)\n', (23193, 23223), True, 'import numpy as np\n'), ((23304, 23340), 'numpy.sum', 'np.sum', (['(self.ellwindowsJ4[:, i] * fm)'], {}), '(self.ellwindowsJ4[:, i] * fm)\n', (23310, 23340), True, 'import numpy as np\n'), ((3233, 3258), 'numpy.sum', 'np.sum', (['(self.fa * self.fa)'], {}), '(self.fa * self.fa)\n', (3239, 3258), True, 'import numpy as np\n'), ((3333, 3358), 'numpy.sum', 'np.sum', (['(self.fa * self.fa)'], {}), '(self.fa * self.fa)\n', (3339, 3358), True, 'import numpy as np\n'), ((3435, 3466), 'numpy.sum', 'np.sum', (['(self.fb_on * self.fb_on)'], {}), '(self.fb_on * self.fb_on)\n', (3441, 3466), True, 'import numpy as np\n'), ((751, 764), 'numpy.arange', 'np.arange', (['Nb'], {}), '(Nb)\n', (760, 764), True, 'import numpy as np\n'), ((3305, 3330), 
'numpy.sum', 'np.sum', (['(self.fa * self.fb)'], {}), '(self.fa * self.fb)\n', (3311, 3330), True, 'import numpy as np\n'), ((826, 839), 'numpy.arange', 'np.arange', (['Nb'], {}), '(Nb)\n', (835, 839), True, 'import numpy as np\n'), ((938, 951), 'numpy.arange', 'np.arange', (['Nb'], {}), '(Nb)\n', (947, 951), True, 'import numpy as np\n'), ((907, 930), 'numpy.log', 'np.log', (['(self.H / self.L)'], {}), '(self.H / self.L)\n', (913, 930), True, 'import numpy as np\n'), ((1017, 1040), 'numpy.log', 'np.log', (['(self.H / self.L)'], {}), '(self.H / self.L)\n', (1023, 1040), True, 'import numpy as np\n'), ((1049, 1062), 'numpy.arange', 'np.arange', (['Nb'], {}), '(Nb)\n', (1058, 1062), True, 'import numpy as np\n'), ((8000, 8031), 'numpy.log', 'np.log', (['(self.Hb[k] / self.Lb[k])'], {}), '(self.Hb[k] / self.Lb[k])\n', (8006, 8031), True, 'import numpy as np\n'), ((12182, 12213), 'numpy.log', 'np.log', (['(self.Hb[i] / self.Lb[i])'], {}), '(self.Hb[i] / self.Lb[i])\n', (12188, 12213), True, 'import numpy as np\n'), ((9328, 9359), 'numpy.log', 'np.log', (['(self.Hb[k] / self.Lb[k])'], {}), '(self.Hb[k] / self.Lb[k])\n', (9334, 9359), True, 'import numpy as np\n'), ((9482, 9507), 'numpy.power', 'np.power', (['self.Lb[i]', '(4.0)'], {}), '(self.Lb[i], 4.0)\n', (9490, 9507), True, 'import numpy as np\n'), ((8196, 8221), 'numpy.power', 'np.power', (['self.Hb[i]', '(4.0)'], {}), '(self.Hb[i], 4.0)\n', (8204, 8221), True, 'import numpy as np\n'), ((8264, 8289), 'numpy.power', 'np.power', (['self.Lb[i]', '(4.0)'], {}), '(self.Lb[i], 4.0)\n', (8272, 8289), True, 'import numpy as np\n'), ((12378, 12403), 'numpy.power', 'np.power', (['self.Hb[k]', '(4.0)'], {}), '(self.Hb[k], 4.0)\n', (12386, 12403), True, 'import numpy as np\n'), ((12446, 12471), 'numpy.power', 'np.power', (['self.Lb[k]', '(4.0)'], {}), '(self.Lb[k], 4.0)\n', (12454, 12471), True, 'import numpy as np\n'), ((13763, 13794), 'numpy.log', 'np.log', (['(self.Hb[i] / self.Lb[k])'], {}), '(self.Hb[i] / self.Lb[k])\n', 
(13769, 13794), True, 'import numpy as np\n')] |
import unittest
from unittest.mock import mock_open, patch
from aoc.d8.main import metadata_sum, supervalue
DATA = "2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2\n"
class TestCase(unittest.TestCase):
def test_metadata_sum(self):
with patch("builtins.open", mock_open(read_data=DATA)):
self.assertEqual(138, metadata_sum())
def test_supervalue(self):
with patch("builtins.open", mock_open(read_data=DATA)):
self.assertEqual(66, supervalue())
if __name__ == "__main__":
unittest.main()
| [
"unittest.main",
"unittest.mock.mock_open",
"aoc.d8.main.supervalue",
"aoc.d8.main.metadata_sum"
] | [((517, 532), 'unittest.main', 'unittest.main', ([], {}), '()\n', (530, 532), False, 'import unittest\n'), ((263, 288), 'unittest.mock.mock_open', 'mock_open', ([], {'read_data': 'DATA'}), '(read_data=DATA)\n', (272, 288), False, 'from unittest.mock import mock_open, patch\n'), ((325, 339), 'aoc.d8.main.metadata_sum', 'metadata_sum', ([], {}), '()\n', (337, 339), False, 'from aoc.d8.main import metadata_sum, supervalue\n'), ((409, 434), 'unittest.mock.mock_open', 'mock_open', ([], {'read_data': 'DATA'}), '(read_data=DATA)\n', (418, 434), False, 'from unittest.mock import mock_open, patch\n'), ((470, 482), 'aoc.d8.main.supervalue', 'supervalue', ([], {}), '()\n', (480, 482), False, 'from aoc.d8.main import metadata_sum, supervalue\n')] |
import stko
import pytest
try:
from openbabel import openbabel
except ImportError:
openbabel = None
def test_open_babel_energy(unoptimized_mol):
if openbabel is None:
with pytest.raises(stko.WrapperNotInstalledException):
calculator = stko.OpenBabelEnergy('uff')
else:
calculator = stko.OpenBabelEnergy('uff')
test_energy = calculator.get_energy(unoptimized_mol)
assert test_energy == 141.44622279628743
calculator = stko.OpenBabelEnergy('gaff')
test_energy = calculator.get_energy(unoptimized_mol)
assert test_energy == 66.47095418890525
calculator = stko.OpenBabelEnergy('ghemical')
test_energy = calculator.get_energy(unoptimized_mol)
assert test_energy == 86.59956589041794
calculator = stko.OpenBabelEnergy('mmff94')
test_energy = calculator.get_energy(unoptimized_mol)
assert test_energy == 7.607999187460175
| [
"stko.OpenBabelEnergy",
"pytest.raises"
] | [((331, 358), 'stko.OpenBabelEnergy', 'stko.OpenBabelEnergy', (['"""uff"""'], {}), "('uff')\n", (351, 358), False, 'import stko\n'), ((491, 519), 'stko.OpenBabelEnergy', 'stko.OpenBabelEnergy', (['"""gaff"""'], {}), "('gaff')\n", (511, 519), False, 'import stko\n'), ((651, 683), 'stko.OpenBabelEnergy', 'stko.OpenBabelEnergy', (['"""ghemical"""'], {}), "('ghemical')\n", (671, 683), False, 'import stko\n'), ((815, 845), 'stko.OpenBabelEnergy', 'stko.OpenBabelEnergy', (['"""mmff94"""'], {}), "('mmff94')\n", (835, 845), False, 'import stko\n'), ((196, 244), 'pytest.raises', 'pytest.raises', (['stko.WrapperNotInstalledException'], {}), '(stko.WrapperNotInstalledException)\n', (209, 244), False, 'import pytest\n'), ((271, 298), 'stko.OpenBabelEnergy', 'stko.OpenBabelEnergy', (['"""uff"""'], {}), "('uff')\n", (291, 298), False, 'import stko\n')] |
from rest_framework import status
from rest_framework.response import Response
from rest_framework.generics import GenericAPIView
from ..permissions import IsAuthenticated
from django.core.cache import cache
from django.conf import settings
from ..authentication import TokenAuthentication
from ..app_settings import (
MembershipDeclineSerializer,
)
class MembershipDeclineView(GenericAPIView):
authentication_classes = (TokenAuthentication, )
permission_classes = (IsAuthenticated,)
allowed_methods = ('POST', 'OPTIONS', 'HEAD')
def get(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
def put(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
def post(self, request, *args, **kwargs):
"""
Marks a membership as declined. In addition deletes now unnecessary information.
:param request:
:param uuid: share_right_id
:param args:
:param kwargs:
:return: 200 / 403
"""
serializer = MembershipDeclineSerializer(data=request.data, context=self.get_serializer_context())
if not serializer.is_valid():
return Response(
serializer.errors, status=status.HTTP_400_BAD_REQUEST
)
membership_obj = serializer.validated_data.get('membership_obj')
membership_obj.accepted = False
membership_obj.save()
if settings.CACHE_ENABLE:
cache_key = 'psono_user_status_' + str(membership_obj.user.id)
cache.delete(cache_key)
return Response(status=status.HTTP_200_OK)
def delete(self, *args, **kwargs):
return Response({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)
| [
"django.core.cache.cache.delete",
"rest_framework.response.Response"
] | [((603, 658), 'rest_framework.response.Response', 'Response', (['{}'], {'status': 'status.HTTP_405_METHOD_NOT_ALLOWED'}), '({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n', (611, 658), False, 'from rest_framework.response import Response\n'), ((711, 766), 'rest_framework.response.Response', 'Response', (['{}'], {'status': 'status.HTTP_405_METHOD_NOT_ALLOWED'}), '({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n', (719, 766), False, 'from rest_framework.response import Response\n'), ((1626, 1661), 'rest_framework.response.Response', 'Response', ([], {'status': 'status.HTTP_200_OK'}), '(status=status.HTTP_200_OK)\n', (1634, 1661), False, 'from rest_framework.response import Response\n'), ((1717, 1772), 'rest_framework.response.Response', 'Response', (['{}'], {'status': 'status.HTTP_405_METHOD_NOT_ALLOWED'}), '({}, status=status.HTTP_405_METHOD_NOT_ALLOWED)\n', (1725, 1772), False, 'from rest_framework.response import Response\n'), ((1226, 1289), 'rest_framework.response.Response', 'Response', (['serializer.errors'], {'status': 'status.HTTP_400_BAD_REQUEST'}), '(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n', (1234, 1289), False, 'from rest_framework.response import Response\n'), ((1586, 1609), 'django.core.cache.cache.delete', 'cache.delete', (['cache_key'], {}), '(cache_key)\n', (1598, 1609), False, 'from django.core.cache import cache\n')] |
import batoid
import numpy as np
import math
from test_helpers import timer, do_pickle, all_obj_diff
@timer
def test_properties():
import random
random.seed(5)
for i in range(100):
R = random.gauss(0.7, 0.8)
sphere = batoid.Sphere(R)
assert sphere.R == R
do_pickle(sphere)
@timer
def test_sag():
import random
random.seed(57)
for i in range(100):
R = random.gauss(4.2, 0.3)
sphere = batoid.Sphere(R)
for j in range(10):
x = random.uniform(-0.7*R, 0.7*R)
y = random.uniform(-0.7*R, 0.7*R)
result = sphere.sag(x, y)
np.testing.assert_allclose(result, R*(1-math.sqrt(1.0-(x*x + y*y)/R/R)))
# Check that it returned a scalar float and not an array
assert isinstance(result, float)
# Check vectorization
x = np.random.uniform(-0.7*R, 0.7*R, size=(10, 10))
y = np.random.uniform(-0.7*R, 0.7*R, size=(10, 10))
np.testing.assert_allclose(sphere.sag(x, y), R*(1-np.sqrt(1.0-(x*x + y*y)/R/R)))
# Make sure non-unit stride arrays also work
np.testing.assert_allclose(
sphere.sag(x[::5,::2], y[::5,::2]),
(R*(1-np.sqrt(1.0-(x*x + y*y)/R/R)))[::5, ::2]
)
@timer
def test_intersect():
import random
random.seed(577)
for i in range(100):
R = random.gauss(10.0, 0.1)
sphere = batoid.Sphere(R)
for j in range(10):
x = random.gauss(0.0, 1.0)
y = random.gauss(0.0, 1.0)
# If we shoot rays straight up, then it's easy to predict the
# intersection points.
r0 = batoid.Ray(x, y, -1000, 0, 0, 1, 0)
r = sphere.intersect(r0)
np.testing.assert_allclose(r.r[0], x)
np.testing.assert_allclose(r.r[1], y)
np.testing.assert_allclose(r.r[2], sphere.sag(x, y), rtol=0, atol=1e-9)
# Check normal for R=0 paraboloid (a plane)
sphere = batoid.Sphere(0.0)
np.testing.assert_array_equal(sphere.normal(0.1,0.1), [0,0,1])
@timer
def test_intersect_vectorized():
import random
random.seed(5772)
r0s = [batoid.Ray([random.gauss(0.0, 0.1),
random.gauss(0.0, 0.1),
random.gauss(10.0, 0.1)],
[random.gauss(0.0, 0.1),
random.gauss(0.0, 0.1),
random.gauss(-1.0, 0.1)],
random.gauss(0.0, 0.1))
for i in range(1000)]
r0s = batoid.RayVector(r0s)
for i in range(100):
R = random.gauss(0.05, 0.01)
sphere = batoid.Sphere(R)
r1s = sphere.intersect(r0s)
r2s = batoid.RayVector([sphere.intersect(r0) for r0 in r0s])
assert r1s == r2s
@timer
def test_ne():
objs = [
batoid.Sphere(1.0),
batoid.Sphere(2.0),
batoid.Plane()
]
all_obj_diff(objs)
@timer
def test_fail():
sphere = batoid.Sphere(1.0)
ray = batoid.Ray([0,0,-1], [0,0,-1])
ray = sphere.intersect(ray)
assert ray.failed
ray = batoid.Ray([0,0,-1], [0,0,-1])
sphere.intersectInPlace(ray)
assert ray.failed
if __name__ == '__main__':
test_properties()
test_sag()
test_intersect()
test_intersect_vectorized()
test_ne()
test_fail()
| [
"batoid.Ray",
"batoid.Plane",
"random.uniform",
"numpy.sqrt",
"test_helpers.do_pickle",
"numpy.testing.assert_allclose",
"batoid.RayVector",
"math.sqrt",
"random.seed",
"test_helpers.all_obj_diff",
"numpy.random.uniform",
"batoid.Sphere",
"random.gauss"
] | [((155, 169), 'random.seed', 'random.seed', (['(5)'], {}), '(5)\n', (166, 169), False, 'import random\n'), ((366, 381), 'random.seed', 'random.seed', (['(57)'], {}), '(57)\n', (377, 381), False, 'import random\n'), ((1331, 1347), 'random.seed', 'random.seed', (['(577)'], {}), '(577)\n', (1342, 1347), False, 'import random\n'), ((1995, 2013), 'batoid.Sphere', 'batoid.Sphere', (['(0.0)'], {}), '(0.0)\n', (2008, 2013), False, 'import batoid\n'), ((2145, 2162), 'random.seed', 'random.seed', (['(5772)'], {}), '(5772)\n', (2156, 2162), False, 'import random\n'), ((2538, 2559), 'batoid.RayVector', 'batoid.RayVector', (['r0s'], {}), '(r0s)\n', (2554, 2559), False, 'import batoid\n'), ((2914, 2932), 'test_helpers.all_obj_diff', 'all_obj_diff', (['objs'], {}), '(objs)\n', (2926, 2932), False, 'from test_helpers import timer, do_pickle, all_obj_diff\n'), ((2972, 2990), 'batoid.Sphere', 'batoid.Sphere', (['(1.0)'], {}), '(1.0)\n', (2985, 2990), False, 'import batoid\n'), ((3001, 3035), 'batoid.Ray', 'batoid.Ray', (['[0, 0, -1]', '[0, 0, -1]'], {}), '([0, 0, -1], [0, 0, -1])\n', (3011, 3035), False, 'import batoid\n'), ((3097, 3131), 'batoid.Ray', 'batoid.Ray', (['[0, 0, -1]', '[0, 0, -1]'], {}), '([0, 0, -1], [0, 0, -1])\n', (3107, 3131), False, 'import batoid\n'), ((207, 229), 'random.gauss', 'random.gauss', (['(0.7)', '(0.8)'], {}), '(0.7, 0.8)\n', (219, 229), False, 'import random\n'), ((247, 263), 'batoid.Sphere', 'batoid.Sphere', (['R'], {}), '(R)\n', (260, 263), False, 'import batoid\n'), ((301, 318), 'test_helpers.do_pickle', 'do_pickle', (['sphere'], {}), '(sphere)\n', (310, 318), False, 'from test_helpers import timer, do_pickle, all_obj_diff\n'), ((419, 441), 'random.gauss', 'random.gauss', (['(4.2)', '(0.3)'], {}), '(4.2, 0.3)\n', (431, 441), False, 'import random\n'), ((459, 475), 'batoid.Sphere', 'batoid.Sphere', (['R'], {}), '(R)\n', (472, 475), False, 'import batoid\n'), ((875, 926), 'numpy.random.uniform', 'np.random.uniform', (['(-0.7 * R)', '(0.7 * R)'], 
{'size': '(10, 10)'}), '(-0.7 * R, 0.7 * R, size=(10, 10))\n', (892, 926), True, 'import numpy as np\n'), ((935, 986), 'numpy.random.uniform', 'np.random.uniform', (['(-0.7 * R)', '(0.7 * R)'], {'size': '(10, 10)'}), '(-0.7 * R, 0.7 * R, size=(10, 10))\n', (952, 986), True, 'import numpy as np\n'), ((1385, 1408), 'random.gauss', 'random.gauss', (['(10.0)', '(0.1)'], {}), '(10.0, 0.1)\n', (1397, 1408), False, 'import random\n'), ((1426, 1442), 'batoid.Sphere', 'batoid.Sphere', (['R'], {}), '(R)\n', (1439, 1442), False, 'import batoid\n'), ((2598, 2622), 'random.gauss', 'random.gauss', (['(0.05)', '(0.01)'], {}), '(0.05, 0.01)\n', (2610, 2622), False, 'import random\n'), ((2640, 2656), 'batoid.Sphere', 'batoid.Sphere', (['R'], {}), '(R)\n', (2653, 2656), False, 'import batoid\n'), ((2833, 2851), 'batoid.Sphere', 'batoid.Sphere', (['(1.0)'], {}), '(1.0)\n', (2846, 2851), False, 'import batoid\n'), ((2861, 2879), 'batoid.Sphere', 'batoid.Sphere', (['(2.0)'], {}), '(2.0)\n', (2874, 2879), False, 'import batoid\n'), ((2889, 2903), 'batoid.Plane', 'batoid.Plane', ([], {}), '()\n', (2901, 2903), False, 'import batoid\n'), ((520, 553), 'random.uniform', 'random.uniform', (['(-0.7 * R)', '(0.7 * R)'], {}), '(-0.7 * R, 0.7 * R)\n', (534, 553), False, 'import random\n'), ((566, 599), 'random.uniform', 'random.uniform', (['(-0.7 * R)', '(0.7 * R)'], {}), '(-0.7 * R, 0.7 * R)\n', (580, 599), False, 'import random\n'), ((1487, 1509), 'random.gauss', 'random.gauss', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1499, 1509), False, 'import random\n'), ((1526, 1548), 'random.gauss', 'random.gauss', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (1538, 1548), False, 'import random\n'), ((1676, 1711), 'batoid.Ray', 'batoid.Ray', (['x', 'y', '(-1000)', '(0)', '(0)', '(1)', '(0)'], {}), '(x, y, -1000, 0, 0, 1, 0)\n', (1686, 1711), False, 'import batoid\n'), ((1761, 1798), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['r.r[0]', 'x'], {}), '(r.r[0], x)\n', (1787, 1798), True, 
'import numpy as np\n'), ((1811, 1848), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['r.r[1]', 'y'], {}), '(r.r[1], y)\n', (1837, 1848), True, 'import numpy as np\n'), ((2471, 2493), 'random.gauss', 'random.gauss', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (2483, 2493), False, 'import random\n'), ((2186, 2208), 'random.gauss', 'random.gauss', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (2198, 2208), False, 'import random\n'), ((2233, 2255), 'random.gauss', 'random.gauss', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (2245, 2255), False, 'import random\n'), ((2280, 2303), 'random.gauss', 'random.gauss', (['(10.0)', '(0.1)'], {}), '(10.0, 0.1)\n', (2292, 2303), False, 'import random\n'), ((2329, 2351), 'random.gauss', 'random.gauss', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (2341, 2351), False, 'import random\n'), ((2376, 2398), 'random.gauss', 'random.gauss', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (2388, 2398), False, 'import random\n'), ((2423, 2446), 'random.gauss', 'random.gauss', (['(-1.0)', '(0.1)'], {}), '(-1.0, 0.1)\n', (2435, 2446), False, 'import random\n'), ((1041, 1079), 'numpy.sqrt', 'np.sqrt', (['(1.0 - (x * x + y * y) / R / R)'], {}), '(1.0 - (x * x + y * y) / R / R)\n', (1048, 1079), True, 'import numpy as np\n'), ((686, 726), 'math.sqrt', 'math.sqrt', (['(1.0 - (x * x + y * y) / R / R)'], {}), '(1.0 - (x * x + y * y) / R / R)\n', (695, 726), False, 'import math\n'), ((1227, 1265), 'numpy.sqrt', 'np.sqrt', (['(1.0 - (x * x + y * y) / R / R)'], {}), '(1.0 - (x * x + y * y) / R / R)\n', (1234, 1265), True, 'import numpy as np\n')] |
from django.http.response import HttpResponseForbidden
from .models import Counter, VisitLog
from .utils import get_client_ip
class SiteStatistics(object):
visit_log = None
def process_request(self, request):
if request.path_info.startswith('/admin/'):
return
counter = Counter.objects.first()
counter.value += 1
counter.save()
try:
self.visit_log = VisitLog()
self.visit_log.user_id = request.session.get('openid', '')[:128]
user_info = ''
openkey = request.session.get('openkey', '')
nick_name = request.session.get('nick_name', '')
if openkey or nick_name:
user_info = nick_name + ' ' + openkey
self.visit_log.user_info = user_info[:255]
self.visit_log.path = request.path[:1024]
self.visit_log.method = request.method
self.visit_log.ip = get_client_ip(request)
self.visit_log.user_agent = request.META['HTTP_USER_AGENT'][:1024]
self.visit_log.query = request.META['QUERY_STRING'][:1024]
self.visit_log.body = request.body[:4096]
self.visit_log.response_length = -1
self.visit_log.save()
except Exception as e:
print(e)
def process_response(self, request, response):
try:
if self.visit_log:
self.visit_log.response_code = response.status_code
if hasattr(response, 'content'):
self.visit_log.response_length = len(response.content)
self.visit_log.response_body = response.content[:4096]
elif 'Content-Length' in response:
self.visit_log.response_length = response['Content-Length']
else:
self.visit_log.response_length = -2
self.visit_log.save()
except Exception as e:
print(e)
return response
class BanUser(object):
ban_openid_list = (
"144115212352913603",
)
ban_nick_name_list = (
"453413024",
)
ban_ip_list = (
"172.16.31.10",
)
def process_request(self, request):
ip = get_client_ip(request)
if ip in self.ban_ip_list:
return HttpResponseForbidden('Banned IP')
openid = request.session.get('openid')
if openid and openid in self.ban_openid_list:
return HttpResponseForbidden('Banned openid')
nick_name = request.session.get('nick_name')
if nick_name and nick_name in self.ban_nick_name_list:
return HttpResponseForbidden('Banned QQ')
| [
"django.http.response.HttpResponseForbidden"
] | [((2312, 2346), 'django.http.response.HttpResponseForbidden', 'HttpResponseForbidden', (['"""Banned IP"""'], {}), "('Banned IP')\n", (2333, 2346), False, 'from django.http.response import HttpResponseForbidden\n'), ((2467, 2505), 'django.http.response.HttpResponseForbidden', 'HttpResponseForbidden', (['"""Banned openid"""'], {}), "('Banned openid')\n", (2488, 2505), False, 'from django.http.response import HttpResponseForbidden\n'), ((2641, 2675), 'django.http.response.HttpResponseForbidden', 'HttpResponseForbidden', (['"""Banned QQ"""'], {}), "('Banned QQ')\n", (2662, 2675), False, 'from django.http.response import HttpResponseForbidden\n')] |
#
# Copyright (c) 2019-2022, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bdb_tools.readers import build_reader
ws_cols = ["ws_item_sk", "ws_sold_date_sk", "ws_quantity"]
item_cols = ["i_item_sk", "i_current_price"]
imp_cols = [
"imp_item_sk",
"imp_competitor_price",
"imp_start_date",
"imp_end_date",
"imp_sk",
]
ss_cols = ["ss_item_sk", "ss_sold_date_sk", "ss_quantity"]
def read_tables(config, c=None):
table_reader = build_reader(
data_format=config["file_format"],
basepath=config["data_dir"],
split_row_groups=config["split_row_groups"],
backend=config["backend"],
)
### read tables
ws_df = table_reader.read("web_sales", relevant_cols=ws_cols)
item_df = table_reader.read("item", relevant_cols=item_cols)
imp_df = table_reader.read("item_marketprices", relevant_cols=imp_cols)
ss_df = table_reader.read("store_sales", relevant_cols=ss_cols)
if c:
c.create_table("web_sales", ws_df, persist=False)
c.create_table("item", item_df, persist=False)
c.create_table("item_marketprices", imp_df, persist=False)
c.create_table("store_sales", ss_df, persist=False)
return ws_df, item_df, imp_df, ss_df
| [
"bdb_tools.readers.build_reader"
] | [((976, 1128), 'bdb_tools.readers.build_reader', 'build_reader', ([], {'data_format': "config['file_format']", 'basepath': "config['data_dir']", 'split_row_groups': "config['split_row_groups']", 'backend': "config['backend']"}), "(data_format=config['file_format'], basepath=config['data_dir'],\n split_row_groups=config['split_row_groups'], backend=config['backend'])\n", (988, 1128), False, 'from bdb_tools.readers import build_reader\n')] |
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.utils.channel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
# Standard Imports
import tensorflow as tf
from tfx.utils import channel
from tfx.utils import types
class ChannelTest(tf.test.TestCase):
  """Unit tests for channel.Channel and channel.as_channel."""

  def test_valid_channel(self):
    artifact_one = types.TfxArtifact('MyTypeName')
    artifact_two = types.TfxArtifact('MyTypeName')
    chnl = channel.Channel(
        'MyTypeName', static_artifact_collection=[artifact_one, artifact_two])
    self.assertEqual(chnl.type_name, 'MyTypeName')
    self.assertItemsEqual(chnl.get(), [artifact_one, artifact_two])

  def test_invalid_channel_type(self):
    artifact_one = types.TfxArtifact('MyTypeName')
    artifact_two = types.TfxArtifact('MyTypeName')
    # A channel whose declared type differs from its artifacts must fail.
    with self.assertRaises(ValueError):
      channel.Channel(
          'AnotherTypeName',
          static_artifact_collection=[artifact_one, artifact_two])

  def test_artifact_collection_as_channel(self):
    artifact_one = types.TfxArtifact('MyTypeName')
    artifact_two = types.TfxArtifact('MyTypeName')
    chnl = channel.as_channel([artifact_one, artifact_two])
    self.assertEqual(chnl.type_name, 'MyTypeName')
    self.assertItemsEqual(chnl.get(), [artifact_one, artifact_two])

  def test_channel_as_channel_success(self):
    artifact_one = types.TfxArtifact('MyTypeName')
    artifact_two = types.TfxArtifact('MyTypeName')
    original = channel.Channel(
        'MyTypeName', static_artifact_collection=[artifact_one, artifact_two])
    # as_channel() applied to an existing Channel should be a pass-through.
    self.assertEqual(original, channel.as_channel(original))

  def test_empty_artifact_collection_as_channel_fail(self):
    with self.assertRaises(ValueError):
      channel.as_channel([])

  def test_invalid_source_as_channel_fail(self):
    with self.assertRaises(ValueError):
      channel.as_channel(source='invalid source')

  def test_type_check_success(self):
    chnl = channel.Channel('MyTypeName')
    chnl.type_check('MyTypeName')

  def test_type_check_fail(self):
    chnl = channel.Channel('MyTypeName')
    with self.assertRaises(TypeError):
      chnl.type_check('AnotherTypeName')
# Run the test suite when this module is executed directly.
if __name__ == '__main__':
  tf.test.main()
| [
"tfx.utils.types.TfxArtifact",
"tfx.utils.channel.as_channel",
"tfx.utils.channel.Channel",
"tensorflow.test.main"
] | [((2811, 2825), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (2823, 2825), True, 'import tensorflow as tf\n'), ((973, 1004), 'tfx.utils.types.TfxArtifact', 'types.TfxArtifact', (['"""MyTypeName"""'], {}), "('MyTypeName')\n", (990, 1004), False, 'from tfx.utils import types\n'), ((1022, 1053), 'tfx.utils.types.TfxArtifact', 'types.TfxArtifact', (['"""MyTypeName"""'], {}), "('MyTypeName')\n", (1039, 1053), False, 'from tfx.utils import types\n'), ((1065, 1151), 'tfx.utils.channel.Channel', 'channel.Channel', (['"""MyTypeName"""'], {'static_artifact_collection': '[instance_a, instance_b]'}), "('MyTypeName', static_artifact_collection=[instance_a,\n instance_b])\n", (1080, 1151), False, 'from tfx.utils import channel\n'), ((1329, 1360), 'tfx.utils.types.TfxArtifact', 'types.TfxArtifact', (['"""MyTypeName"""'], {}), "('MyTypeName')\n", (1346, 1360), False, 'from tfx.utils import types\n'), ((1378, 1409), 'tfx.utils.types.TfxArtifact', 'types.TfxArtifact', (['"""MyTypeName"""'], {}), "('MyTypeName')\n", (1395, 1409), False, 'from tfx.utils import types\n'), ((1632, 1663), 'tfx.utils.types.TfxArtifact', 'types.TfxArtifact', (['"""MyTypeName"""'], {}), "('MyTypeName')\n", (1649, 1663), False, 'from tfx.utils import types\n'), ((1681, 1712), 'tfx.utils.types.TfxArtifact', 'types.TfxArtifact', (['"""MyTypeName"""'], {}), "('MyTypeName')\n", (1698, 1712), False, 'from tfx.utils import types\n'), ((1724, 1768), 'tfx.utils.channel.as_channel', 'channel.as_channel', (['[instance_a, instance_b]'], {}), '([instance_a, instance_b])\n', (1742, 1768), False, 'from tfx.utils import channel\n'), ((1947, 1978), 'tfx.utils.types.TfxArtifact', 'types.TfxArtifact', (['"""MyTypeName"""'], {}), "('MyTypeName')\n", (1964, 1978), False, 'from tfx.utils import types\n'), ((1996, 2027), 'tfx.utils.types.TfxArtifact', 'types.TfxArtifact', (['"""MyTypeName"""'], {}), "('MyTypeName')\n", (2013, 2027), False, 'from tfx.utils import types\n'), ((2048, 2134), 
'tfx.utils.channel.Channel', 'channel.Channel', (['"""MyTypeName"""'], {'static_artifact_collection': '[instance_a, instance_b]'}), "('MyTypeName', static_artifact_collection=[instance_a,\n instance_b])\n", (2063, 2134), False, 'from tfx.utils import channel\n'), ((2158, 2191), 'tfx.utils.channel.as_channel', 'channel.as_channel', (['chnl_original'], {}), '(chnl_original)\n', (2176, 2191), False, 'from tfx.utils import channel\n'), ((2560, 2589), 'tfx.utils.channel.Channel', 'channel.Channel', (['"""MyTypeName"""'], {}), "('MyTypeName')\n", (2575, 2589), False, 'from tfx.utils import channel\n'), ((2670, 2699), 'tfx.utils.channel.Channel', 'channel.Channel', (['"""MyTypeName"""'], {}), "('MyTypeName')\n", (2685, 2699), False, 'from tfx.utils import channel\n'), ((1456, 1547), 'tfx.utils.channel.Channel', 'channel.Channel', (['"""AnotherTypeName"""'], {'static_artifact_collection': '[instance_a, instance_b]'}), "('AnotherTypeName', static_artifact_collection=[instance_a,\n instance_b])\n", (1471, 1547), False, 'from tfx.utils import channel\n'), ((2348, 2370), 'tfx.utils.channel.as_channel', 'channel.as_channel', (['[]'], {}), '([])\n', (2366, 2370), False, 'from tfx.utils import channel\n'), ((2467, 2510), 'tfx.utils.channel.as_channel', 'channel.as_channel', ([], {'source': '"""invalid source"""'}), "(source='invalid source')\n", (2485, 2510), False, 'from tfx.utils import channel\n')] |
from functools import partial
class counter:
    """Callable wrapper that records how many times the wrapped
    function has been invoked.

    The running total is exposed through the ``count`` attribute.
    """
    def __init__(self, func):
        # The wrapped callable and its invocation tally.
        self.func = func
        self.count = 0
    def __call__(self, *args, **kwargs):
        # Bump the tally, then delegate to the wrapped callable.
        self.count = self.count + 1
        return self.func(*args, **kwargs)
def register_as_decorator(func):
    """
    Register extensions, transforms, or addons function as decorator.

    The returned wrapper supports both call styles:

    * with fewer than two positional arguments the resolver is still
      missing, so a ``functools.partial`` is returned and the remaining
      arguments can be supplied later (decorator-factory style);
    * with two or more positional arguments ``func`` is invoked at once.
    """
    def wrapper(*args, **kwargs):
        # Not enough positional args yet: defer by returning a partial.
        if len(args) < 2:
            return partial(func, *args, **kwargs)
        # All arguments present: call through directly instead of building
        # and immediately invoking a throwaway partial object.
        return func(*args, **kwargs)
    return wrapper
| [
"functools.partial"
] | [((694, 724), 'functools.partial', 'partial', (['func', '*args'], {}), '(func, *args, **kwargs)\n', (701, 724), False, 'from functools import partial\n'), ((740, 770), 'functools.partial', 'partial', (['func', '*args'], {}), '(func, *args, **kwargs)\n', (747, 770), False, 'from functools import partial\n')] |
#|-----------------------------------------------------------------------------
#| This source code is provided under the Apache 2.0 license --
#| and is provided AS IS with no warranty or guarantee of fit for purpose. --
#| See the project's LICENSE.md for details. --
#| Copyright (C) 2017-2020 Refinitiv. All rights reserved. --
#|-----------------------------------------------------------------------------
#!/usr/bin/env python
""" Simple example of outputting Market Price JSON data using Websockets with authentication """
import sys
import time
import getopt
import requests
import socket
import json
import websocket
import threading
from threading import Thread, Event
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Suppress urllib3's "unverified HTTPS request" warnings; the auth server in
# this sample may present a self-signed certificate.
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Global Default Variables
app_id = '555'                # Application id forwarded in the login cookie
auth_hostname = '127.0.0.1'   # Authentication (token) server host
auth_port = '8443'            # Authentication server port
hostname = '127.0.0.1'        # Websocket server host
password = ''                 # Overridden by --password
position = socket.gethostbyname(socket.gethostname())  # Local IP reported as session position
token = ''                    # AuthToken cookie value, filled in after login
user = ''                     # Overridden by --user
port = '15000'                # Websocket server port
# Global Variables
web_socket_app = None         # websocket.WebSocketApp instance once created
web_socket_open = False       # Maintained by the on_open/on_close callbacks
def process_message(ws, message_json):
    """Dispatch one decoded message by its 'Type' field.

    A Refresh on the Login domain triggers the item request; a Ping is
    answered with a Pong on the same websocket.
    """
    msg_type = message_json['Type']
    if msg_type == "Refresh":
        # Only the Login-domain refresh is of interest here.
        if message_json.get('Domain') == "Login":
            process_login_response(ws, message_json)
    elif msg_type == "Ping":
        reply = {'Type': 'Pong'}
        ws.send(json.dumps(reply))
        print("SENT:")
        print(json.dumps(reply, sort_keys=True, indent=2, separators=(',', ':')))
def process_login_response(ws, message_json):
    """ Send item request """
    # Login refresh received: the stream is authenticated, so we can now
    # request market data.
    send_market_price_request(ws)
def send_market_price_request(ws):
    """Request the market-price stream for RIC 'TRI.N' (stream id 2)."""
    request = {'ID': 2, 'Key': {'Name': 'TRI.N'}}
    ws.send(json.dumps(request))
    print("SENT:")
    print(json.dumps(request, sort_keys=True, indent=2, separators=(',', ':')))
def on_message(ws, message):
    """Decode an incoming frame and hand each contained message on.

    The server delivers a JSON array; every element is processed
    individually.
    """
    print("RECEIVED: ")
    decoded = json.loads(message)
    print(json.dumps(decoded, sort_keys=True, indent=2, separators=(',', ':')))
    for single_msg in decoded:
        process_message(ws, single_msg)
def on_error(ws, error):
    """Websocket error callback: report the error on stdout."""
    print(error)
def on_close(ws, close_status_code, close_msg):
    """Websocket close callback: record that the connection is gone."""
    global web_socket_open
    print("WebSocket Closed")
    web_socket_open = False
def on_open(ws):
    """Websocket open callback: mark the connection as live and log it."""
    global web_socket_open
    print("WebSocket successfully connected!")
    web_socket_open = True
if __name__ == "__main__":
    # Get command line parameters
    try:
        opts, args = getopt.getopt(sys.argv[1:], "", ["help", "hostname=", "port=", "app_id=", "user=", "password=", "position=", "auth_hostname=", "auth_port="])
    except getopt.GetoptError:
        print('Usage: market_price_authentication.py [--hostname hostname] [--port port] [--app_id app_id] [--user user] [--password password] [--position position] [--auth_hostname auth_hostname] [--auth_port auth_port] [--help]')
        sys.exit(2)
    for opt, arg in opts:
        # NOTE(review): ("--help") is a plain string, not a 1-tuple, so these
        # `in` checks are substring tests rather than tuple membership. They
        # work for the exact option names getopt returns, but the parentheses
        # are misleading — confirm and consider `opt == "--help"` instead.
        if opt in ("--help"):
            print('Usage: market_price_authentication.py [--hostname hostname] [--port port] [--app_id app_id] [--user user] [--password password] [--position position] [--auth_hostname auth_hostname] [--auth_port auth_port] [--help]')
            sys.exit(0)
        elif opt in ("--hostname"):
            hostname = arg
        elif opt in ("--port"):
            port = arg
        elif opt in ("--app_id"):
            app_id = arg
        elif opt in ("--user"):
            user = arg
        elif opt in ("--password"):
            password = arg
        elif opt in ("--position"):
            position = arg
        elif opt in ("--auth_hostname"):
            auth_hostname = arg
        elif opt in ("--auth_port"):
            auth_port = arg
    # Send login info for authentication token
    print("Sending authentication request...")
    r = requests.post('https://{}:{}/getToken'.format(auth_hostname, auth_port),
                      data={'username': user, 'password': password},
                      verify=True)
    auth_json = r.json()
    print("RECEIVED:")
    print(json.dumps(auth_json, sort_keys=True, indent=2, separators=(',', ':')))
    if auth_json['success'] is True:
        # The token and position are forwarded to the websocket server in a
        # cookie during the handshake below.
        token = r.cookies['AuthToken']
        print('Authentication Succeeded. Received AuthToken: {}'.format(token))
        cookie = "AuthToken={};AuthPosition={};applicationId={};".format(token, position, app_id)
        # Start websocket handshake
        ws_address = "ws://{}:{}/WebSocket".format(hostname, port)
        print("Connecting to WebSocket " + ws_address + " ...")
        web_socket_app = websocket.WebSocketApp(ws_address, on_message=on_message,
                                               on_error=on_error,
                                               on_close=on_close,
                                               subprotocols=['tr_json2'],
                                               cookie=cookie)
        web_socket_app.on_open = on_open
        # Event loop
        # Run the websocket client on a worker thread and keep the main
        # thread alive so Ctrl-C can close the connection cleanly.
        wst = threading.Thread(target=web_socket_app.run_forever)
        wst.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            web_socket_app.close()
    else:
        print('Authentication failed')
| [
"getopt.getopt",
"json.loads",
"requests.packages.urllib3.disable_warnings",
"json.dumps",
"websocket.WebSocketApp",
"time.sleep",
"sys.exit",
"threading.Thread",
"socket.gethostname"
] | [((823, 889), 'requests.packages.urllib3.disable_warnings', 'requests.packages.urllib3.disable_warnings', (['InsecureRequestWarning'], {}), '(InsecureRequestWarning)\n', (865, 889), False, 'import requests\n'), ((1049, 1069), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1067, 1069), False, 'import socket\n'), ((2355, 2374), 'json.loads', 'json.loads', (['message'], {}), '(message)\n', (2365, 2374), False, 'import json\n'), ((2072, 2095), 'json.dumps', 'json.dumps', (['mp_req_json'], {}), '(mp_req_json)\n', (2082, 2095), False, 'import json\n'), ((2126, 2198), 'json.dumps', 'json.dumps', (['mp_req_json'], {'sort_keys': '(True)', 'indent': '(2)', 'separators': "(',', ':')"}), "(mp_req_json, sort_keys=True, indent=2, separators=(',', ':'))\n", (2136, 2198), False, 'import json\n'), ((2385, 2458), 'json.dumps', 'json.dumps', (['message_json'], {'sort_keys': '(True)', 'indent': '(2)', 'separators': "(',', ':')"}), "(message_json, sort_keys=True, indent=2, separators=(',', ':'))\n", (2395, 2458), False, 'import json\n'), ((3106, 3251), 'getopt.getopt', 'getopt.getopt', (['sys.argv[1:]', '""""""', "['help', 'hostname=', 'port=', 'app_id=', 'user=', 'password=', 'position=',\n 'auth_hostname=', 'auth_port=']"], {}), "(sys.argv[1:], '', ['help', 'hostname=', 'port=', 'app_id=',\n 'user=', 'password=', 'position=', 'auth_hostname=', 'auth_port='])\n", (3119, 3251), False, 'import getopt\n'), ((4681, 4751), 'json.dumps', 'json.dumps', (['auth_json'], {'sort_keys': '(True)', 'indent': '(2)', 'separators': "(',', ':')"}), "(auth_json, sort_keys=True, indent=2, separators=(',', ':'))\n", (4691, 4751), False, 'import json\n'), ((5218, 5359), 'websocket.WebSocketApp', 'websocket.WebSocketApp', (['ws_address'], {'on_message': 'on_message', 'on_error': 'on_error', 'on_close': 'on_close', 'subprotocols': "['tr_json2']", 'cookie': 'cookie'}), "(ws_address, on_message=on_message, on_error=on_error,\n on_close=on_close, subprotocols=['tr_json2'], cookie=cookie)\n", 
(5240, 5359), False, 'import websocket\n'), ((5625, 5676), 'threading.Thread', 'threading.Thread', ([], {'target': 'web_socket_app.run_forever'}), '(target=web_socket_app.run_forever)\n', (5641, 5676), False, 'import threading\n'), ((3519, 3530), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (3527, 3530), False, 'import sys\n'), ((3835, 3846), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (3843, 3846), False, 'import sys\n'), ((1623, 1644), 'json.dumps', 'json.dumps', (['pong_json'], {}), '(pong_json)\n', (1633, 1644), False, 'import json\n'), ((1683, 1753), 'json.dumps', 'json.dumps', (['pong_json'], {'sort_keys': '(True)', 'indent': '(2)', 'separators': "(',', ':')"}), "(pong_json, sort_keys=True, indent=2, separators=(',', ':'))\n", (1693, 1753), False, 'import json\n'), ((5751, 5764), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (5761, 5764), False, 'import time\n')] |
from django.urls import reverse_lazy, reverse
from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect
from requests.auth import HTTPBasicAuth
from .models import User, Node
from .forms import CustomUserCreationForm, UserCreationForm
from django.views.generic import ListView
from django.views.generic.edit import UpdateView
from django.views import View
from django.views import generic
import requests
from users.serializers import *
from django.contrib.messages.views import SuccessMessageMixin
from django.contrib.auth.mixins import LoginRequiredMixin
import json
class UserList(LoginRequiredMixin, ListView):
    """Lists all users on the server."""
    model = User
    template_name = "user_list.html"
    def get_context_data(self, **kwargs):
        # Expose the viewer's relationship lists so the template can render
        # the appropriate per-user action (add / accept / unfollow ...).
        context = super().get_context_data(**kwargs)
        context['login_user'] = self.request.user
        context['friends'] = self.request.user.friends.all()
        context['followers'] = self.request.user.followers.all()
        context['following'] = self.request.user.following.all()
        context['incomingFriendRequest'] = self.request.user.incomingRequests.all()
        context['sendFriendRequest'] = self.request.user.outgoingRequests.all()
        return context
    def get_queryset(self):
        qs = super().get_queryset()
        # Only active accounts, alphabetical by username.
        qs = qs.filter(is_active=True).order_by("username")
        # Exclude the auth accounts that represent federated nodes.
        n = Node.objects.all().values_list('user_auth', flat=True)
        qs = qs.exclude(id__in=n)
        return qs
class FriendList(LoginRequiredMixin, ListView):
    """List every friend of the user named in the URL."""
    model = User
    template_name = "friends_list.html"

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        profile_user = get_object_or_404(User, username=self.kwargs['username'])
        ctx['following'] = profile_user.following.all()
        return ctx

    def get_queryset(self):
        super().get_queryset()  # default queryset is unused; call kept for parity
        profile_user = get_object_or_404(User, username=self.kwargs['username'])
        return profile_user.friends.all()
class FollowerList(LoginRequiredMixin, ListView):
    """List every follower of the user named in the URL."""
    model = User
    template_name = "followers_list.html"

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        profile_user = get_object_or_404(User, username=self.kwargs['username'])
        ctx['friends'] = profile_user.friends.all()
        ctx['following'] = profile_user.following.all()
        return ctx

    def get_queryset(self):
        super().get_queryset()  # default queryset is unused; call kept for parity
        profile_user = get_object_or_404(User, username=self.kwargs['username'])
        return profile_user.followers.all()
class FollowingList(LoginRequiredMixin, ListView):
    """List every user the person named in the URL is following."""
    model = User
    template_name = "following_list.html"

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        profile_user = get_object_or_404(User, username=self.kwargs['username'])
        ctx['friends'] = profile_user.friends.all()
        ctx['following'] = profile_user.following.all()
        return ctx

    def get_queryset(self):
        super().get_queryset()  # default queryset is unused; call kept for parity
        profile_user = get_object_or_404(User, username=self.kwargs['username'])
        return profile_user.following.all()
class SendFriendRequest(LoginRequiredMixin, View):
    """Send a friend request to a local or remote (federated) user.

    Expects a JSON body of the form ``{'id': <target user id>}``.
    """
    def post(self, request):
        body_unicode = self.request.body.decode('utf-8')
        body = json.loads(body_unicode)
        friend_id = body['id']
        # print("friend_id ", friend_id)
        friend = get_object_or_404(User, id=friend_id)
        #friend is on our host
        print(str(friend.host))
        if(friend.host is None):
            print('local')
            # Record the pending request on both sides and make the sender
            # follow the target immediately.
            friend.incomingRequests.add(self.request.user)
            self.request.user.outgoingRequests.add(friend)
            friend.followers.add(self.request.user)
            self.request.user.following.add(friend)
            # NOTE(review): HttpResponse(200) sets the response *body* to
            # "200" with the default status; probably status=200 was meant.
            return HttpResponse(200)
        #friend is on another host
        else:
            # Remote user: mirror the local bookkeeping, then forward the
            # request to the friend's home node over HTTP basic auth.
            friend_host = get_object_or_404(Node, hostname=friend.host.hostname)
            link = str(friend_host)+'friendrequest'
            print("LINK ", link)
            validated_friend=FriendRequestUsers(friend)
            validated_user=FriendRequestUsers(self.request.user)
            friend.incomingRequests.add(self.request.user)
            self.request.user.outgoingRequests.add(friend)
            friend.followers.add(self.request.user)
            self.request.user.following.add(friend)
            returnDict = dict()
            returnDict['query'] = 'friendrequest'
            returnDict['author']=validated_user.data
            returnDict['friend']=validated_friend.data
            print(json.dumps(returnDict))
            # NOTE(review): the remote response status is printed but never
            # checked — a failed federation call still returns 200 locally.
            friend_request = requests.post(link,
                auth=HTTPBasicAuth(friend_host.send_username,friend_host.send_password),
                headers={"Content-type":"application/json"},
                data=json.dumps(returnDict)
                )
            print("CODE", friend_request.status_code)
            return HttpResponse(200)
class ConfirmFriendRequest(LoginRequiredMixin, View):
    """Accept a pending friend request addressed to the logged-in user.

    Expects a JSON body of the form ``{'id': <requesting user id>}``.
    Returns 200 on success, 404 when no matching pending request exists.
    """
    def post(self, request):
        # Fix: the parameter was misspelled "requst"; Django passes the
        # request positionally, so the rename is transparent to callers.
        body = json.loads(request.body.decode('utf-8'))
        friend = get_object_or_404(User, id=body['id'])
        if friend in request.user.incomingRequests.all():
            # Promote the pending request to a mutual friendship and make
            # the acceptor follow the requester back.
            request.user.friends.add(friend)
            friend.followers.add(request.user)
            request.user.following.add(friend)
            # Clear the request from both sides' pending lists.
            friend.outgoingRequests.remove(request.user)
            request.user.incomingRequests.remove(friend)
            return HttpResponse(status=200)
        return HttpResponse(status=404)
class CustomUserCreationForm(UserCreationForm):
    # NOTE(review): this definition shadows the CustomUserCreationForm
    # imported from .forms at the top of the file — confirm which of the
    # two is the intended form for SignUp.
    class Meta(UserCreationForm.Meta):
        model = User
        # Extend the base sign-up fields with name and e-mail.
        fields = ('first_name', 'last_name', 'email') + UserCreationForm.Meta.fields
class SignUp(generic.CreateView):
    """Self-service registration; new accounts await approval before login."""
    form_class = CustomUserCreationForm
    success_url = reverse_lazy('login')
    template_name = 'signup.html'
    success_message = "Congratulations, you've successfully signed up! Wait to be approved."
class DeleteFriend(LoginRequiredMixin, View):
    """Remove a user from the logged-in user's friends list.

    Expects a JSON body of the form ``{'id': <user id>}`` and re-renders
    the friend list template.
    """
    model = User

    def delete(self, request):
        body = json.loads(request.body.decode('utf-8'))
        friend_id = body['id']
        # get_object_or_404 either returns a User or raises Http404, so the
        # previous `if friend:` guard was dead code and has been dropped.
        get_object_or_404(User, id=friend_id)
        request.user.friends.remove(friend_id)
        context = {'object_list': request.user.friends.all()}
        return render(request, 'friends_list.html', context)
class AccountSettingsView(LoginRequiredMixin, UpdateView):
    """Let the logged-in user edit their own profile settings."""
    model = User
    fields = ['display_name', 'github', 'bio', 'is_active']
    template_name = 'account_settings.html'
    def get_object(self):
        # Always edit the authenticated user, never an arbitrary pk from the URL.
        return self.request.user
    def get_success_url(self):
        # Return to the user's own profile page after a successful save.
        return reverse('profile', kwargs={'username': self.request.user.username})
class FriendRequests(LoginRequiredMixin, ListView):
    """This view lists all the pending friend requests. """
    model = User
    template_name = 'pending_friend_requests.html'
    def get_queryset(self):
        # Only requests addressed to the logged-in user.
        q = self.request.user.incomingRequests.all()
        return q
class Unfollow(LoginRequiredMixin, View):
    """Stop following another user; expects a JSON body ``{'id': <user id>}``."""
    model = User

    def post(self, request):
        payload = json.loads(self.request.body.decode('utf-8'))
        target = get_object_or_404(User, id=payload['id'])
        # Sever the relationship in both directions.
        target.followers.remove(self.request.user.id)
        self.request.user.following.remove(target)
        context = {
            'friends_list': self.request.user.friends.all(),
            'following_list': self.request.user.following.all(),
        }
        return render(request, 'friends_list.html', context)
class Follow(LoginRequiredMixin, View):
    """Start following another user; expects a JSON body ``{'id': <user id>}``."""
    model = User

    def post(self, request):
        payload = json.loads(self.request.body.decode('utf-8'))
        target = get_object_or_404(User, id=payload['id'])
        # Link the relationship in both directions.
        target.followers.add(self.request.user)
        self.request.user.following.add(target)
        context = {
            'friend_list': self.request.user.friends.all(),
            'following_list': self.request.user.following.all(),
        }
        return render(request, 'friends_list.html', context)
"django.shortcuts.render",
"json.loads",
"requests.auth.HTTPBasicAuth",
"django.shortcuts.HttpResponse",
"django.shortcuts.get_object_or_404",
"json.dumps",
"django.urls.reverse_lazy",
"django.urls.reverse"
] | [((6322, 6343), 'django.urls.reverse_lazy', 'reverse_lazy', (['"""login"""'], {}), "('login')\n", (6334, 6343), False, 'from django.urls import reverse_lazy, reverse\n'), ((1795, 1852), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['User'], {'username': "self.kwargs['username']"}), "(User, username=self.kwargs['username'])\n", (1812, 1852), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((2009, 2066), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['User'], {'username': "self.kwargs['username']"}), "(User, username=self.kwargs['username'])\n", (2026, 2066), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((2386, 2443), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['User'], {'username': "self.kwargs['username']"}), "(User, username=self.kwargs['username'])\n", (2403, 2443), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((2647, 2704), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['User'], {'username': "self.kwargs['username']"}), "(User, username=self.kwargs['username'])\n", (2664, 2704), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((3027, 3084), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['User'], {'username': "self.kwargs['username']"}), "(User, username=self.kwargs['username'])\n", (3044, 3084), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((3288, 3345), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['User'], {'username': "self.kwargs['username']"}), "(User, username=self.kwargs['username'])\n", (3305, 3345), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((3536, 3560), 'json.loads', 'json.loads', (['body_unicode'], {}), 
'(body_unicode)\n', (3546, 3560), False, 'import json\n'), ((3650, 3687), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['User'], {'id': 'friend_id'}), '(User, id=friend_id)\n', (3667, 3687), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((5482, 5506), 'json.loads', 'json.loads', (['body_unicode'], {}), '(body_unicode)\n', (5492, 5506), False, 'import json\n'), ((5555, 5592), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['User'], {'id': 'friend_id'}), '(User, id=friend_id)\n', (5572, 5592), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((6008, 6032), 'django.shortcuts.HttpResponse', 'HttpResponse', ([], {'status': '(404)'}), '(status=404)\n', (6020, 6032), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((6639, 6663), 'json.loads', 'json.loads', (['body_unicode'], {}), '(body_unicode)\n', (6649, 6663), False, 'import json\n'), ((6712, 6749), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['User'], {'id': 'friend_id'}), '(User, id=friend_id)\n', (6729, 6749), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((7250, 7317), 'django.urls.reverse', 'reverse', (['"""profile"""'], {'kwargs': "{'username': self.request.user.username}"}), "('profile', kwargs={'username': self.request.user.username})\n", (7257, 7317), False, 'from django.urls import reverse_lazy, reverse\n'), ((7760, 7784), 'json.loads', 'json.loads', (['body_unicode'], {}), '(body_unicode)\n', (7770, 7784), False, 'import json\n'), ((7833, 7870), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['User'], {'id': 'friend_id'}), '(User, id=friend_id)\n', (7850, 7870), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((8133, 8178), 'django.shortcuts.render', 'render', (['request', 
'"""friends_list.html"""', 'context'], {}), "(request, 'friends_list.html', context)\n", (8139, 8178), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((8339, 8363), 'json.loads', 'json.loads', (['body_unicode'], {}), '(body_unicode)\n', (8349, 8363), False, 'import json\n'), ((8412, 8449), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['User'], {'id': 'friend_id'}), '(User, id=friend_id)\n', (8429, 8449), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((8702, 8747), 'django.shortcuts.render', 'render', (['request', '"""friends_list.html"""', 'context'], {}), "(request, 'friends_list.html', context)\n", (8708, 8747), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((4052, 4069), 'django.shortcuts.HttpResponse', 'HttpResponse', (['(200)'], {}), '(200)\n', (4064, 4069), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((4145, 4199), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Node'], {'hostname': 'friend.host.hostname'}), '(Node, hostname=friend.host.hostname)\n', (4162, 4199), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((5306, 5323), 'django.shortcuts.HttpResponse', 'HttpResponse', (['(200)'], {}), '(200)\n', (5318, 5323), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((5968, 5992), 'django.shortcuts.HttpResponse', 'HttpResponse', ([], {'status': '(200)'}), '(status=200)\n', (5980, 5992), False, 'from django.shortcuts import get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((6915, 6960), 'django.shortcuts.render', 'render', (['request', '"""friends_list.html"""', 'context'], {}), "(request, 'friends_list.html', context)\n", (6921, 6960), False, 'from django.shortcuts import 
get_object_or_404, render, HttpResponse, HttpResponseRedirect\n'), ((4838, 4860), 'json.dumps', 'json.dumps', (['returnDict'], {}), '(returnDict)\n', (4848, 4860), False, 'import json\n'), ((4959, 5026), 'requests.auth.HTTPBasicAuth', 'HTTPBasicAuth', (['friend_host.send_username', 'friend_host.send_password'], {}), '(friend_host.send_username, friend_host.send_password)\n', (4972, 5026), False, 'from requests.auth import HTTPBasicAuth\n'), ((5163, 5185), 'json.dumps', 'json.dumps', (['returnDict'], {}), '(returnDict)\n', (5173, 5185), False, 'import json\n')] |
#from weakref import WeakValueDictionary
import random, operator, weakref
def format_service_group(group):
    """Pretty-print a service group as 'name [firm]'.

    A cover identity (spy for IntSec) or a spied-on group takes
    precedence over the group's own name and firm.
    """
    rstr = '%s [%s]'
    # PEP 8: compare to None with `is` / `is not`, not equality.
    if group.cover is not None: # Spy for IntSec
        return rstr % (group.cover, group.cover.firm)
    elif group.spyon is not None:
        return rstr % (group.spyon, group.spyon.firm)
    else:
        return rstr % (group, group.firm)
def format_society(society):
    """Pretty-print a society as 'name (degree: d)'.

    A cover identity or a spied-on society takes precedence over the
    society's own name and degree.
    """
    rstr = '%s (degree: %s)'
    # PEP 8: compare to None with `is` / `is not`, not equality.
    if society.cover is not None:
        return rstr % (society.cover, society.cover.degree)
    elif society.spyon is not None:
        return rstr % (society.spyon.name, society.spyon.degree)
    else:
        return rstr % (society.name, society.degree)
def format_power(char):
    """Pretty-print a character's power, flagging registered ones."""
    suffix = ' [registered]' if char.registered else ''
    return '%s' % char.power + suffix
def build_skill_table(skill):
    """makes an nx2 table of the skill's specs where n = len(skill.specs)

    Rows are sorted by spec name.  When the skill lacks the
    'Energy Weapons' spec, four blank filler rows are appended so the
    rendered table keeps a fixed minimum height.
    """
    table = [[spec, skill[spec]] for spec in skill]
    # sort(key=...) replaces the Python 2 comparator form
    # `table.sort(lambda x, y: cmp(x[0], y[0]))`; the ordering is
    # identical and this also works on Python 3, where `cmp` is gone.
    table.sort(key=lambda row: row[0])
    if 'Energy Weapons' not in skill:
        # Append four *distinct* filler rows (matching the original code).
        for _ in range(4):
            table.append(['________________________', '__'])
    return table
# Minimal int subclass: lets code tell tag values apart from plain ints
# via isinstance checks while behaving exactly like an int.
class tag(int): pass
class weightedchoice(object):
    """Pick a random item from a list of (weight, item) pairs.

    Calling the class returns an item directly (no instance is created):
    __new__ expands the weighted list into a flat list in which each item
    is repeated `weight` times, memoizes that expansion, and samples
    uniformly from it.  Python 2 code: relies on the builtin `reduce`
    and `xrange`.
    """
    __slots__ = ['cache']
    # Class-level memo of expanded lists, keyed by the input list's id().
    # NOTE(review): id() keys are only valid while the original list is
    # alive — a recycled id after garbage collection would serve a stale
    # expansion; confirm callers keep their weight lists alive.
    cache = {}
    def __new__(cls, lst):
        lid = id(lst)
        try:
            return random.choice(weightedchoice.cache[lid])
        except KeyError:
            # First sighting: repeat each item `weight` times, flatten, memoize.
            weightedchoice.cache[lid] = reduce(operator.add, [[item for n in xrange(weight)] for weight, item in lst])
            return random.choice(weightedchoice.cache[lid])
"random.choice"
] | [((1349, 1389), 'random.choice', 'random.choice', (['weightedchoice.cache[lid]'], {}), '(weightedchoice.cache[lid])\n', (1362, 1389), False, 'import random, operator, weakref\n'), ((1529, 1569), 'random.choice', 'random.choice', (['weightedchoice.cache[lid]'], {}), '(weightedchoice.cache[lid])\n', (1542, 1569), False, 'import random, operator, weakref\n')] |
import streamlit as st
import pandas as pd
import numpy as np
import plotly.express as px
# Page title and raw inputs: per-frame emotion detections plus a
# pre-aggregated emotion count per time step.
st.title("Relatório de Aula")
df = pd.read_csv('data/emocoes.csv')
agg = pd.read_csv('data/agg.csv')
# Per-student count of 'Engajado' (engaged) detections, most engaged first.
Engajado = df[df['emocao'] == 'Engajado']
Engajado_agg = Engajado.groupby(['emocao', 'pessoa']).size().reset_index(name='size')
Engajado_agg = Engajado_agg.sort_values(by=['size'], ascending=False)
# Overall frequency of each emotion across the whole session.
emotions_count = df.value_counts('emocao').reset_index()
def is_authenticated(password):
    """Check the typed class id against the expected one."""
    # NOTE(review): "<PASSWORD>" reads like an anonymized placeholder for a
    # hard-coded secret; comparing against a source-code constant is weak
    # authentication — prefer an environment variable or st.secrets.
    return password == "<PASSWORD>"
def generate_time_agg_graph():
    """Plot how many students show each emotion over time."""
    axis_labels = {'tempo': 'tempo (s)', 'size': 'número de alunos'}
    chart = px.line(agg, x="tempo", y="size", labels=axis_labels,
                    color='emocao', title='Emoções ao longo do tempo')
    st.plotly_chart(chart, use_container_width=True)
def generate_top_students():
    """Render the three most engaged students in green."""
    st.markdown('<br/>', unsafe_allow_html=True)
    st.markdown("<center style='font-size:2em'=>Alunos Mais Engajados</center>", unsafe_allow_html=True)
    for entry in Engajado_agg.head(3).to_numpy():
        st.markdown(f"<center><span style='color:#00FF00;font-size:1.5em'>{entry[1]}</span></center>", unsafe_allow_html=True)
    st.markdown('<br/>', unsafe_allow_html=True)
def generate_bottom_students():
    """Render the three least engaged students in red, worst first."""
    st.markdown("<center style='font-size:2em'>Alunos Menos Engajados</center>", unsafe_allow_html=True)
    worst_first = np.flip(Engajado_agg.tail(3).to_numpy(), 0)
    for entry in worst_first:
        st.write(f"<center><span style='color:red;font-size:1.5em'>{entry[1]}</span></center>", unsafe_allow_html=True)
    st.markdown('<br/> <br/>', unsafe_allow_html=True)
def generate_emotions_pizza():
    """Render a pie chart of overall emotion predominance."""
    chart = px.pie(emotions_count, values=emotions_count.index, names='emocao',
                   title='Predominância de Emoções')
    st.plotly_chart(chart, use_container_width=True)
def generate_login_block():
    """Reserve two placeholder slots for the login prompt."""
    return st.empty(), st.empty()
def clean_blocks(blocks):
    """Clear every placeholder in *blocks*."""
    for placeholder in blocks:
        placeholder.empty()
def graph_columns():
    """Render the full report: timeline, top/bottom students, emotion pie."""
    generate_time_agg_graph()
    generate_top_students()
    generate_bottom_students()
    generate_emotions_pizza()
def login(blocks):
    """Show the class-id prompt in the second placeholder; return the input."""
    prompt_slot = blocks[1]
    return prompt_slot.text_input('ID da Aula')
# Reserve the login UI slots and read the class id typed by the user.
login_blocks = generate_login_block()
password = login(login_blocks)
drive_block = st.empty()
google_drive = drive_block.text_input('Link da aula para processamento', '')
id_block = st.empty()
# A non-empty drive link acknowledges the upload with a (fixed) class id.
if google_drive != '':
    drive_block.empty()
    id_block.text("ID da Aula processada: 182916f6-756d-40d6-95fc-3283ba5efdf8")
# With a valid class id, clear the login widgets and render the report.
if is_authenticated(password):
    id_block.empty()
    drive_block.empty()
    clean_blocks(login_blocks)
    st.balloons()
    graph_columns()
elif password:
    st.info("Aula não encontrada. Por favor, insira um ID válido.")
"plotly.express.pie",
"streamlit.markdown",
"pandas.read_csv",
"streamlit.balloons",
"streamlit.write",
"streamlit.empty",
"plotly.express.line",
"streamlit.info",
"streamlit.plotly_chart",
"streamlit.title"
] | [((91, 120), 'streamlit.title', 'st.title', (['"""Relatório de Aula"""'], {}), "('Relatório de Aula')\n", (99, 120), True, 'import streamlit as st\n'), ((126, 157), 'pandas.read_csv', 'pd.read_csv', (['"""data/emocoes.csv"""'], {}), "('data/emocoes.csv')\n", (137, 157), True, 'import pandas as pd\n'), ((164, 191), 'pandas.read_csv', 'pd.read_csv', (['"""data/agg.csv"""'], {}), "('data/agg.csv')\n", (175, 191), True, 'import pandas as pd\n'), ((2327, 2337), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (2335, 2337), True, 'import streamlit as st\n'), ((2426, 2436), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (2434, 2436), True, 'import streamlit as st\n'), ((562, 709), 'plotly.express.line', 'px.line', (['agg'], {'x': '"""tempo"""', 'y': '"""size"""', 'labels': "{'tempo': 'tempo (s)', 'size': 'número de alunos'}", 'color': '"""emocao"""', 'title': '"""Emoções ao longo do tempo"""'}), "(agg, x='tempo', y='size', labels={'tempo': 'tempo (s)', 'size':\n 'número de alunos'}, color='emocao', title='Emoções ao longo do tempo')\n", (569, 709), True, 'import plotly.express as px\n'), ((767, 813), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {'use_container_width': '(True)'}), '(fig, use_container_width=True)\n', (782, 813), True, 'import streamlit as st\n'), ((853, 897), 'streamlit.markdown', 'st.markdown', (['"""<br/>"""'], {'unsafe_allow_html': '(True)'}), "('<br/>', unsafe_allow_html=True)\n", (864, 897), True, 'import streamlit as st\n'), ((902, 1006), 'streamlit.markdown', 'st.markdown', (['"""<center style=\'font-size:2em\'=>Alunos Mais Engajados</center>"""'], {'unsafe_allow_html': '(True)'}), '("<center style=\'font-size:2em\'=>Alunos Mais Engajados</center>",\n unsafe_allow_html=True)\n', (913, 1006), True, 'import streamlit as st\n'), ((1206, 1250), 'streamlit.markdown', 'st.markdown', (['"""<br/>"""'], {'unsafe_allow_html': '(True)'}), "('<br/>', unsafe_allow_html=True)\n", (1217, 1250), True, 'import streamlit as st\n'), ((1289, 1393), 
'streamlit.markdown', 'st.markdown', (['"""<center style=\'font-size:2em\'>Alunos Menos Engajados</center>"""'], {'unsafe_allow_html': '(True)'}), '("<center style=\'font-size:2em\'>Alunos Menos Engajados</center>",\n unsafe_allow_html=True)\n', (1300, 1393), True, 'import streamlit as st\n'), ((1604, 1654), 'streamlit.markdown', 'st.markdown', (['"""<br/> <br/>"""'], {'unsafe_allow_html': '(True)'}), "('<br/> <br/>', unsafe_allow_html=True)\n", (1615, 1654), True, 'import streamlit as st\n'), ((1698, 1804), 'plotly.express.pie', 'px.pie', (['emotions_count'], {'values': 'emotions_count.index', 'names': '"""emocao"""', 'title': '"""Predominância de Emoções"""'}), "(emotions_count, values=emotions_count.index, names='emocao', title=\n 'Predominância de Emoções')\n", (1704, 1804), True, 'import plotly.express as px\n'), ((1804, 1850), 'streamlit.plotly_chart', 'st.plotly_chart', (['fig'], {'use_container_width': '(True)'}), '(fig, use_container_width=True)\n', (1819, 1850), True, 'import streamlit as st\n'), ((1897, 1907), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (1905, 1907), True, 'import streamlit as st\n'), ((1921, 1931), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (1929, 1931), True, 'import streamlit as st\n'), ((2678, 2691), 'streamlit.balloons', 'st.balloons', ([], {}), '()\n', (2689, 2691), True, 'import streamlit as st\n'), ((1085, 1211), 'streamlit.markdown', 'st.markdown', (['f"""<center><span style=\'color:#00FF00;font-size:1.5em\'>{row[1]}</span></center>"""'], {'unsafe_allow_html': '(True)'}), '(\n f"<center><span style=\'color:#00FF00;font-size:1.5em\'>{row[1]}</span></center>"\n , unsafe_allow_html=True)\n', (1096, 1211), True, 'import streamlit as st\n'), ((1490, 1609), 'streamlit.write', 'st.write', (['f"""<center><span style=\'color:red;font-size:1.5em\'>{row[1]}</span></center>"""'], {'unsafe_allow_html': '(True)'}), '(\n f"<center><span style=\'color:red;font-size:1.5em\'>{row[1]}</span></center>"\n , unsafe_allow_html=True)\n', 
(1498, 1609), True, 'import streamlit as st\n'), ((2731, 2794), 'streamlit.info', 'st.info', (['"""Aula não encontrada. Por favor, insira um ID válido."""'], {}), "('Aula não encontrada. Por favor, insira um ID válido.')\n", (2738, 2794), True, 'import streamlit as st\n')] |
import argparse
import json
import os
from os import listdir
from os.path import isfile, join
class RegisterOtherNominate:
    # Register the prize winners of each award to the formatting style.
    """Re-index award prize winners against the yearly movie lists and
    write per-movie nominate JSON files.

    Pipeline (run via __call__): read the annual award JSON, attach a
    movie-list index to each prize winner (modify_index), then emit one
    JSON file per movie with its nominations (dump_data).
    """

    def __init__(self):
        # Command-line configuration: where the per-movie JSON files live
        # and where the annual award data is read from / written back to.
        parser = argparse.ArgumentParser()
        parser.add_argument("-d", "--directory",
                            default="movies_other_nominate",
                            help="path of the json directory",
                            type=str)
        parser.add_argument("-j", "--jsonfile",
                            default="annual_other_nominate_data.json",
                            help="path of the other nominate json data",
                            type=str)
        self.args = parser.parse_args()
        self.key = 'other_nominate'
        self.output = []  # accumulated per-year award data, filled by modify_index
        self.years = range(1977, 2020)

    def __call__(self, *args, **kwargs):
        # Run the full pipeline in order; dump_data depends on the
        # index information that modify_index stores in self.output.
        self.files = self.create_files_list()
        self.modify_index()
        self.dump_data()

    def create_files_list(self):
        """Return the directory's .json file paths sorted numerically by stem."""
        extension = '.json'
        # NOTE(review): rstrip('.json') strips *characters* {.jsonn}, not the
        # suffix - safe only because the stems are purely numeric; confirm.
        files = [int(f.rstrip(extension)) for f in listdir(self.args.directory)
                 if isfile(join(self.args.directory, f))]
        files.sort()
        return [self.args.directory + '/' + str(f) + extension for f in files]

    def _filter_by_year(self, lst, year):
        # Yield the entries of lst whose 'year' key equals year.
        for elm in lst:
            if elm['year'] == year:
                yield elm

    def modify_index(self):
        """Attach the movie-list index to each prize winner and rewrite the JSON."""
        with open(self.args.jsonfile, 'r') as jsonfile:
            other_nominate = json.load(jsonfile)
        # OPTIMIZE: this nests too deep ...
        for year in self.years:
            current = list(self._filter_by_year(other_nominate, year))
            if not current:
                continue
            add_data = current[0]
            movielist = '../{}_movie_clean'.format(year)
            year_data = []
            for prize in add_data['prize_winners']:
                if os.path.exists(movielist):
                    with open(movielist) as f:
                        for movie in f:
                            # Each movie-list line is "index\ttitle\t...".
                            index, title = movie.split('\t')[0:2]
                            index = int(index)
                            if title == prize['work']['title']:
                                add_prize = prize
                                add_prize['work']['index'] = index
                                year_data.append(add_prize)
                                break
                        # for/else: no matching title found - keep the
                        # prize without an index.
                        else:
                            year_data.append(prize)
            add_data['prize_winners'] = year_data
            self.output.append(add_data)
        # Rewrite the annual file with the indexed data.
        with open(self.args.jsonfile, 'w') as jsonfile:
            json.dump(self.output, jsonfile,
                      ensure_ascii=False,
                      indent=4,
                      separators=(',', ':'))
            jsonfile.write('\n')

    def dump_data(self):
        """Write one {year}/{index}.json file per movie with its nominations."""
        for year in self.years:
            movielist = '../{}_movie_clean'.format(year)
            if os.path.exists(movielist):
                with open(movielist) as f:
                    for movie in f:
                        nominates = []
                        index, title = movie.split('\t')[0:2]
                        index = int(index)
                        file_name = ('movies_other_nominate/{year}/{index}.json'
                                     .format(year=year, index=index))
                        for award in self._filter_by_year(self.output, year):
                            for winner in award['prize_winners']:
                                # NOTE(review): result is (re)bound inside this
                                # loop but read after it - if a year has no
                                # awards in self.output this raises NameError;
                                # also winners left unindexed by modify_index
                                # lack 'index' and would raise KeyError. Confirm.
                                result = {}
                                i = winner['work']['index']
                                if index == i:
                                    nominates.append({
                                        'nominate_name': winner['award'],
                                    })
                        result['title'] = title
                        result['other_nominate'] = nominates
                        with open(file_name, 'w') as wf:
                            json.dump(result, wf,
                                      ensure_ascii=False,
                                      indent=4,
                                      separators=(',', ':'))
                            wf.write('\n')
def main():
    """Entry point: build the registrar and run the full pipeline."""
    RegisterOtherNominate()()
if __name__ == '__main__':
main()
| [
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"os.path.join",
"json.load",
"json.dump"
] | [((240, 265), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (263, 265), False, 'import argparse\n'), ((1533, 1552), 'json.load', 'json.load', (['jsonfile'], {}), '(jsonfile)\n', (1542, 1552), False, 'import json\n'), ((2696, 2786), 'json.dump', 'json.dump', (['self.output', 'jsonfile'], {'ensure_ascii': '(False)', 'indent': '(4)', 'separators': "(',', ':')"}), "(self.output, jsonfile, ensure_ascii=False, indent=4, separators=(\n ',', ':'))\n", (2705, 2786), False, 'import json\n'), ((3011, 3036), 'os.path.exists', 'os.path.exists', (['movielist'], {}), '(movielist)\n', (3025, 3036), False, 'import os\n'), ((1103, 1131), 'os.listdir', 'listdir', (['self.args.directory'], {}), '(self.args.directory)\n', (1110, 1131), False, 'from os import listdir\n'), ((1946, 1971), 'os.path.exists', 'os.path.exists', (['movielist'], {}), '(movielist)\n', (1960, 1971), False, 'import os\n'), ((1159, 1187), 'os.path.join', 'join', (['self.args.directory', 'f'], {}), '(self.args.directory, f)\n', (1163, 1187), False, 'from os.path import isfile, join\n'), ((4072, 4146), 'json.dump', 'json.dump', (['result', 'wf'], {'ensure_ascii': '(False)', 'indent': '(4)', 'separators': "(',', ':')"}), "(result, wf, ensure_ascii=False, indent=4, separators=(',', ':'))\n", (4081, 4146), False, 'import json\n')] |
import dataclasses as dc
from typing import Any, Dict, Iterable, List, Optional
from loguru import logger
from mate3.field_values import FieldValue, ModelValues
from mate3.read import AllModelReads
from mate3.sunspec.fields import IntegerField
from mate3.sunspec.model_base import Model
from mate3.sunspec.models import (
ChargeControllerConfigurationModel,
ChargeControllerModel,
FLEXnetDCConfigurationModel,
FLEXnetDCRealTimeModel,
FXInverterConfigurationModel,
FXInverterRealTimeModel,
OutBackModel,
OutBackSystemControlModel,
RadianInverterConfigurationModel,
SinglePhaseRadianInverterRealTimeModel,
SplitPhaseRadianInverterRealTimeModel,
)
from mate3.sunspec.values import (
ChargeControllerConfigurationValues,
ChargeControllerValues,
FLEXnetDCConfigurationValues,
FLEXnetDCRealTimeValues,
FXInverterConfigurationValues,
FXInverterRealTimeValues,
OPTICSPacketStatisticsValues,
OutBackSystemControlValues,
OutBackValues,
RadianInverterConfigurationValues,
SinglePhaseRadianInverterRealTimeValues,
SplitPhaseRadianInverterRealTimeValues,
)
@dc.dataclass
class ChargeControllerDeviceValues(ChargeControllerValues):
    """
    Simple wrapper to combine the value and config models.
    """

    # metadata={"field": False} presumably excludes this from register-backed
    # field iteration - TODO confirm against ModelValues.
    config: ChargeControllerConfigurationValues = dc.field(metadata={"field": False})
@dc.dataclass
class FNDCDeviceValues(FLEXnetDCRealTimeValues):
    """
    Simple wrapper to combine the real-time and config models.
    """

    # Not a register-backed field (metadata "field": False).
    config: FLEXnetDCConfigurationValues = dc.field(metadata={"field": False})
@dc.dataclass
class FXInverterDeviceValues(FXInverterRealTimeValues):
    """
    Simple wrapper to combine the real-time and config models.
    """

    # Not a register-backed field (metadata "field": False).
    config: FXInverterConfigurationValues = dc.field(metadata={"field": False})
@dc.dataclass
class SinglePhaseRadianInverterDeviceValues(SinglePhaseRadianInverterRealTimeValues):
    """
    Simple wrapper to combine the real-time and config models.
    """

    # Not a register-backed field (metadata "field": False).
    config: RadianInverterConfigurationValues = dc.field(metadata={"field": False})
@dc.dataclass
class SplitPhaseRadianInverterDeviceValues(SplitPhaseRadianInverterRealTimeValues):
    """
    Simple wrapper to combine the real-time and config models.
    """

    # Not a register-backed field (metadata "field": False).
    config: RadianInverterConfigurationValues = dc.field(metadata={"field": False})
@dc.dataclass
class Mate3DeviceValues(OutBackValues):
    """
    Simple wrapper to combine the value and config models.
    """

    # Not a register-backed field (metadata "field": False).
    config: OutBackSystemControlValues = dc.field(metadata={"field": False})
class DeviceValues:
    """
    This is basically a way for storing state (i.e. current values) about all devices. It's the main interface for users
    to access values etc.
    """

    def __init__(self, client):
        # Client used by FieldValue instances for reads/writes.
        self._client = client
        # One entry per connected device, keyed by the port it was read on.
        self.mate3s: Dict[None, Mate3DeviceValues] = {}
        self.charge_controllers: Dict[int, ChargeControllerDeviceValues] = {}
        self.fndcs: Dict[int, FNDCDeviceValues] = {}
        self.fx_inverters: Dict[int, FXInverterDeviceValues] = {}
        self.single_phase_radian_inverters: Dict[int, SinglePhaseRadianInverterDeviceValues] = {}
        self.split_phase_radian_inverters: Dict[int, SplitPhaseRadianInverterDeviceValues] = {}
        self.optics: Optional[OPTICSPacketStatisticsValues] = None

    @property
    def connected_devices(self) -> Iterable[ModelValues]:
        """Yield every known device's values (and, where present, its config)."""
        # First ones with only a single device:
        # NOTE(review): getattr(self, "mate3") hits the `mate3` property below,
        # which raises RuntimeError unless exactly one mate3 exists - confirm
        # that is the intended behaviour when no mate3 has been read yet.
        for d in ("mate3", "optics"):
            device = getattr(self, d)
            if device:
                yield device
        # Now those with device and config. (NB: we're explicit here as opposed to relying on hasattr(device, 'config')
        # just in case a model actually had a 'config' field.)
        for d in (
            "charge_controllers",
            "fndcs",
            "fx_inverters",
            "single_phase_radian_inverters",
            "split_phase_radian_inverters",
        ):
            for device in getattr(self, d).values():
                yield device
                yield device.config

    def _get_single_device(self, name: str) -> ModelValues:
        """
        Helper function so that e.g. if there's only one charge controller in self.charge_controllers, you can call
        self.charge_controller to get it.

        Raises:
            RuntimeError: if there is not exactly one device of that kind.
        """
        devices = getattr(self, f"{name}s")
        if len(devices) != 1:
            raise RuntimeError(
                (
                    f"Must be one, and only one, {name} device to be able to use `{name}` attribute - but there are "
                    f"{len(devices)}"
                )
            )
        return list(devices.values())[0]

    @property
    def mate3(self) -> Mate3DeviceValues:
        """
        Return the mate3.
        """
        return self._get_single_device("mate3")

    @property
    def charge_controller(self) -> ChargeControllerDeviceValues:
        """
        Return the charge controller if there's only one.
        """
        return self._get_single_device("charge_controller")

    @property
    def fndc(self) -> FNDCDeviceValues:
        """
        Return the FNDC if there's only one.
        """
        return self._get_single_device("fndc")

    @property
    def fx_inverter(self) -> FXInverterDeviceValues:
        """
        Return the FX inverter if there's only one.
        """
        return self._get_single_device("fx_inverter")

    @property
    def single_phase_radian_inverter(self) -> SinglePhaseRadianInverterDeviceValues:
        """
        Return the single phase radian inverter if there's only one.
        """
        return self._get_single_device("single_phase_radian_inverter")

    @property
    def split_phase_radian_inverter(self) -> SplitPhaseRadianInverterDeviceValues:
        """
        Return the split phase radian inverter if there's only one.
        """
        return self._get_single_device("split_phase_radian_inverter")

    def update(self, all_reads: AllModelReads) -> None:
        """
        This is the key method, and is used to update the state of the devices with new values.
        """
        # Each call below pairs a real-time/value model with its configuration
        # model and updates the corresponding device dict in place.
        # Update mate:
        self._update_model_and_config(
            all_reads=all_reads,
            model_class=OutBackModel,
            config_class=OutBackSystemControlModel,
            config_values_class=OutBackSystemControlValues,
            device_values=self.mate3s,
            device_class=Mate3DeviceValues,
        )

        # Charge controller
        self._update_model_and_config(
            all_reads=all_reads,
            model_class=ChargeControllerModel,
            config_class=ChargeControllerConfigurationModel,
            config_values_class=ChargeControllerConfigurationValues,
            device_values=self.charge_controllers,
            device_class=ChargeControllerDeviceValues,
        )

        # FNDCs
        self._update_model_and_config(
            all_reads=all_reads,
            model_class=FLEXnetDCRealTimeModel,
            config_class=FLEXnetDCConfigurationModel,
            config_values_class=FLEXnetDCConfigurationValues,
            device_values=self.fndcs,
            device_class=FNDCDeviceValues,
        )

        # FX inverters
        self._update_model_and_config(
            all_reads=all_reads,
            model_class=FXInverterRealTimeModel,
            config_class=FXInverterConfigurationModel,
            config_values_class=FXInverterConfigurationValues,
            device_values=self.fx_inverters,
            device_class=FXInverterDeviceValues,
        )

        # Single phase radian inverters
        self._update_model_and_config(
            all_reads=all_reads,
            model_class=SinglePhaseRadianInverterRealTimeModel,
            config_class=RadianInverterConfigurationModel,
            config_values_class=RadianInverterConfigurationValues,
            device_values=self.single_phase_radian_inverters,
            device_class=SinglePhaseRadianInverterDeviceValues,
        )

        # Split phase radian inverters
        self._update_model_and_config(
            all_reads=all_reads,
            model_class=SplitPhaseRadianInverterRealTimeModel,
            config_class=RadianInverterConfigurationModel,
            config_values_class=RadianInverterConfigurationValues,
            device_values=self.split_phase_radian_inverters,
            device_class=SplitPhaseRadianInverterDeviceValues,
        )

    def _update_model_and_config(
        self,
        all_reads: AllModelReads,
        model_class: Model,
        config_class: Model,
        config_values_class: ModelValues,
        device_values: Dict[int, ModelValues],
        device_class: ModelValues,
    ) -> None:
        """Create/update devices of one model kind from a batch of reads,
        and drop devices whose ports no longer appear in the reads."""
        model_field_reads_per_port = all_reads.get_reads_per_model_by_port(model_class)
        config_field_reads_per_port = all_reads.get_reads_per_model_by_port(config_class)

        # OK, there's a few options around whether the above variables contain anything.
        # - Both present, then we're good - continue. All devices should have a configuration class.
        # - Model isn't present - this means the device itself wasn't detected, so ignore. Note that usually this would
        #   imply the config class is null (since the config shouldn't be there if the device isn't) except in the case
        #   of Radian inverters, as the same config class is shared across both single and split phase devices (so that
        #   if only one type is present, the other will have empty model values and non-empty config).
        # - Both are missing - this is covered by the above.
        # So, the short summary is we only care about devices where the model field values are present, and in all other
        # cases there *should* be config field values too.
        if model_field_reads_per_port is None:
            return
        else:
            if config_field_reads_per_port is None:
                logger.warning(
                    (
                        f"Only model ({model_class}) field values and no config ({config_class}) fields were read. This"
                        f" is undefined behaviour, so ignoring {model_class}."
                    )
                )
                return

        # Check model and config have the same ports:
        if set(model_field_reads_per_port).symmetric_difference(set(config_field_reads_per_port)):
            raise RuntimeError("Config and models have different ports!")

        # Create/update any devices for the given ports:
        for port in model_field_reads_per_port:
            model_reads_this_port = model_field_reads_per_port[port]
            config_reads_this_port = config_field_reads_per_port[port]
            if port not in device_values:
                # OK, it's new - create it:
                # The "did" read carries the device's base register address.
                config_values = self._create_new_model_values(
                    model=config_class,
                    values_class=config_values_class,
                    device_address=config_reads_this_port["did"].address,
                )
                device_values[port] = self._create_new_model_values(
                    model=model_class,
                    values_class=device_class,
                    device_address=model_reads_this_port["did"].address,
                    config=config_values,
                )

            # Either way, update the field values:
            for reads, device_val in (
                (model_reads_this_port, device_values[port]),
                (config_reads_this_port, device_values[port].config),
            ):
                for field_name, field_read in reads.items():
                    field_value = getattr(device_val, field_name)
                    field_value._raw_value = field_read.raw_value
                    field_value._implemented = field_read.implemented
                    field_value._last_read = field_read.time

        # If there are any ports that were used for this device, but are no longer, remove them:
        old_device_ports = set(list(device_values.keys())) - set(model_field_reads_per_port.keys())
        for port in old_device_ports:
            logger.warning(
                f"Device(s) of model {model_class} on ports {old_device_ports} have disappeared. These will be ignored."
            )
            del device_values[port]

    def _create_new_model_values(
        self, model: Model, values_class: ModelValues, device_address: int, config: Optional[ModelValues] = None
    ):
        """Build a ModelValues instance with one empty FieldValue per model
        field, wiring integer fields to their scale-factor FieldValues."""
        # Create empty FieldValues
        field_values = {}
        scale_factors = {}
        for field in model.fields():
            # Register addresses are 1-based relative to the device address.
            address = device_address + field.start - 1
            field_values[field.name] = FieldValue(
                client=self._client,
                field=field,
                address=address,
                scale_factor=None,
                raw_value=None,
                implemented=True,
                read_time=None,
            )
            if isinstance(field, IntegerField) and field.scale_factor is not None:
                scale_factors[field.name] = field.scale_factor.name
        # Now assign scale factors:
        for field, scale_factor in scale_factors.items():
            field_values[field]._scale_factor = field_values[scale_factor]
        kwargs = {"model": model, "address": device_address, **field_values}
        return values_class(**kwargs) if config is None else values_class(config=config, **kwargs)
| [
"loguru.logger.warning",
"dataclasses.field",
"mate3.field_values.FieldValue"
] | [((1340, 1375), 'dataclasses.field', 'dc.field', ([], {'metadata': "{'field': False}"}), "(metadata={'field': False})\n", (1348, 1375), True, 'import dataclasses as dc\n'), ((1564, 1599), 'dataclasses.field', 'dc.field', ([], {'metadata': "{'field': False}"}), "(metadata={'field': False})\n", (1572, 1599), True, 'import dataclasses as dc\n'), ((1796, 1831), 'dataclasses.field', 'dc.field', ([], {'metadata': "{'field': False}"}), "(metadata={'field': False})\n", (1804, 1831), True, 'import dataclasses as dc\n'), ((2062, 2097), 'dataclasses.field', 'dc.field', ([], {'metadata': "{'field': False}"}), "(metadata={'field': False})\n", (2070, 2097), True, 'import dataclasses as dc\n'), ((2326, 2361), 'dataclasses.field', 'dc.field', ([], {'metadata': "{'field': False}"}), "(metadata={'field': False})\n", (2334, 2361), True, 'import dataclasses as dc\n'), ((2535, 2570), 'dataclasses.field', 'dc.field', ([], {'metadata': "{'field': False}"}), "(metadata={'field': False})\n", (2543, 2570), True, 'import dataclasses as dc\n'), ((12156, 12286), 'loguru.logger.warning', 'logger.warning', (['f"""Device(s) of model {model_class} on ports {old_device_ports} have disappeared. These will be ignored."""'], {}), "(\n f'Device(s) of model {model_class} on ports {old_device_ports} have disappeared. These will be ignored.'\n )\n", (12170, 12286), False, 'from loguru import logger\n'), ((12718, 12853), 'mate3.field_values.FieldValue', 'FieldValue', ([], {'client': 'self._client', 'field': 'field', 'address': 'address', 'scale_factor': 'None', 'raw_value': 'None', 'implemented': '(True)', 'read_time': 'None'}), '(client=self._client, field=field, address=address, scale_factor=\n None, raw_value=None, implemented=True, read_time=None)\n', (12728, 12853), False, 'from mate3.field_values import FieldValue, ModelValues\n'), ((9948, 10121), 'loguru.logger.warning', 'logger.warning', (['f"""Only model ({model_class}) field values and no config ({config_class}) fields were read. 
This is undefined behaviour, so ignoring {model_class}."""'], {}), "(\n f'Only model ({model_class}) field values and no config ({config_class}) fields were read. This is undefined behaviour, so ignoring {model_class}.'\n )\n", (9962, 10121), False, 'from loguru import logger\n')] |
#!/usr/bin/env python
import rospy
from srv_sub_pub.srv import *
NAME = "add_two_ints_server"
def add_two_ints(req):
    """Service handler: respond with the sum of req.a and req.b."""
    total = req.a + req.b
    print("Returning [%s + %s = %s]" % (req.a, req.b, total))
    return AddTwoIntsResponse(total)
def add_two_ints_server():
    """Initialise the ROS node, register the service, and block until shutdown."""
    rospy.init_node(NAME)
    rospy.Service('add_two_ints', AddTwoInts, add_two_ints)
    # spin() keeps Python from exiting until node is shutdown
    rospy.spin()
if __name__ == "__main__":
add_two_ints_server()
| [
"rospy.init_node",
"rospy.Service",
"rospy.spin"
] | [((270, 291), 'rospy.init_node', 'rospy.init_node', (['NAME'], {}), '(NAME)\n', (285, 291), False, 'import rospy\n'), ((300, 355), 'rospy.Service', 'rospy.Service', (['"""add_two_ints"""', 'AddTwoInts', 'add_two_ints'], {}), "('add_two_ints', AddTwoInts, add_two_ints)\n", (313, 355), False, 'import rospy\n'), ((423, 435), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (433, 435), False, 'import rospy\n')] |
import multiprocessing as mp
import queue
class ModuleRecursion(object):
    """Class to handle recursion.

    Simple class to handle tracking and storing prior
    sub-domains discovered, shared between worker processes.
    """

    def __init__(self):
        """class init.

        Creates the multiprocessing queue used to collect sub-domains.
        """
        self.recursion_queue = mp.Queue()

    def add_subdomain(self, domain):
        """add subdomain to Q.

        uses a non-blocking call to add to the Q
        to prevent any errors with size.

        Arguments:
            domain {obj} -- subdomain record to add to Q (expected to
                            expose `.valid` and `.subdomain` attributes)
        """
        self.recursion_queue.put(domain)

    def get_subdomain_list(self, valid_only=True):
        """build subdomain set.

        Drains the queue built by the event consumer and returns the
        unique sub-domain names for module use.

        Keyword Arguments:
            valid_only {bool} -- keep only valid subdomains (default: {True})

        Returns:
            set -- unique raw subdomain names
        """
        data = []
        while True:
            try:
                record = self.recursion_queue.get_nowait()
            except queue.Empty:
                # Queue drained - this is the normal loop exit, so no
                # logging/printing needed (the old broad `except` printed
                # the Empty exception on every call).
                break
            if record.valid or not valid_only:
                data.append(record.subdomain)
        return set(data)
"multiprocessing.Queue"
] | [((248, 258), 'multiprocessing.Queue', 'mp.Queue', ([], {}), '()\n', (256, 258), True, 'import multiprocessing as mp\n')] |
import os
from pyspark import StorageLevel
from geospark.core.SpatialRDD import PolygonRDD
from geospark.core.enums import IndexType, FileDataSplitter
from geospark.core.geom.envelope import Envelope
from geospark.core.spatialOperator import RangeQuery
from tests.test_base import TestBase
from tests.tools import tests_path
input_location = os.path.join(tests_path, "resources/primaryroads-polygon.csv")
splitter = FileDataSplitter.CSV
gridType = "rtree"
indexType = "rtree"
class TestPolygonRange(TestBase):
    """Range-query regression tests over the polygon dataset."""

    loop_times = 5
    query_envelope = Envelope(-85.01, -60.01, 34.01, 50.01)

    def test_spatial_range_query(self):
        """Repeatedly run a range query (no index) and check the result set."""
        spatial_rdd = PolygonRDD(
            self.sc, input_location, splitter, True, StorageLevel.MEMORY_ONLY
        )
        for _ in range(self.loop_times):
            assert RangeQuery.SpatialRangeQuery(
                spatial_rdd, self.query_envelope, False, False
            ).count() == 704
            sample = RangeQuery.SpatialRangeQuery(
                spatial_rdd, self.query_envelope, False, False
            ).take(10)
            assert sample[0].getUserData() is not None

    def test_spatial_range_query_using_index(self):
        """Same query after building an R-tree index on the RDD."""
        spatial_rdd = PolygonRDD(
            self.sc, input_location, splitter, True, StorageLevel.MEMORY_ONLY
        )
        spatial_rdd.buildIndex(IndexType.RTREE, False)
        # NOTE(review): the query still passes useIndex=False even though an
        # index was built - confirm that is intentional.
        for _ in range(self.loop_times):
            assert RangeQuery.SpatialRangeQuery(
                spatial_rdd, self.query_envelope, False, False
            ).count() == 704
            sample = RangeQuery.SpatialRangeQuery(
                spatial_rdd, self.query_envelope, False, False
            ).take(10)
            assert sample[0].getUserData() is not None
| [
"geospark.core.SpatialRDD.PolygonRDD",
"geospark.core.spatialOperator.RangeQuery.SpatialRangeQuery",
"os.path.join",
"geospark.core.geom.envelope.Envelope"
] | [((345, 407), 'os.path.join', 'os.path.join', (['tests_path', '"""resources/primaryroads-polygon.csv"""'], {}), "(tests_path, 'resources/primaryroads-polygon.csv')\n", (357, 407), False, 'import os\n'), ((555, 593), 'geospark.core.geom.envelope.Envelope', 'Envelope', (['(-85.01)', '(-60.01)', '(34.01)', '(50.01)'], {}), '(-85.01, -60.01, 34.01, 50.01)\n', (563, 593), False, 'from geospark.core.geom.envelope import Envelope\n'), ((657, 734), 'geospark.core.SpatialRDD.PolygonRDD', 'PolygonRDD', (['self.sc', 'input_location', 'splitter', '(True)', 'StorageLevel.MEMORY_ONLY'], {}), '(self.sc, input_location, splitter, True, StorageLevel.MEMORY_ONLY)\n', (667, 734), False, 'from geospark.core.SpatialRDD import PolygonRDD\n'), ((1184, 1261), 'geospark.core.SpatialRDD.PolygonRDD', 'PolygonRDD', (['self.sc', 'input_location', 'splitter', '(True)', 'StorageLevel.MEMORY_ONLY'], {}), '(self.sc, input_location, splitter, True, StorageLevel.MEMORY_ONLY)\n', (1194, 1261), False, 'from geospark.core.SpatialRDD import PolygonRDD\n'), ((824, 900), 'geospark.core.spatialOperator.RangeQuery.SpatialRangeQuery', 'RangeQuery.SpatialRangeQuery', (['spatial_rdd', 'self.query_envelope', '(False)', '(False)'], {}), '(spatial_rdd, self.query_envelope, False, False)\n', (852, 900), False, 'from geospark.core.spatialOperator import RangeQuery\n'), ((1406, 1482), 'geospark.core.spatialOperator.RangeQuery.SpatialRangeQuery', 'RangeQuery.SpatialRangeQuery', (['spatial_rdd', 'self.query_envelope', '(False)', '(False)'], {}), '(spatial_rdd, self.query_envelope, False, False)\n', (1434, 1482), False, 'from geospark.core.spatialOperator import RangeQuery\n'), ((981, 1057), 'geospark.core.spatialOperator.RangeQuery.SpatialRangeQuery', 'RangeQuery.SpatialRangeQuery', (['spatial_rdd', 'self.query_envelope', '(False)', '(False)'], {}), '(spatial_rdd, self.query_envelope, False, False)\n', (1009, 1057), False, 'from geospark.core.spatialOperator import RangeQuery\n'), ((1563, 1639), 
'geospark.core.spatialOperator.RangeQuery.SpatialRangeQuery', 'RangeQuery.SpatialRangeQuery', (['spatial_rdd', 'self.query_envelope', '(False)', '(False)'], {}), '(spatial_rdd, self.query_envelope, False, False)\n', (1591, 1639), False, 'from geospark.core.spatialOperator import RangeQuery\n')] |
from open_publishing.core import FieldGroup
from open_publishing.core import FieldDescriptor
from open_publishing.core.enums import CatalogType, VLBCategory, AcademicCategory
from open_publishing.core import SimpleField
from open_publishing.extendable_enum_field import ExtendableEnumField
from open_publishing.genre import GenresList
from open_publishing.bisac import BisacList
from .thema import ThemaList
from .subject import SubjectField
from .series import SeriesList
from .institution import InstitutionField
class CatalogTypeBase(FieldGroup):
    """Shared base for catalog-type field groups: every catalog type
    exposes a series list and a Thema classification list."""

    _catalog_type = None  # overridden by subclasses with a CatalogType value

    def __init__(self,
                 document):
        super(CatalogTypeBase, self).__init__(document)
        self._fields['series'] = SeriesList(document)
        self._fields['thema'] = ThemaList(document=document)

    series = FieldDescriptor('series')
    thema = FieldDescriptor('thema')

    @property
    def catalog_type(self):
        """Return this group's CatalogType enum value (None on the base class)."""
        return self._catalog_type
class Academic(CatalogTypeBase):
    """Field group for academic catalog entries: subject, category,
    publication year and institution, on top of the base fields."""

    _catalog_type = CatalogType.academic

    def __init__(self,
                 document):
        super(Academic, self).__init__(document)
        self._fields['subject'] = SubjectField(document=document)
        # Category is nullable; 0 is the backend's representation of null.
        self._fields['category'] = SimpleField(database_object=document,
                                               aspect='academic.*',
                                               dtype=AcademicCategory,
                                               field_locator='academic.category_id',
                                               nullable=True,
                                               serialized_null=0)
        # Stored as a string ('year_of_text') despite the numeric-sounding name.
        self._fields['publication_year'] = SimpleField(database_object=document,
                                                       dtype=str,
                                                       nullable=True,
                                                       aspect='academic.*',
                                                       field_locator='academic.year_of_text')
        self._fields['institution'] = InstitutionField(document=document)

    subject = FieldDescriptor('subject')
    category = FieldDescriptor('category')
    publication_year = FieldDescriptor('publication_year')
    institution = FieldDescriptor('institution')
class NonAcademic(CatalogTypeBase):
    """Field group for non-academic catalog entries: publication/copyright
    years, VLB category, genres and BISAC codes, on top of the base fields."""

    _catalog_type = CatalogType.non_academic

    # NOTE(review): unlike Academic, `document` defaults to None here, which
    # would pass None through to FieldGroup.__init__ - confirm intended.
    def __init__(self,
                 document = None):
        super(NonAcademic, self).__init__(document)
        self._fields['publication_year'] = NullableIntField(database_object=document,
                                                           aspect='non_academic.*',
                                                           field_locator='non_academic.publication_year')
        self._fields['copyright_year'] = NullableIntField(database_object=document,
                                                         aspect='non_academic.*',
                                                         field_locator='non_academic.copyright_year')
        self._fields['vlb_category'] = ExtendableEnumField(database_object=document,
                                                            aspect='non_academic.*',
                                                            field_locator='non_academic.vlb_kat_id',
                                                            dtype=VLBCategory,
                                                            nullable=True)
        self._fields['genres'] = GenresList(document)
        self._fields['bisac'] = BisacList(document=document)

    publication_year = FieldDescriptor('publication_year')
    copyright_year = FieldDescriptor('copyright_year')
    vlb_category = FieldDescriptor('vlb_category')
    bisac = FieldDescriptor('bisac')
    genres = FieldDescriptor('genres')
class NullableIntField(SimpleField):
    """A SimpleField whose backend encodes a null int as the empty string."""

    def __init__(self,
                 database_object,
                 aspect,
                 field_locator):
        super(NullableIntField, self).__init__(database_object,
                                               aspect,
                                               field_locator)

    def _parse_value(self,
                     value):
        """Deserialize: '' means null, anything else is parsed as an int."""
        return None if value == '' else int(value)

    def _value_validation(self,
                          value):
        """Accept only ints or None; raise ValueError otherwise."""
        if not (value is None or isinstance(value, int)):
            raise ValueError('expected int or None, got {0}'.format(value))
        return value

    def _serialize_value(self,
                         value):
        """Serialize: None becomes the empty string."""
        return '' if value is None else str(value)
| [
"open_publishing.bisac.BisacList",
"open_publishing.genre.GenresList",
"open_publishing.core.FieldDescriptor",
"open_publishing.extendable_enum_field.ExtendableEnumField",
"open_publishing.core.SimpleField"
] | [((814, 839), 'open_publishing.core.FieldDescriptor', 'FieldDescriptor', (['"""series"""'], {}), "('series')\n", (829, 839), False, 'from open_publishing.core import FieldDescriptor\n'), ((852, 876), 'open_publishing.core.FieldDescriptor', 'FieldDescriptor', (['"""thema"""'], {}), "('thema')\n", (867, 876), False, 'from open_publishing.core import FieldDescriptor\n'), ((2098, 2124), 'open_publishing.core.FieldDescriptor', 'FieldDescriptor', (['"""subject"""'], {}), "('subject')\n", (2113, 2124), False, 'from open_publishing.core import FieldDescriptor\n'), ((2140, 2167), 'open_publishing.core.FieldDescriptor', 'FieldDescriptor', (['"""category"""'], {}), "('category')\n", (2155, 2167), False, 'from open_publishing.core import FieldDescriptor\n'), ((2191, 2226), 'open_publishing.core.FieldDescriptor', 'FieldDescriptor', (['"""publication_year"""'], {}), "('publication_year')\n", (2206, 2226), False, 'from open_publishing.core import FieldDescriptor\n'), ((2245, 2275), 'open_publishing.core.FieldDescriptor', 'FieldDescriptor', (['"""institution"""'], {}), "('institution')\n", (2260, 2275), False, 'from open_publishing.core import FieldDescriptor\n'), ((3578, 3613), 'open_publishing.core.FieldDescriptor', 'FieldDescriptor', (['"""publication_year"""'], {}), "('publication_year')\n", (3593, 3613), False, 'from open_publishing.core import FieldDescriptor\n'), ((3635, 3668), 'open_publishing.core.FieldDescriptor', 'FieldDescriptor', (['"""copyright_year"""'], {}), "('copyright_year')\n", (3650, 3668), False, 'from open_publishing.core import FieldDescriptor\n'), ((3688, 3719), 'open_publishing.core.FieldDescriptor', 'FieldDescriptor', (['"""vlb_category"""'], {}), "('vlb_category')\n", (3703, 3719), False, 'from open_publishing.core import FieldDescriptor\n'), ((3732, 3756), 'open_publishing.core.FieldDescriptor', 'FieldDescriptor', (['"""bisac"""'], {}), "('bisac')\n", (3747, 3756), False, 'from open_publishing.core import FieldDescriptor\n'), ((3770, 3795), 
'open_publishing.core.FieldDescriptor', 'FieldDescriptor', (['"""genres"""'], {}), "('genres')\n", (3785, 3795), False, 'from open_publishing.core import FieldDescriptor\n'), ((1230, 1393), 'open_publishing.core.SimpleField', 'SimpleField', ([], {'database_object': 'document', 'aspect': '"""academic.*"""', 'dtype': 'AcademicCategory', 'field_locator': '"""academic.category_id"""', 'nullable': '(True)', 'serialized_null': '(0)'}), "(database_object=document, aspect='academic.*', dtype=\n AcademicCategory, field_locator='academic.category_id', nullable=True,\n serialized_null=0)\n", (1241, 1393), False, 'from open_publishing.core import SimpleField\n'), ((1663, 1791), 'open_publishing.core.SimpleField', 'SimpleField', ([], {'database_object': 'document', 'dtype': 'str', 'nullable': '(True)', 'aspect': '"""academic.*"""', 'field_locator': '"""academic.year_of_text"""'}), "(database_object=document, dtype=str, nullable=True, aspect=\n 'academic.*', field_locator='academic.year_of_text')\n", (1674, 1791), False, 'from open_publishing.core import SimpleField\n'), ((3056, 3205), 'open_publishing.extendable_enum_field.ExtendableEnumField', 'ExtendableEnumField', ([], {'database_object': 'document', 'aspect': '"""non_academic.*"""', 'field_locator': '"""non_academic.vlb_kat_id"""', 'dtype': 'VLBCategory', 'nullable': '(True)'}), "(database_object=document, aspect='non_academic.*',\n field_locator='non_academic.vlb_kat_id', dtype=VLBCategory, nullable=True)\n", (3075, 3205), False, 'from open_publishing.extendable_enum_field import ExtendableEnumField\n'), ((3472, 3492), 'open_publishing.genre.GenresList', 'GenresList', (['document'], {}), '(document)\n', (3482, 3492), False, 'from open_publishing.genre import GenresList\n'), ((3525, 3553), 'open_publishing.bisac.BisacList', 'BisacList', ([], {'document': 'document'}), '(document=document)\n', (3534, 3553), False, 'from open_publishing.bisac import BisacList\n')] |
import urllib.request,json
from .models import Source,Article
# Getting Api Key
api_key = None
#Getting the base urls
source_base_url = None
article_base_url = None
def configure_request(app):
    """Pull the API key and the endpoint URL templates from the app config."""
    global api_key, source_base_url, article_base_url
    config = app.config
    api_key = config['SOURCE_API_KEY']
    source_base_url = config['SOURCE_BASE_URL']
    article_base_url = config['ARTICLE_BASE_URL']
def get_sources(category):
    '''
    Fetch the news sources for a category and turn them into Source objects.
    Returns None when the response carries no sources.
    '''
    request_url = source_base_url.format(category, api_key)

    with urllib.request.urlopen(request_url) as response:
        payload = json.loads(response.read())

    sources = payload['sources']
    return process_results(sources) if sources else None
def process_results(source_list):
    '''
    Function that processes the source result and transform them to a list of Objects

    Args:
        source_list: A list of dictionaries that contain source details

    Returns :
        source_results: A list of source objects (entries without a url are skipped)
    '''
    source_results = []
    for item in source_list:
        if not item.get('url'):
            continue
        source_results.append(
            Source(item.get('id'),
                   item.get('name'),
                   item.get('description'),
                   item.get('url'),
                   item.get('category'),
                   item.get('language'),
                   item.get('country')))
    return source_results
def get_articles(id):
get_article_url = article_base_url.format(id,api_key)
with urllib.request.urlopen(get_article_url) as url:
get_article_data = url.read()
get_article_response = json.loads(get_article_data)
source_object = None
if get_article_response['articles']:
article_results_list = get_article_response['articles']
article_results = process_results(article_results_list)
return article_results
| [
"json.loads"
] | [((690, 718), 'json.loads', 'json.loads', (['get_sources_data'], {}), '(get_sources_data)\n', (700, 718), False, 'import urllib.request, json\n'), ((1990, 2018), 'json.loads', 'json.loads', (['get_article_data'], {}), '(get_article_data)\n', (2000, 2018), False, 'import urllib.request, json\n')] |
#!/usr/bin/env/python3
"""Recipe for training a wav2vec-based ctc ASR system with librispeech.
The system employs wav2vec as its encoder. Decoding is performed with
ctc greedy decoder.
To run this recipe, do the following:
> python train_with_wav2vec.py hparams/train_with_wav2vec.yaml
The neural network is trained on CTC likelihood target and character units
are used as basic recognition tokens. Training is performed on the full
LibriSpeech dataset (960 h).
Authors
* <NAME> 2021
* <NAME> 2021
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
* <NAME> 2020
"""
import os
import sys
import torch
import logging
import speechbrain as sb
from speechbrain.utils.distributed import run_on_main
from hyperpyyaml import load_hyperpyyaml
from pathlib import Path
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.Brain):
def compute_forward(self, batch, stage):
"""Forward computations from the waveform batches to the output probabilities."""
batch = batch.to(self.device)
wavs, wav_lens = batch.sig
tokens_bos, _ = batch.tokens_bos
wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
# Add augmentation if specified
if stage == sb.Stage.TRAIN:
if hasattr(self.modules, "env_corrupt"):
wavs_noise = self.modules.env_corrupt(wavs, wav_lens)
wavs = torch.cat([wavs, wavs_noise], dim=0)
wav_lens = torch.cat([wav_lens, wav_lens])
tokens_bos = torch.cat([tokens_bos, tokens_bos], dim=0)
if hasattr(self.hparams, "augmentation"):
wavs = self.hparams.augmentation(wavs, wav_lens)
# Forward pass
feats = self.modules.wav2vec2(wavs)
x = self.modules.enc(feats)
# Compute outputs
p_tokens = None
logits = self.modules.ctc_lin(x)
p_ctc = self.hparams.log_softmax(logits)
if stage != sb.Stage.TRAIN:
p_tokens = sb.decoders.ctc_greedy_decode(
p_ctc, wav_lens, blank_id=self.hparams.blank_index
)
return p_ctc, wav_lens, p_tokens
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss (CTC+NLL) given predictions and targets."""
p_ctc, wav_lens, predicted_tokens = predictions
ids = batch.id
tokens_eos, tokens_eos_lens = batch.tokens_eos
tokens, tokens_lens = batch.tokens
if hasattr(self.modules, "env_corrupt") and stage == sb.Stage.TRAIN:
tokens_eos = torch.cat([tokens_eos, tokens_eos], dim=0)
tokens_eos_lens = torch.cat(
[tokens_eos_lens, tokens_eos_lens], dim=0
)
tokens = torch.cat([tokens, tokens], dim=0)
tokens_lens = torch.cat([tokens_lens, tokens_lens], dim=0)
loss_ctc = self.hparams.ctc_cost(p_ctc, tokens, wav_lens, tokens_lens)
loss = loss_ctc
if stage != sb.Stage.TRAIN:
# Decode token terms to words
predicted_words = [
"".join(self.tokenizer.decode_ndim(utt_seq)).split(" ")
for utt_seq in predicted_tokens
]
target_words = [wrd.split(" ") for wrd in batch.wrd]
self.wer_metric.append(ids, predicted_words, target_words)
self.cer_metric.append(ids, predicted_words, target_words)
return loss
def fit_batch(self, batch):
"""Train the parameters given a single batch in input"""
predictions = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
loss.backward()
if self.check_gradients(loss):
self.wav2vec_optimizer.step()
self.model_optimizer.step()
self.wav2vec_optimizer.zero_grad()
self.model_optimizer.zero_grad()
return loss.detach()
def evaluate_batch(self, batch, stage):
"""Computations needed for validation/test batches"""
predictions = self.compute_forward(batch, stage=stage)
with torch.no_grad():
loss = self.compute_objectives(predictions, batch, stage=stage)
return loss.detach()
def on_stage_start(self, stage, epoch):
"""Gets called at the beginning of each epoch"""
if stage != sb.Stage.TRAIN:
self.cer_metric = self.hparams.cer_computer()
self.wer_metric = self.hparams.error_rate_computer()
def on_stage_end(self, stage, stage_loss, epoch):
"""Gets called at the end of an epoch."""
# Compute/store important stats
stage_stats = {"loss": stage_loss}
if stage == sb.Stage.TRAIN:
self.train_stats = stage_stats
else:
stage_stats["CER"] = self.cer_metric.summarize("error_rate")
stage_stats["WER"] = self.wer_metric.summarize("error_rate")
# Perform end-of-iteration things, like annealing, logging, etc.
if stage == sb.Stage.VALID:
old_lr_model, new_lr_model = self.hparams.lr_annealing_model(
stage_stats["loss"]
)
old_lr_wav2vec, new_lr_wav2vec = self.hparams.lr_annealing_wav2vec(
stage_stats["loss"]
)
sb.nnet.schedulers.update_learning_rate(
self.model_optimizer, new_lr_model
)
sb.nnet.schedulers.update_learning_rate(
self.wav2vec_optimizer, new_lr_wav2vec
)
self.hparams.train_logger.log_stats(
stats_meta={
"epoch": epoch,
"lr_model": old_lr_model,
"lr_wav2vec": old_lr_wav2vec,
},
train_stats=self.train_stats,
valid_stats=stage_stats,
)
self.checkpointer.save_and_keep_only(
meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
)
elif stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=stage_stats,
)
with open(self.hparams.wer_file, "w") as w:
self.wer_metric.write_stats(w)
def init_optimizers(self):
"Initializes the wav2vec2 optimizer and model optimizer"
self.wav2vec_optimizer = self.hparams.wav2vec_opt_class(
self.modules.wav2vec2.parameters()
)
self.model_optimizer = self.hparams.model_opt_class(
self.hparams.model.parameters()
)
if self.checkpointer is not None:
self.checkpointer.add_recoverable(
"wav2vec_opt", self.wav2vec_optimizer
)
self.checkpointer.add_recoverable("modelopt", self.model_optimizer)
def dataio_prepare(hparams):
"""This function prepares the datasets to be used in the brain class.
It also defines the data processing pipeline through user-defined functions."""
data_folder = hparams["data_folder"]
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
)
if hparams["sorting"] == "ascending":
# we sort training data to speed up training and get better results.
train_data = train_data.filtered_sorted(sort_key="duration")
# when sorting do not shuffle in dataloader ! otherwise is pointless
hparams["train_dataloader_opts"]["shuffle"] = False
elif hparams["sorting"] == "descending":
train_data = train_data.filtered_sorted(
sort_key="duration", reverse=True
)
# when sorting do not shuffle in dataloader ! otherwise is pointless
hparams["train_dataloader_opts"]["shuffle"] = False
elif hparams["sorting"] == "random":
pass
else:
raise NotImplementedError(
"sorting must be random, ascending or descending"
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
)
valid_data = valid_data.filtered_sorted(sort_key="duration")
# test is separate
test_datasets = {}
for csv_file in hparams["test_csv"]:
name = Path(csv_file).stem
test_datasets[name] = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=csv_file, replacements={"data_root": data_folder}
)
test_datasets[name] = test_datasets[name].filtered_sorted(
sort_key="duration"
)
datasets = [train_data, valid_data] + [i for k, i in test_datasets.items()]
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav):
sig = sb.dataio.dataio.read_audio(wav)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
label_encoder = sb.dataio.encoder.CTCTextEncoder()
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("wrd")
@sb.utils.data_pipeline.provides(
"wrd", "char_list", "tokens_list", "tokens_bos", "tokens_eos", "tokens"
)
def text_pipeline(wrd):
yield wrd
char_list = list(wrd)
yield char_list
tokens_list = label_encoder.encode_sequence(char_list)
yield tokens_list
tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
yield tokens_bos
tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
yield tokens_eos
tokens = torch.LongTensor(tokens_list)
yield tokens
sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
lab_enc_file = os.path.join(hparams["save_folder"], "label_encoder.txt")
special_labels = {
"bos_label": hparams["bos_index"],
"eos_label": hparams["eos_index"],
"blank_label": hparams["blank_index"],
}
label_encoder.load_or_create(
path=lab_enc_file,
from_didatasets=[train_data],
output_key="char_list",
special_labels=special_labels,
sequence_input=True,
)
# 4. Set output:
sb.dataio.dataset.set_output_keys(
datasets,
["id", "sig", "wrd", "char_list", "tokens_bos", "tokens_eos", "tokens"],
)
return train_data, valid_data, test_datasets, label_encoder
if __name__ == "__main__":
# CLI:
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
# If distributed_launch=True then
# create ddp_group with the right communication protocol
sb.utils.distributed.ddp_init_group(run_opts)
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# Create experiment directory
sb.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
# Dataset prep (parsing Librispeech)
from librispeech_prepare import prepare_librispeech # noqa
# multi-gpu (ddp) save data preparation
run_on_main(
prepare_librispeech,
kwargs={
"data_folder": hparams["data_folder"],
"tr_splits": hparams["train_splits"],
"dev_splits": hparams["dev_splits"],
"te_splits": hparams["test_splits"],
"save_folder": hparams["output_folder"],
"merge_lst": hparams["train_splits"],
"merge_name": "train.csv",
"skip_prep": hparams["skip_prep"],
},
)
# here we create the datasets objects as well as tokenization and encoding
train_data, valid_data, test_datasets, label_encoder = dataio_prepare(
hparams
)
# Trainer initialization
asr_brain = ASR(
modules=hparams["modules"],
hparams=hparams,
run_opts=run_opts,
checkpointer=hparams["checkpointer"],
)
# We dynamicaly add the tokenizer to our brain class.
# NB: This tokenizer corresponds to the one used for the LM!!
asr_brain.tokenizer = label_encoder
# Training
asr_brain.fit(
asr_brain.hparams.epoch_counter,
train_data,
valid_data,
train_loader_kwargs=hparams["train_dataloader_opts"],
valid_loader_kwargs=hparams["valid_dataloader_opts"],
)
# Testing
for k in test_datasets.keys(): # keys are test_clean, test_other etc
asr_brain.hparams.wer_file = os.path.join(
hparams["output_folder"], "wer_{}.txt".format(k)
)
asr_brain.evaluate(
test_datasets[k], test_loader_kwargs=hparams["test_dataloader_opts"]
)
| [
"logging.getLogger",
"speechbrain.nnet.schedulers.update_learning_rate",
"speechbrain.dataio.dataio.read_audio",
"torch.LongTensor",
"speechbrain.decoders.ctc_greedy_decode",
"speechbrain.dataio.dataset.DynamicItemDataset.from_csv",
"speechbrain.create_experiment_directory",
"pathlib.Path",
"speechb... | [((785, 812), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (802, 812), False, 'import logging\n'), ((7132, 7253), 'speechbrain.dataio.dataset.DynamicItemDataset.from_csv', 'sb.dataio.dataset.DynamicItemDataset.from_csv', ([], {'csv_path': "hparams['train_csv']", 'replacements': "{'data_root': data_folder}"}), "(csv_path=hparams['train_csv'],\n replacements={'data_root': data_folder})\n", (7177, 7253), True, 'import speechbrain as sb\n'), ((8070, 8191), 'speechbrain.dataio.dataset.DynamicItemDataset.from_csv', 'sb.dataio.dataset.DynamicItemDataset.from_csv', ([], {'csv_path': "hparams['valid_csv']", 'replacements': "{'data_root': data_folder}"}), "(csv_path=hparams['valid_csv'],\n replacements={'data_root': data_folder})\n", (8115, 8191), True, 'import speechbrain as sb\n'), ((8777, 8812), 'speechbrain.utils.data_pipeline.takes', 'sb.utils.data_pipeline.takes', (['"""wav"""'], {}), "('wav')\n", (8805, 8812), True, 'import speechbrain as sb\n'), ((8818, 8856), 'speechbrain.utils.data_pipeline.provides', 'sb.utils.data_pipeline.provides', (['"""sig"""'], {}), "('sig')\n", (8849, 8856), True, 'import speechbrain as sb\n'), ((8957, 9017), 'speechbrain.dataio.dataset.add_dynamic_item', 'sb.dataio.dataset.add_dynamic_item', (['datasets', 'audio_pipeline'], {}), '(datasets, audio_pipeline)\n', (8991, 9017), True, 'import speechbrain as sb\n'), ((9038, 9072), 'speechbrain.dataio.encoder.CTCTextEncoder', 'sb.dataio.encoder.CTCTextEncoder', ([], {}), '()\n', (9070, 9072), True, 'import speechbrain as sb\n'), ((9110, 9145), 'speechbrain.utils.data_pipeline.takes', 'sb.utils.data_pipeline.takes', (['"""wrd"""'], {}), "('wrd')\n", (9138, 9145), True, 'import speechbrain as sb\n'), ((9151, 9259), 'speechbrain.utils.data_pipeline.provides', 'sb.utils.data_pipeline.provides', (['"""wrd"""', '"""char_list"""', '"""tokens_list"""', '"""tokens_bos"""', '"""tokens_eos"""', '"""tokens"""'], {}), "('wrd', 'char_list', 'tokens_list',\n 
'tokens_bos', 'tokens_eos', 'tokens')\n", (9182, 9259), True, 'import speechbrain as sb\n'), ((9736, 9795), 'speechbrain.dataio.dataset.add_dynamic_item', 'sb.dataio.dataset.add_dynamic_item', (['datasets', 'text_pipeline'], {}), '(datasets, text_pipeline)\n', (9770, 9795), True, 'import speechbrain as sb\n'), ((9816, 9873), 'os.path.join', 'os.path.join', (["hparams['save_folder']", '"""label_encoder.txt"""'], {}), "(hparams['save_folder'], 'label_encoder.txt')\n", (9828, 9873), False, 'import os\n'), ((10267, 10387), 'speechbrain.dataio.dataset.set_output_keys', 'sb.dataio.dataset.set_output_keys', (['datasets', "['id', 'sig', 'wrd', 'char_list', 'tokens_bos', 'tokens_eos', 'tokens']"], {}), "(datasets, ['id', 'sig', 'wrd',\n 'char_list', 'tokens_bos', 'tokens_eos', 'tokens'])\n", (10300, 10387), True, 'import speechbrain as sb\n'), ((10552, 10584), 'speechbrain.parse_arguments', 'sb.parse_arguments', (['sys.argv[1:]'], {}), '(sys.argv[1:])\n', (10570, 10584), True, 'import speechbrain as sb\n'), ((10689, 10734), 'speechbrain.utils.distributed.ddp_init_group', 'sb.utils.distributed.ddp_init_group', (['run_opts'], {}), '(run_opts)\n', (10724, 10734), True, 'import speechbrain as sb\n'), ((10862, 10999), 'speechbrain.create_experiment_directory', 'sb.create_experiment_directory', ([], {'experiment_directory': "hparams['output_folder']", 'hyperparams_to_save': 'hparams_file', 'overrides': 'overrides'}), "(experiment_directory=hparams['output_folder'\n ], hyperparams_to_save=hparams_file, overrides=overrides)\n", (10892, 10999), True, 'import speechbrain as sb\n'), ((11181, 11537), 'speechbrain.utils.distributed.run_on_main', 'run_on_main', (['prepare_librispeech'], {'kwargs': "{'data_folder': hparams['data_folder'], 'tr_splits': hparams['train_splits'\n ], 'dev_splits': hparams['dev_splits'], 'te_splits': hparams[\n 'test_splits'], 'save_folder': hparams['output_folder'], 'merge_lst':\n hparams['train_splits'], 'merge_name': 'train.csv', 'skip_prep':\n 
hparams['skip_prep']}"}), "(prepare_librispeech, kwargs={'data_folder': hparams[\n 'data_folder'], 'tr_splits': hparams['train_splits'], 'dev_splits':\n hparams['dev_splits'], 'te_splits': hparams['test_splits'],\n 'save_folder': hparams['output_folder'], 'merge_lst': hparams[\n 'train_splits'], 'merge_name': 'train.csv', 'skip_prep': hparams[\n 'skip_prep']})\n", (11192, 11537), False, 'from speechbrain.utils.distributed import run_on_main\n'), ((8421, 8530), 'speechbrain.dataio.dataset.DynamicItemDataset.from_csv', 'sb.dataio.dataset.DynamicItemDataset.from_csv', ([], {'csv_path': 'csv_file', 'replacements': "{'data_root': data_folder}"}), "(csv_path=csv_file,\n replacements={'data_root': data_folder})\n", (8466, 8530), True, 'import speechbrain as sb\n'), ((8900, 8932), 'speechbrain.dataio.dataio.read_audio', 'sb.dataio.dataio.read_audio', (['wav'], {}), '(wav)\n', (8927, 8932), True, 'import speechbrain as sb\n'), ((9480, 9534), 'torch.LongTensor', 'torch.LongTensor', (["([hparams['bos_index']] + tokens_list)"], {}), "([hparams['bos_index']] + tokens_list)\n", (9496, 9534), False, 'import torch\n'), ((9583, 9637), 'torch.LongTensor', 'torch.LongTensor', (["(tokens_list + [hparams['eos_index']])"], {}), "(tokens_list + [hparams['eos_index']])\n", (9599, 9637), False, 'import torch\n'), ((9680, 9709), 'torch.LongTensor', 'torch.LongTensor', (['tokens_list'], {}), '(tokens_list)\n', (9696, 9709), False, 'import torch\n'), ((10790, 10822), 'hyperpyyaml.load_hyperpyyaml', 'load_hyperpyyaml', (['fin', 'overrides'], {}), '(fin, overrides)\n', (10806, 10822), False, 'from hyperpyyaml import load_hyperpyyaml\n'), ((2000, 2086), 'speechbrain.decoders.ctc_greedy_decode', 'sb.decoders.ctc_greedy_decode', (['p_ctc', 'wav_lens'], {'blank_id': 'self.hparams.blank_index'}), '(p_ctc, wav_lens, blank_id=self.hparams.\n blank_index)\n', (2029, 2086), True, 'import speechbrain as sb\n'), ((2570, 2612), 'torch.cat', 'torch.cat', (['[tokens_eos, tokens_eos]'], {'dim': '(0)'}), 
'([tokens_eos, tokens_eos], dim=0)\n', (2579, 2612), False, 'import torch\n'), ((2643, 2695), 'torch.cat', 'torch.cat', (['[tokens_eos_lens, tokens_eos_lens]'], {'dim': '(0)'}), '([tokens_eos_lens, tokens_eos_lens], dim=0)\n', (2652, 2695), False, 'import torch\n'), ((2747, 2781), 'torch.cat', 'torch.cat', (['[tokens, tokens]'], {'dim': '(0)'}), '([tokens, tokens], dim=0)\n', (2756, 2781), False, 'import torch\n'), ((2808, 2852), 'torch.cat', 'torch.cat', (['[tokens_lens, tokens_lens]'], {'dim': '(0)'}), '([tokens_lens, tokens_lens], dim=0)\n', (2817, 2852), False, 'import torch\n'), ((4112, 4127), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (4125, 4127), False, 'import torch\n'), ((5298, 5373), 'speechbrain.nnet.schedulers.update_learning_rate', 'sb.nnet.schedulers.update_learning_rate', (['self.model_optimizer', 'new_lr_model'], {}), '(self.model_optimizer, new_lr_model)\n', (5337, 5373), True, 'import speechbrain as sb\n'), ((5416, 5495), 'speechbrain.nnet.schedulers.update_learning_rate', 'sb.nnet.schedulers.update_learning_rate', (['self.wav2vec_optimizer', 'new_lr_wav2vec'], {}), '(self.wav2vec_optimizer, new_lr_wav2vec)\n', (5455, 5495), True, 'import speechbrain as sb\n'), ((8371, 8385), 'pathlib.Path', 'Path', (['csv_file'], {}), '(csv_file)\n', (8375, 8385), False, 'from pathlib import Path\n'), ((1408, 1444), 'torch.cat', 'torch.cat', (['[wavs, wavs_noise]'], {'dim': '(0)'}), '([wavs, wavs_noise], dim=0)\n', (1417, 1444), False, 'import torch\n'), ((1472, 1503), 'torch.cat', 'torch.cat', (['[wav_lens, wav_lens]'], {}), '([wav_lens, wav_lens])\n', (1481, 1503), False, 'import torch\n'), ((1533, 1575), 'torch.cat', 'torch.cat', (['[tokens_bos, tokens_bos]'], {'dim': '(0)'}), '([tokens_bos, tokens_bos], dim=0)\n', (1542, 1575), False, 'import torch\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 12 18:28:54 2020
@author: Dr <NAME> (CIMAT-CONACYT, Mexico) jac at cimat.mx
Instantaneous reproduction numbers calculations.
Rts_P, Implementation of Cori et al (2013)
Rts_AR, new filtering version using an autoregressive linear model of Capistrán, Capella and Christen (2020):
https://arxiv.org/abs/2012.02168, 05DIC2021
01FEB2021: Some buggs were corrected to avoid error when too low counts are used and for prediction when g=1.
Go directly to __main__ for examples.
"""
import os
from datetime import date, timedelta
from pickle import load, dump
from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones
from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt
from numpy import sum as np_sum
from scipy.stats import erlang, gamma, nbinom, uniform, beta
from scipy.stats import t as t_student
from matplotlib.pyplot import subplots, rcParams, close
from matplotlib.dates import drange
from pytwalk import pytwalk
from plotfrozen import PlotFrozenDist
def Rts_P( data, tau=7, n=30, IP_dist=erlang( a=3, scale=8/3),\
Rt_pr_a=5, Rt_pr_b=5/5, q=[10,25,50,75,90]):
"""Calculate Rt as in:
<NAME>, <NAME>, <NAME>, <NAME>,
A New Framework and Software to Estimate Time-Varying Reproduction Numbers
During Epidemics, American Journal of Epidemiology,
Volume 178, Issue 9, 1 November 2013, Pages 1505–1512,
https://doi.org/10.1093/aje/kwt133
data: array with case incidence.
tau: Use a window tau (default 7) to calculate R_{t,\tau}'s.
n: calculate n R_{t,\tau}'s to the past n days (default 30).
IP_dist: 'frozen' infectiousness profile distribution,
default erlang( a=3, scale=8/3), chosen for covid19.
Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
Rt_pr_a=5, Rt_pr_b=5/5, parameters for the gamma prior for R_t.
q=[10,25,50,75,90], quantiles to use to calulate in the post. dust for R_t.
If q ia a single integer, return a simulation of the Rts of size q, for each Rt
Returns: a (len(q), n) array with quantiles of the R_{t,\tau}'s.
"""
if isinstance( q, list): ## Return a list of quantiles
q = array(q)/100
rt = zeros(( len(q), n))
simulate = False
else: ## If q ia a single integer, return a simulation of the Rts of size q, for each Rt
if q == 2: # return a and b of post gamma
rt = zeros(( q, n))
else:
rt = zeros(( q, n))
simulate = True
m = len(data)
w = diff(IP_dist.cdf( arange( 0, m+1)))
w /= sum(w)
w = flip(w)
for t in range(max(m-n,0), m):
S1 = 0.0
S2 = 0.0
if sum(data[:t]) <= 10:# Only for more than 10 counts
continue
for k in range(tau):
I = data[:(t-k)] ## window of reports
S2 += data[(t-k)]
S1 += sum(I * w[(m-(t-k)):]) #\Gamma_k
#print( (Rt_pr_a+S2) * (1/(S1 + 1/Rt_pr_b)), (Rt_pr_a+S2), 1/(S1 + 1/Rt_pr_b))
if simulate:
if q == 2: #Return Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b)
rt[:,t-(m-n)] = Rt_pr_a+S2, 1/(S1 + 1/Rt_pr_b)
else:
rt[:,t-(m-n)] = gamma.rvs( Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b), size=q)
else:
rt[:,t-(m-n)] = gamma.ppf( q, Rt_pr_a+S2, scale=1/(S1 + 1/Rt_pr_b))
return rt
def PlotRts_P( data_fnam, init_date, trim=0,\
tau=7, n=30, IP_dist=erlang( a=3, scale=8/3), Rt_pr_a=5, Rt_pr_b=5/5,\
q=[10,25,50,75,90], csv_fnam=None, color='blue', median_color='red', alpha=0.25, ax=None):
"""Makes a board with the Rt evolution for the past n days (n=30).
All parameters are passed to function Rts_P.
csv_fnam is an optional file name toi save the Rts info.
ax is an Axis hadle to for the plot, if None, it creates one and retruns it.
"""
if type(data_fnam) == str:
data = loadtxt(data_fnam)
else:
data = data_fnam.copy()
data_fnam = " "
if trim < 0:
data = data[:trim,:]
rts = Rts_P(data=data[:,1],\
tau=tau, n=n, IP_dist=IP_dist, q=q,\
Rt_pr_a=Rt_pr_a, Rt_pr_b=Rt_pr_b)
m = data.shape[0]
last_date = init_date + timedelta(m)
if ax == None:
fig, ax = subplots(figsize=( n/3, 3.5) )
for i in range(n):
h = rts[:,i]
ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=alpha)
ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=alpha)
ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color )
ax.set_title(data_fnam + r", $R_t$, dist. posterior.")
ax.set_xlabel('')
ax.set_xticks(range(n))
ax.set_xticklabels([(last_date-timedelta(n-i)).strftime("%d.%m") for i in range(n)], ha='right')
ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
ax.axhline(y=1, color='green')
ax.axhline(y=2, color='red')
ax.axhline(y=3, color='darkred')
ax.set_ylim((0.5,3.5))
ax.set_yticks(arange( 0.4, 3.4, step=0.2))
ax.tick_params( which='major', axis='y', labelsize=10)
ax.grid(color='grey', linestyle='--', linewidth=0.5)
#fig.tight_layout()
if csv_fnam != None:
days = drange( last_date-timedelta(n), last_date, timedelta(days=1))
### To save all the data for the plot,
### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
### 0 1 2 3 4 5 6 7
sv = -ones(( len(days), 3+len(q)))
for i,day in enumerate(days):
d = date.fromordinal(int(day))
sv[ i, 0] = d.year
sv[ i, 1] = d.month
sv[ i, 2] = d.day
sv[ i, 3:] = rts[:,i]
q_str = ', '.join(["q_%02d" % (qunt,) for qunt in q])
savetxt( csv_fnam, sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
return ax
"""
def loglikelihood_NB( x, mu, psi):
mu_psi = mu/psi
return -gammaln(x + 1) + gammaln(x + psi) - gammaln(psi)\
-(x + psi)*log(1 + mu_psi) + x*log(mu_psi)
"""
def loglikelihood_NB( x, mu, psi):
return beta.logcdf(x, mu*psi, (1-mu)*psi)
def Rts_NB( data, n=30, tau=7, psi=10, IP_dist=erlang( a=3, scale=8/3),\
Rt_pr_a=5, Rt_pr_b=5/5, q=[10,25,50,75,90]):
"""Calculate Rt Using a Negative Binomial instead of Poisson.
Here one needs to fix psi = 1/theta (= 10).
Extension of (not documented):
<NAME>, <NAME>, <NAME>, <NAME>,
A New Framework and Software to Estimate Time-Varying Reproduction Numbers
During Epidemics, American Journal of Epidemiology,
Volume 178, Issue 9, 1 November 2013, Pages 1505–1512,
https://doi.org/10.1093/aje/kwt133
data: array with case incidence.
tau: Use a window tau (default 7) to calculate R_{t,\tau}'s.
n: calculate n R_{t,\tau}'s to the past n days (default 30).
IP_dist: 'frozen' infectiousness profile distribution,
default erlang( a=3, scale=8/3), chosen for covid19.
Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
Rt_pr_a=5, Rt_pr_b=5/5, parameters for the gamma prior for R_t.
q=[10,25,50,75,90], quantiles to use to calulate in the post. dust for R_t.
If q ia a single integer, return a simulation of the Rts, for each Rt
Returns: a (len(q), n) array with quantiles of the R_{t,\tau}'s.
"""
if isinstance( q, list): ## Return a list of quantiles
q = array(q)/100
quantiles = zeros(len(q))
rt = zeros(( len(q), n))
simulate = False
else: ## If q ia a single integer, return a simulation of the Rts of size q, for each Rt
rt = zeros(( q, n))
simulate = True
m = len(data)
w = diff(IP_dist.cdf( arange( 0, m+1)))
w /= sum(w)
w = flip(w)
R = linspace( 0.1, 3.0, num=100)
DeltaR = R[1]-R[0]
#omega = 1
#theta = THETA_MEAN #0.01
#psi = 1/theta
#fig, axs = subplots(nrows=5, ncols=1, figsize=( 5, 5))
for t in range(max(m-n,0), m):
#S1 = 0.0
log_likelihood_I = zeros(R.shape) ## Same size of array for values for R
if sum(data[:t]) <= 10:# Only for more than 10 counts
continue
for k in range(tau):
I = data[:(t-k)] ## window of reports
Gammak = I @ w[(m-(t-k)):] #\Gamma_k
#S1 += Gammak
I_k = data[(t-k)]
log_likelihood_I += loglikelihood_NB( I_k, R*Gammak, psi)
log_post = log_likelihood_I + gamma.logpdf( R, Rt_pr_a, scale=1/Rt_pr_b)
pdf = exp(log_post)
pdf /= sum(pdf)*DeltaR
cdf = cumsum(pdf)*DeltaR
if simulate:
u = uniform.rvs()
rt[:,t-(m-n)] = R[where(cdf < u)[0][-1]]
else:
for i,qua in enumerate(q):
quantiles[i] = R[where(cdf < qua)[0][-1]]
rt[:,t-(m-n)] = quantiles
return rt
def PlotRts_NB( data_fnam, init_date, psi, trim=0,\
tau=7, n=30, IP_dist=erlang( a=3, scale=8/3), Rt_pr_a=5, Rt_pr_b=5/5,\
q=[10,25,50,75,90], csv_fnam=None, color='blue', ax=None):
"""Makes a board with the Rt evolution for the past n days (n=30).
All parameters are passed to function Rts_NB.
csv_fnam is an optional file name toi save the Rts info.
ax is an Axis hadle to for the plot, if None, it creates one and retruns it.
"""
if type(data_fnam) == str:
data = loadtxt(data_fnam)
else:
data = data_fnam.copy()
data_fnam = " "
if trim < 0:
data = data[:trim,:]
rts = Rts_NB(data=data[:,1],\
tau=tau, psi=psi, n=n, IP_dist=IP_dist, q=q,\
Rt_pr_a=Rt_pr_a, Rt_pr_b=Rt_pr_b)
m = data.shape[0]
last_date = init_date + timedelta(m)
if ax == None:
fig, ax = subplots(figsize=( n/3, 3.5) )
for i in range(n):
h = rts[:,i]
ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=0.25)
ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=0.25)
ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color='red' )
ax.set_title(data_fnam + r", $R_t$, dist. posterior.")
ax.set_xlabel('')
ax.set_xticks(range(n))
ax.set_xticklabels([(last_date-timedelta(n-i)).strftime("%d.%m") for i in range(n)], ha='right')
ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
ax.axhline(y=1, color='green')
ax.axhline(y=2, color='red')
ax.axhline(y=3, color='darkred')
ax.set_ylim((0.5,3.5))
ax.set_yticks(arange( 0.4, 3.4, step=0.2))
ax.tick_params( which='major', axis='y', labelsize=10)
ax.grid(color='grey', linestyle='--', linewidth=0.5)
#fig.tight_layout()
if csv_fnam != None:
days = drange( last_date-timedelta(n), last_date, timedelta(days=1))
### To save all the data for the plot,
### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
### 0 1 2 3 4 5 6 7
sv = -ones(( len(days), 3+len(q)))
for i,day in enumerate(days):
d = date.fromordinal(int(day))
sv[ i, 0] = d.year
sv[ i, 1] = d.month
sv[ i, 2] = d.day
sv[ i, 3:] = rts[:,i]
q_str = ', '.join(["q_%02d" % (qunt,) for qunt in q])
savetxt( csv_fnam, sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
return ax
class Rts_NB_psi:
def __init__( self, data_fnam, init_date, trim=0, tau=7, n=30, IP_dist=erlang( a=3, scale=8/3),\
Rt_pr_a=5, Rt_pr_b=5/5, q=[10,25,50,75,90], workdir="./../"):
"""Calculate Rt Using a Negative Binomial with unknown psi = 1/theta.
Here one needs to run the MCMC first, RunMCMC.
See example below.
Extension of (not documented):
<NAME>, <NAME>, <NAME>, <NAME>,
A New Framework and Software to Estimate Time-Varying Reproduction Numbers
During Epidemics, American Journal of Epidemiology,
Volume 178, Issue 9, 1 November 2013, Pages 1505–1512,
https://doi.org/10.1093/aje/kwt133
data: array with case incidence.
tau: Use a window tau (default 7) to calculate R_{t,\tau}'s.
n: calculate n R_{t,\tau}'s to the past n days (default 30).
IP_dist: 'frozen' infectiousness profile distribution,
default erlang( a=3, scale=8/3), chosen for covid19.
Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
Rt_pr_a=5, Rt_pr_b=5/5, parameters for the gamma prior for R_t.
q=[10,25,50,75,90], quantiles to use to calulate in the post. dust for R_t.
If q ia a single integer, return a simulation of the Rts of size q, for each Rt
"""
self.data_fnam = data_fnam
data = loadtxt(workdir + 'data/' + data_fnam + '.csv')
self.workdir = workdir
if trim < 0:
self.data = data[:trim,1]
else:
self.data = data[:,1]
#convolve
self.init_date = init_date
self.m = len(data)
self.IP_dist = IP_dist
self.w = diff(IP_dist.cdf( arange( 0, self.m+1)))
self.w /= sum(self.w)
self.w = flip(self.w)
self.n = min(self.m, n)
self.tau = tau
self.Rt_pr_a = Rt_pr_a
self.Rt_pr_b = Rt_pr_b
self.prior = gamma( self.Rt_pr_a, scale=1/self.Rt_pr_b)
#omega = 1
self.psi = 100
self.psi_prior = gamma( 3, scale=self.psi/3)
for t in range( self.m - self.n, self.m):
if sum(self.data[:t]) <= 10:# Rt calculated only for more than 10 counts
print("Not more than 10 counts for day %d" % (-t,))
self.n -= 1
self.Gammak = zeros(self.m) ##We calculate all gammas previously:
for s in range(self.m):
self.Gammak[s] = self.data[:s] @ self.w[(self.m-s):] #\Gamma_k
if os.path.isfile(workdir + 'output/' + self.data_fnam + '_rts.pkl'): # samples file exists
print("File with rts and psi samples exists, loading rts ...", end=' ')
self.rts = load(open(workdir + 'output/' + self.data_fnam + '_rts.pkl', 'rb'))
self.psi_samples = load(open(workdir + 'output/' + self.data_fnam + '_rts_psi.pkl', 'rb'))
else:
print("File with rts and psi samples does not exist, run RunMCMC first.")
def logpost( self, Rs, psi):
log_post = 0.0
for t in range( self.m - self.n, self.m):
log_post += self.prior.logpdf( Rs[t-(self.m - self.n)]) +\
np_sum(loglikelihood_NB( self.data[(t-self.tau+1):t], Rs[t-(self.m - self.n)]*tst.Gammak[(t-self.tau+1):t], psi))
#log_post += sum([loglikelihood_NB( self.data[s], Rs[t-(self.m - self.n)]*self.Gammak[s], psi) for s in range( t-self.tau+1, t)])
"""
for k in range(self.tau):
s = t-k
#I = self.data[:s] ## window of reports
#Gammak = self.data[:s] @ self.w[(self.m-s):] #\Gamma_k
#I_k = self.data[s]
log_post += loglikelihood_NB( self.data[s], Rs[t-(self.m - self.n)]*self.Gammak[s], psi)
log_post += self.prior.logpdf( Rs[t-(self.m - self.n)])
"""
return log_post
def sim_init(self):
"""Simulate initial values from the Rts_NB and the prior for psi."""
# Shake the Rts_NB simulation to avoid repeated values
#shake = Rts_NB( self.data*self.Z, tau=self.tau, n=self.n, IP_dist=self.IP_dist,\
# Rt_pr_a=self.Rt_pr_a, Rt_pr_b=self.Rt_pr_b, q=1) + 0.001*uniform.rvs(size=self.n)
shake = ones(self.n) + 0.001*uniform.rvs(size=self.n)
return append( shake, self.psi_prior.rvs(size=1))
#Simulate intial values from the prior.
#return append(self.prior.rvs(size=self.n),self.psi_prior.rvs(size=1))
def support(self, x):
rt = all( (0.1 <= x[:-1]) * (x[:-1] <= 40) ) #Rt's
rt &= (x[-1] > 0.0)
return rt
    def RunMCMC( self, T, burnin=5000, q=[10,25,50,75,90]):
        """Run twalk MCMC, T = number of iterations.
           burnin, thining = IAT.
        """
        # Energy (negative log-posterior) over x = (R_t's, psi): x[:-1] are
        # the n Rt values and x[-1] is the NB dispersion psi.
        # NOTE(review): the energy adds self.prior.logpdf(x[-1]) -- the *Rt*
        # prior -- as the prior for psi, not self.psi_prior; verify this is
        # intentional.
        #self.twalk = pytwalk(n = self.n+1, U=lambda x: -self.logpost( x[:-1], self.psi), Supp =self.support) #Ignore x[-1] = psi
        self.twalk = pytwalk(n = self.n+1, U=lambda x: -self.logpost( x[:-1], x[-1]) - self.prior.logpdf(x[-1]), Supp =self.support)
        self.twalk.Run( T=T, x0 = self.sim_init(), xp0 = self.sim_init())
        self.burnin = burnin
        self.Rts(q=q)  # compute and store the Rt quantile summaries (self.rts)
        # Persist the Rt summaries and the psi samples for later reuse
        # (the constructor reloads these pickles when they exist).
        dump( self.rts, open(self.workdir + 'output/' + self.data_fnam + '_rts.pkl', 'wb'))
        self.psi_samples = self.twalk.Output[self.burnin:, self.n]
        dump( self.psi_samples, open(self.workdir + 'output/' + self.data_fnam + '_rts_psi.pkl', 'wb'))
def PlotPostPsi( self, ax=None):
if ax == None:
fig, ax = subplots(figsize=( 5,5) )
PlotFrozenDist(self.psi_prior, color='green', ax=ax)
ax.hist( self.psi_samples, density=True)
ax.set_xlabel(r'$\psi$')
def PlotPostRt( self, i, ax=None):
if ax == None:
fig, ax = subplots(figsize=( 5,5) )
#PlotFrozenDist(self.psi_prior, color='green', ax=ax)
ax.hist( self.twalk.Output[self.burnin:,i], density=True)
ax.set_xlabel(r'$R_%d$' % (i))
def Rts( self, q=[10,25,50,75,90]):
if isinstance( q, list): ## Return a list of quantiles
q = array(q)/100
rts = zeros(( len(q), self.n))
simulate = False
else: ## If q ia a single integer, return a simulation of the Rts of size q, for each Rt
rts = zeros(( q, self.n))
simulate = True
self.q = q
self.simulate = simulate
#fig, axs = subplots(nrows=5, ncols=1, figsize=( 5, 5))
for i in range(self.n):
if simulate:
#u = uniform.rvs()
rts[:,i] = self.twalk.Output[self.burnin+0,i]
else:
rts[:,i] = quantile( self.twalk.Output[self.burnin:,i], q=q)
self.rts = rts
return rts
    def PlotRts( self, color='blue', median_color='red', csv_fnam=None, ax=None):
        """Makes a board with the Rt evolution.
        csv_fnam is an optional file name to save the Rts info.
        ax is an Axes handle for the plot; if None, one is created and returned.
        """
        # self.rts must already have been produced by running RunMCMC
        last_date = self.init_date + timedelta(self.m)
        if ax == None:
            fig, ax = subplots(figsize=( self.n/3, 3.5) )
        for i in range(self.n):
            # h holds the stored quantile rows for day i; the h[0..4]
            # indexing assumes the default q=[10,25,50,75,90], i.e. 5 rows
            # -- TODO confirm against the q used in RunMCMC.
            h = self.rts[:,i]
            ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=0.25)  # 10-90 band
            ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=0.25)  # 25-75 band
            ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color )  # median
        ax.set_title(self.data_fnam + r", $R_t$, dist. posterior.")
        ax.set_xlabel('')
        ax.set_xticks(range(self.n))
        ax.set_xticklabels([(last_date-timedelta(self.n-i)).strftime("%d.%m") for i in range(self.n)], ha='right')
        ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
        # Reference lines at Rt = 1, 2 and 3
        ax.axhline(y=1, color='green')
        ax.axhline(y=2, color='red')
        ax.axhline(y=3, color='darkred')
        ax.set_ylim((0.5,3.5))
        ax.set_yticks(arange( 0.4, 3.4, step=0.2))
        ax.tick_params( which='major', axis='y', labelsize=10)
        ax.grid(color='grey', linestyle='--', linewidth=0.5)
        #fig.tight_layout()
        if csv_fnam != None:
            days = drange( last_date-timedelta(self.n), last_date, timedelta(days=1))
            ### Save all the data for the plot,
            ### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
            ###             0      1    2     3     4     5     6     7
            sv = -ones(( len(days), 3+len(self.q)))
            for i,day in enumerate(days):
                d = date.fromordinal(int(day))
                sv[ i, 0] = d.year
                sv[ i, 1] = d.month
                sv[ i, 2] = d.day
                sv[ i, 3:] = self.rts[:,i]
            q_str = ', '.join(["q_%02d" % (qunt,) for qunt in self.q])
            savetxt( csv_fnam, sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
        return ax
class Rts_AR:
    """Sequential Bayesian (DLM/AR) estimation of the reproduction number R_t.

    Works on the "naive" log Rt's  y_t = log(I_t) - log(Gamma_t)  (incidence
    over total infectiousness) with conjugate Normal/Gamma filtering updates,
    instead of MCMC.
    """
    def __init__( self, data_fnam, init_date, trim=0,\
                 IP_dist=erlang( a=3, scale=8/3), tau=7, m0=0, c_a_0=1, w_a_t=2/7, n0=2, s0=3,\
                 n=30, pred=0, workdir="./../"):
        """Calculate Rt using an autoregressive time series on the logs.
        See example below.

        Parameters:
            data_fnam: file name = workdir + 'data/' + data_fnam + '.csv'
                (NOTE(review): the docstring also mentioned passing an array,
                but loadtxt is called unconditionally -- a file is required).
            init_date: initial date for first datum, e.g. date(2020, 2, 27).
            trim: (negative) cut trim days at the end of data.
            tau: number of days to learn from the past (default 7, see paper).
            n: calculate n R_t's for the past n days (default 30).
            pred: number of days to predict into the future (default 0).
            IP_dist: 'frozen' infectiousness profile distribution,
                default erlang( a=3, scale=8/3), chosen for covid19.
                Only the cdf is needed, ie. IP_dist.cdf(i), to calculate w_s.
            m0, c_a_0, w_a_t, n0, s0: m_0, c_0^*, w_t^*, n_0, s_0 prior
                hyperparameters (see paper).
        """
        self.data_fnam = data_fnam
        data = loadtxt(workdir + 'data/' + data_fnam + '.csv')
        self.workdir = workdir
        if trim < 0:
            self.data = data[:trim,1]
        else:
            self.data = data[:,1]
        self.init_date = init_date
        self.m = len(self.data) ##Data size
        ### Calculate the serial time distribution
        self.IP_dist = IP_dist
        self.w = diff(IP_dist.cdf( arange( 0, self.m+1)))
        self.w /= sum(self.w)
        self.w = flip(self.w)
        ### Calculation range
        self.shift = 5*tau #Number of days to start calculation before the first Rt.
        self.n = min(self.m, n) #Number of Rt's to calculate, from the present into the past.
        self.N = n+self.shift #Total range (into the past) for calculation
        #If self.N is larger than the whole data set
        if self.N > (self.m-1):
            self.n -= self.N - (self.m-1)#Reduce self.n accordingly
            # BUG FIX: recompute N from the *reduced* self.n (the original
            # used the stale parameter n, leaving N unchanged; consistent
            # with the unconditional recomputation below).
            self.N = self.n+self.shift
            if self.n < 0:
                raise ValueError("ERROR: Not enough data to calculate Rts: 5*tau > %d (data size)" % (self.m,))
            print("Not enough data to calculate Rts: 5*tau + n > %d (data size)" % (self.m,))
            print("Reducing to n=%d" % (self.n,))
        # Rt's are calculated only once the incidence has reached 10 counts.
        for t in range(self.n):
            if self.data[self.m-(self.n - t)] >= 10:
                break
            else:
                self.n -= 1 #Reduce n if the counts have not reached 10
                print("Incidence below 10, reducing n to %d." % (self.n,))
        self.N = self.n+self.shift
        ### Setting prior parameters
        self.delta = 1-(1/tau) # discount factor
        self.tau = tau
        self.pred = pred
        self.g = 1 #exp(-2/tau)  # AR coefficient
        self.m0 = m0
        self.c_a_0 = c_a_0
        self.w_a_t = w_a_t
        self.n0 = n0
        self.s0 = s0
        ### We calculate all gammas (total infectiousness) previously:
        self.Gammak = zeros(self.m)
        for s in range(self.m):
            self.Gammak[s] = self.data[:s] @ self.w[(self.m-s):] #\Gamma_k
        ### Calculate the log data:
        ### We add 1e-6 for convenience, since very early data may be zero.
        ### This makes no difference at the end.
        self.y = log(self.data + 1e-6) - log(self.Gammak + 1e-6)
    def sim_data( self, R, I0):
        # Placeholder, not implemented.
        pass
    def CalculateRts( self, q=[10,25,50,75,90]):
        """Calculate the posterior distribution and the Rt's quantiles.

        q=[10,25,50,75,90], percentiles used to summarize the posterior
        distribution of each R_t (rows of self.rts).
        If q is a single integer, store a simulation of the Rts of size q
        for each Rt instead.
        Fills self.rts (and self.rts_pred when self.pred > 0).
        """
        if isinstance( q, list): ## Return a list of quantiles
            q = array(q)/100
            self.rts = zeros(( len(q), self.n))
            self.rts_pred = zeros((len(q), self.pred))
            simulate = False
        else: ## If q is a single integer, return a simulation of the Rts of size q, for each Rt
            self.rts = zeros(( q, self.n))
            self.rts_pred = zeros(( q, self.pred))
            simulate = True
        self.q = q
        self.simulate = simulate
        ### Filtering recursion; one row of hyperparameters per day:
        ### nt, at, rt, qt, st, mt, ct # hyperparameters
        ###  0   1   2   3   4   5   6
        self.hiper = zeros(( self.N+1, 7))
        self.hiper[0,:] = self.n0, -1, -1, -1, self.s0, self.m0, self.s0*self.c_a_0
        for t in range( self.N ):
            r_a_t = self.g**2 * self.hiper[t,6] + self.w_a_t #r^*_t
            At = r_a_t/(r_a_t + 1)  # adaptive (Kalman-like) gain
            self.hiper[t+1,0] = self.delta*self.hiper[t,0] + 1 #nt
            self.hiper[t+1,1] = self.g * self.hiper[t,5] #at
            et = self.y[self.m-(self.N - t)] - self.hiper[t+1,1]  # forecast error
            self.hiper[t+1,2] = self.hiper[t,4]*r_a_t #rt
            self.hiper[t+1,3] = self.hiper[t,4]*(r_a_t + 1) #qt
            # st:
            self.hiper[t+1,4] = self.delta*(self.hiper[t,0]/self.hiper[t+1,0])*self.hiper[t,4] +\
                self.hiper[t,4]/self.hiper[t+1,0] * (et**2/self.hiper[t+1,3])
            self.hiper[t+1,5] = self.hiper[t+1,1] + At*et #mt
            #ct
            self.hiper[t+1,6] = (self.hiper[t+1,4]/self.hiper[t,4]) * (self.hiper[t+1,2]- self.hiper[t+1,3]*At**2)
            # Posterior of log(R_t) is Student-t; transform through exp.
            if t >= self.shift:
                if self.simulate:
                    self.rts[:,t-self.shift] = exp(t_student.rvs( size=self.q, df=self.hiper[t+1,0], loc=self.hiper[t+1,5], scale=sqrt(self.hiper[t+1,6]) ))
                else:
                    self.rts[:,t-self.shift] = exp(t_student.ppf( q=self.q, df=self.hiper[t+1,0], loc=self.hiper[t+1,5], scale=sqrt(self.hiper[t+1,6]) ))
        if self.pred>0:
            # k-step-ahead forecast hyperparameters from the last filtered day.
            t = self.N
            self.pred_hiper = zeros(( self.pred, 2)) # a_t^k and r_t^k
            for k in range(self.pred):
                self.pred_hiper[k,0] = self.g**(k+1) * self.hiper[t,5] #a_t^k
                if self.g == 1:
                    self.pred_hiper[k,1] = self.g**(2*(k+1)) * self.hiper[t,6] + self.w_a_t * (k+1) #r_t^k
                else:
                    self.pred_hiper[k,1] = self.g**(2*(k+1)) * self.hiper[t,6] + self.w_a_t * ((1-self.g**(2*(k+1)))/(1-self.g**2)) #r_t^k
                if self.simulate:
                    self.rts_pred[:,k] = exp(t_student.rvs( size=self.q, df=self.hiper[t,0], loc=self.pred_hiper[k,0], scale=sqrt(self.pred_hiper[k,1]) ))
                else:
                    self.rts_pred[:,k] = exp(t_student.ppf( q=self.q, df=self.hiper[t,0], loc=self.pred_hiper[k,0], scale=sqrt(self.pred_hiper[k,1]) ))
    def PlotPostRt( self, i, ax=None, color='black'):
        """Plot the i-th Rt posterior distribution (density of exp of the
        Student-t filtered posterior)."""
        if ax == None:
            fig, ax = subplots(figsize=( 5,5) )
        # NOTE(review): the hiper index uses t = i + self.tau, while
        # CalculateRts maps Rt column i to hiper row i + self.shift + 1
        # (shift = 5*tau) -- confirm which indexing is intended.
        t = i+self.tau
        y = linspace( 0.01, 4, num=500)
        ### Transformed pdf using the Jacobian y^{-1}
        pdf = (y**-1) * t_student.pdf( log(y), df=self.hiper[t+1,0], loc=self.hiper[t+1,5], scale=sqrt(self.hiper[t+1,6]) )
        ax.plot( y, pdf, '-', color=color)
        ax.set_ylabel("Density")
        ax.set_xlabel(r'$R_{%d}$' % (i))
    def PlotRts( self, color='blue', median_color='red', x_jump=1, plot_area=[0.4,2.2], alpha=0.25, csv_fnam=None, ax=None):
        """Makes a board with the Rt evolution.
        csv_fnam: optional file name to save the Rts info: workdir/csv/csv_fnam.csv
        ax: Axes handle for the plot; if None, one is created and returned.
        x_jump: put ticks every x_jump days.
        plot_area: ([0.4,2.2]), interval with the y-axis (Rt values) plot area.
        """
        #self.rts already been produced after running CalculateRts
        last_date = self.init_date + timedelta(self.m)
        if ax == None:
            fig, ax = subplots(figsize=( self.n/3, 3.5) )
        ### Plot the Rt's posterior quantiles
        ### (the h[0..4] indexing assumes the default 5 percentiles)
        for i in range(self.n):
            h = self.rts[:,i]
            ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color=color, alpha=0.25)
            ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color=color, alpha=0.25)
            ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color )
        ### Plot the observed (naive) Rt's
        ax.plot( exp(self.y[self.m-self.n:]), '-', color='grey')
        ### Plot the predictions
        if self.pred >0:
            for k in range(self.pred):
                h = self.rts_pred[:,k]
                i=self.n+k
                ax.bar( x=i, bottom=h[0], height=h[4]-h[0], width=0.9, color='light'+color, alpha=alpha)
                ax.bar( x=i, bottom=h[1], height=h[3]-h[1], width=0.9, color='light'+color, alpha=alpha)
                ax.hlines( y=h[2], xmin=i-0.9/2, xmax=i+0.9/2, color=median_color )
        ax.set_title(self.data_fnam + r", $R_t$, dist. posterior.")
        ax.set_xlabel('')
        ax.set_xticks(range(0,self.n,x_jump))
        ax.set_xticklabels([(last_date-timedelta(self.n-i)).strftime("%d.%m") for i in range(0,self.n,x_jump)], ha='right')
        ax.tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
        # Reference lines at Rt = 1, 2 and 3
        ax.axhline(y=1, color='green')
        ax.axhline(y=2, color='red')
        ax.axhline(y=3, color='darkred')
        ax.set_ylim(plot_area)
        ax.set_yticks(arange( plot_area[0], plot_area[1], step=0.2))
        ax.tick_params( which='major', axis='y', labelsize=10)
        ax.grid(color='grey', linestyle='--', linewidth=0.5)
        #fig.tight_layout()
        if csv_fnam != None:
            days = drange( last_date-timedelta(self.n), last_date, timedelta(days=1))
            ### Save all the data for the plot,
            ### columns: year, month, day, q_05, q_25, q_50, q_75, q_95
            ###             0      1    2     3     4     5     6     7
            sv = -ones(( len(days), 3+len(self.q)))
            for i,day in enumerate(days):
                d = date.fromordinal(int(day))
                sv[ i, 0] = d.year
                sv[ i, 1] = d.month
                sv[ i, 2] = d.day
                sv[ i, 3:] = self.rts[:,i]
            q_str = ', '.join(["q_%02d" % (qunt,) for qunt in self.q])
            savetxt( self.workdir + "csv/" + csv_fnam + ".csv", sv, delimiter=', ', fmt='%.1f', header="year, month, day, " + q_str, comments='')
        return ax
##### Dictionary with general information for the metro zone or region to be analyzed:
#####    key:  [ Name,        (not used), Population,  init date ]
ZMs = { "9-01": ["Mexico city", 2, 21.942666e6, date(2020, 2, 27)],\
       "15-02": ["Toluca", 1, 2.377828e6, date(2020, 3, 7)],\
       "31-01": ["Mérida", 2, 1.237697e6, date(2020, 3, 7)],\
       "17-02": ["Cuernavaca", 1, 1.059521e6, date(2020, 3, 2)],\
       "12-01": ["Acapulco", 2, 0.919726e6, date(2020, 3, 11)],\
       "25-01": ["Culiacán", 2, 0.962871e6, date(2020, 3, 1)],\
       "23-01": ["Cancun", 2, 0.867768e6, date(2020, 3, 1)]}
### The corresponding data files have two columns separated by space, deaths and incidence.
### Each row is one day.
### The file for clave="9-01" (Mexico city) is: ../data/clave.csv etc.
if __name__=='__main__':
    # Script: reproduce the paper's figures (serial-interval plot, Rt boards
    # per metro zone, and comparison plots). Requires the ../data and ../figs
    # directories relative to the working directory.
    rcParams.update({'font.size': 14})
    close('all')
    #Plot the imputed serial time distribution for covid: erlang( a=3, scale=8/3 )
    fig, ax = subplots( num=30, figsize=( 4.5, 3.5))
    PlotFrozenDist( erlang( a=3, scale=8/3 ), ax=ax)
    ### Plot the erlang( a=5, scale=9/5 ) alternative
    PlotFrozenDist( erlang( a=5, scale=9/5 ), color='grey', ax=ax)
    ax.set_xlim((0,20))
    ax.grid(color='grey', linestyle='--', linewidth=0.5)
    ax.set_ylabel(r"Density")
    ax.set_xlabel("days")
    ax.set_title("")
    fig.tight_layout()
    fig.savefig("../figs/Covid19_SerialTimeDist.png")
    ### Plot the Rt's estimation. Only Merida, '13-01' and Mexico city, '9-01', are in the paper
    claves = ['15-02', '17-02', '23-01', '25-01', '12-01', "31-01", '9-01']
    n=60 ## Number of days to calculate the Rt's
    trim=0 ## Number of days to cut data from the end, negative, e.g. -10, cut 10 days
    x_jump = 7 ## For plotting, put ticks every x_jump days.
    for i,clave in enumerate(claves):
        print(clave)
        ### Open an instance of the Rts_AR class:
        tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=5, n=n)
        tst.CalculateRts() # Must be called before plotting the Rt's
        ### Plot the Rts:
        fig, ax = subplots( num=i+1, figsize=( 8, 3.5))
        ### Plot Cori et al (2013) Poisson model version:
        PlotRts_P( '../data/%s.csv' % (clave,), init_date=ZMs[clave][3]+timedelta(days=4),\
                  n=tst.n, trim=trim, ax=ax, color='green', alpha=0.5, median_color='black')
        ### Plot ours:
        tst.PlotRts( ax=ax, x_jump=x_jump, plot_area=[0.4,2.2], csv_fnam=clave)
        ax.set_title("")
        ax.set_ylabel(r"$R_t$")
        ax.set_xlabel("")
        ax.set_title(ZMs[clave][0] + ", Mexico")
        fig.tight_layout()
        fig.savefig("../figs/%s_Rts_AR.png" % (clave,))
        if clave == '9-01':
            m_max = tst.m  # remember the longest series length for the comparison plots
        # NOTE(review): the same figure is saved twice (before and after
        # changing the x label); the second savefig overwrites the first.
        ax.set_xlabel("day.month, 2020")
        fig.tight_layout()
        fig.savefig("../figs/%s_Rts_AR.png" % (clave,))
    ### Figure with Cori et al (2013) posterior distributions of '31-01' and '9-01'
    fig1, ax1 = subplots( num=20, nrows=1, ncols=2, figsize=( 10, 3.5))
    color = [ "red", "black", "darkred"]
    for i,clave in enumerate([ '31-01', '9-01']):
        tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n)
        a, b = Rts_P( tst.data, tau=7, n=30, q=2)
        ax1[0].plot( arange(m_max-tst.m, m_max, 1), tst.data, '.-', color=color[i], label=ZMs[clave][0])
        PlotFrozenDist( gamma( a[-1], scale=b[-1]), ax=ax1[1], color=color[i])
    last_date = tst.init_date + timedelta(tst.m)
    ax1[0].set_xlabel('')
    ax1[0].set_xticks(range(0,tst.m,x_jump*2))
    ax1[0].set_xticklabels([(last_date-timedelta(tst.m-i)).strftime("%d.%m") for i in range(0,tst.m,x_jump*2)], ha='right')
    ax1[0].tick_params( which='major', axis='x', labelsize=10, labelrotation=30)
    ax1[0].set_xlabel("day.month, 2020")
    #ax1[0].set_ylim((0,1.1*max(tst.data[-n:])))
    ax1[0].grid(color='grey', linestyle='--', linewidth=0.5)
    ax1[0].set_ylabel(r"Incidence")
    ax1[0].legend(loc=0, shadow = False)
    ### Add '31-01', with incidence multiplied by 10
    clave = '31-01'
    tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n)
    a, b = Rts_P( tst.data*10, tau=7, n=30, q=2)
    ax1[0].plot( arange(m_max-tst.m, m_max, 1), tst.data*10, '.-', color=color[2])
    PlotFrozenDist( gamma( a[-1], scale=b[-1]), ax=ax1[1], color=color[2])
    ax1[1].set_xticks(arange(0.8,1.4,0.2))
    ax1[1].set_xlabel(r"$R_t$, " + (last_date-timedelta(1)).strftime("%d.%m.%Y"))
    ax1[1].grid(color='grey', linestyle='--', linewidth=0.5)
    fig1.tight_layout()
    fig1.savefig("../figs/Rts_Compare.png")
    ### Comparison of results changing the serial time distribution
    fig, ax = subplots( num=31, figsize=( 4.5, 3.5))
    tst = Rts_AR( clave, init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n)
    tst.CalculateRts()
    tst.PlotPostRt( i=n, ax=ax)
    #### Here we change the serial time: Any other positive density could be used.
    tst = Rts_AR( clave, IP_dist=erlang( a=5, scale=9/5), init_date=ZMs[clave][3]+timedelta(days=4), trim=trim, pred=0, n=n)
    tst.CalculateRts()
    tst.PlotPostRt( i=n, ax=ax, color='grey')
    ax.set_xlim((0.5,2.5))
    ax.set_xlabel(r"$R_t$, " + (last_date-timedelta(1)).strftime("%d.%m.%Y"))
    ax.grid(color='grey', linestyle='--', linewidth=0.5)
    ax.set_title("")
    fig.tight_layout()
    fig.savefig("../figs/%s_Rts_Compare.png" % (clave,))
    """
    ################# Example of use of Rts_NB_psi and Rts_NB (not documented)
    T=100000
    for clave in claves: #Instance of the object and run the MCMC
        tst = Rts_NB_psi( clave, init_date=ZMs[clave][3], n=n)
        if T > 0:
            tst.RunMCMC(T=T)
        ### Plot the Rts
        close(1)
        fig, ax = subplots( num=1, figsize=( 10, 3.5) )
        tst.PlotRts( ax=ax)
        ax.set_title( ZMs[clave][0] + r", $R_t$ NB_psi.")
        fig.savefig("../figs/%s_Rts_NB_psi.png" % (clave,))
        ### Plot the posterior distribution of \psi
        close(3)
        fig, ax = subplots( num=3, figsize=( 5,5) )
        tst.PlotPostPsi(ax=ax)
        ax.set_title(ZMs[clave][0])
        fig.savefig("../figs/%s_Rts_NB_Post_psi.png" % clave)
        ### Fix \psi with the postrior expeted value and use that for PlotRts_NB
        close(2)
        fig, ax = subplots( num=2, figsize=( 10, 3.5) )
        psi = mean(tst.psi_samples) #Posterior mean of psi
        PlotRts_NB( '../data/%s.csv' % (clave,), init_date=ZMs[clave][3],\
            n=n, psi=psi, ax=ax)
        ax.set_title( ZMs[clave][0] + r", $R_t$ NB, fixed $\psi$.")
        fig.savefig("../figs/%s_Rts.png" % (clave,))
    """
| [
"scipy.stats.erlang",
"scipy.stats.gamma.rvs",
"numpy.sqrt",
"plotfrozen.PlotFrozenDist",
"numpy.log",
"scipy.stats.beta.logcdf",
"numpy.array",
"datetime.timedelta",
"scipy.stats.uniform.rvs",
"numpy.arange",
"numpy.flip",
"numpy.where",
"matplotlib.pyplot.close",
"numpy.exp",
"numpy.li... | [((1114, 1138), 'scipy.stats.erlang', 'erlang', ([], {'a': '(3)', 'scale': '(8 / 3)'}), '(a=3, scale=8 / 3)\n', (1120, 1138), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((2711, 2718), 'numpy.flip', 'flip', (['w'], {}), '(w)\n', (2715, 2718), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((3574, 3598), 'scipy.stats.erlang', 'erlang', ([], {'a': '(3)', 'scale': '(8 / 3)'}), '(a=3, scale=8 / 3)\n', (3580, 3598), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((6319, 6359), 'scipy.stats.beta.logcdf', 'beta.logcdf', (['x', '(mu * psi)', '((1 - mu) * psi)'], {}), '(x, mu * psi, (1 - mu) * psi)\n', (6330, 6359), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((6403, 6427), 'scipy.stats.erlang', 'erlang', ([], {'a': '(3)', 'scale': '(8 / 3)'}), '(a=3, scale=8 / 3)\n', (6409, 6427), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((8071, 8078), 'numpy.flip', 'flip', (['w'], {}), '(w)\n', (8075, 8078), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((8092, 8119), 'numpy.linspace', 'linspace', (['(0.1)', '(3.0)'], {'num': '(100)'}), '(0.1, 3.0, num=100)\n', (8100, 8119), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((9281, 9305), 'scipy.stats.erlang', 'erlang', ([], {'a': '(3)', 'scale': '(8 / 3)'}), '(a=3, scale=8 / 3)\n', (9287, 9305), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((32903, 32937), 'matplotlib.pyplot.rcParams.update', 'rcParams.update', (["{'font.size': 14}"], {}), "({'font.size': 14})\n", (32918, 32937), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((32942, 32954), 'matplotlib.pyplot.close', 'close', (['"""all"""'], {}), "('all')\n", (32947, 32954), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((33052, 33088), 
'matplotlib.pyplot.subplots', 'subplots', ([], {'num': '(30)', 'figsize': '(4.5, 3.5)'}), '(num=30, figsize=(4.5, 3.5))\n', (33060, 33088), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((35058, 35111), 'matplotlib.pyplot.subplots', 'subplots', ([], {'num': '(20)', 'nrows': '(1)', 'ncols': '(2)', 'figsize': '(10, 3.5)'}), '(num=20, nrows=1, ncols=2, figsize=(10, 3.5))\n', (35066, 35111), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((36814, 36850), 'matplotlib.pyplot.subplots', 'subplots', ([], {'num': '(31)', 'figsize': '(4.5, 3.5)'}), '(num=31, figsize=(4.5, 3.5))\n', (36822, 36850), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((4058, 4076), 'numpy.loadtxt', 'loadtxt', (['data_fnam'], {}), '(data_fnam)\n', (4065, 4076), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((4372, 4384), 'datetime.timedelta', 'timedelta', (['m'], {}), '(m)\n', (4381, 4384), False, 'from datetime import date, timedelta\n'), ((4422, 4452), 'matplotlib.pyplot.subplots', 'subplots', ([], {'figsize': '(n / 3, 3.5)'}), '(figsize=(n / 3, 3.5))\n', (4430, 4452), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((5188, 5214), 'numpy.arange', 'arange', (['(0.4)', '(3.4)'], {'step': '(0.2)'}), '(0.4, 3.4, step=0.2)\n', (5194, 5214), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((5976, 6080), 'numpy.savetxt', 'savetxt', (['csv_fnam', 'sv'], {'delimiter': '""", """', 'fmt': '"""%.1f"""', 'header': "('year, month, day, ' + q_str)", 'comments': '""""""'}), "(csv_fnam, sv, delimiter=', ', fmt='%.1f', header=\n 'year, month, day, ' + q_str, comments='')\n", (5983, 6080), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((7937, 7950), 'numpy.zeros', 'zeros', (['(q, n)'], {}), '((q, n))\n', (7942, 7950), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, 
ones\n'), ((8362, 8376), 'numpy.zeros', 'zeros', (['R.shape'], {}), '(R.shape)\n', (8367, 8376), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((8848, 8861), 'numpy.exp', 'exp', (['log_post'], {}), '(log_post)\n', (8851, 8861), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((9734, 9752), 'numpy.loadtxt', 'loadtxt', (['data_fnam'], {}), '(data_fnam)\n', (9741, 9752), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((10058, 10070), 'datetime.timedelta', 'timedelta', (['m'], {}), '(m)\n', (10067, 10070), False, 'from datetime import date, timedelta\n'), ((10108, 10138), 'matplotlib.pyplot.subplots', 'subplots', ([], {'figsize': '(n / 3, 3.5)'}), '(figsize=(n / 3, 3.5))\n', (10116, 10138), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((10865, 10891), 'numpy.arange', 'arange', (['(0.4)', '(3.4)'], {'step': '(0.2)'}), '(0.4, 3.4, step=0.2)\n', (10871, 10891), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((11653, 11757), 'numpy.savetxt', 'savetxt', (['csv_fnam', 'sv'], {'delimiter': '""", """', 'fmt': '"""%.1f"""', 'header': "('year, month, day, ' + q_str)", 'comments': '""""""'}), "(csv_fnam, sv, delimiter=', ', fmt='%.1f', header=\n 'year, month, day, ' + q_str, comments='')\n", (11660, 11757), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((11865, 11889), 'scipy.stats.erlang', 'erlang', ([], {'a': '(3)', 'scale': '(8 / 3)'}), '(a=3, scale=8 / 3)\n', (11871, 11889), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((13244, 13291), 'numpy.loadtxt', 'loadtxt', (["(workdir + 'data/' + data_fnam + '.csv')"], {}), "(workdir + 'data/' + data_fnam + '.csv')\n", (13251, 13291), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((13646, 13658), 'numpy.flip', 'flip', 
(['self.w'], {}), '(self.w)\n', (13650, 13658), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((13797, 13840), 'scipy.stats.gamma', 'gamma', (['self.Rt_pr_a'], {'scale': '(1 / self.Rt_pr_b)'}), '(self.Rt_pr_a, scale=1 / self.Rt_pr_b)\n', (13802, 13840), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((13907, 13935), 'scipy.stats.gamma', 'gamma', (['(3)'], {'scale': '(self.psi / 3)'}), '(3, scale=self.psi / 3)\n', (13912, 13935), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((14188, 14201), 'numpy.zeros', 'zeros', (['self.m'], {}), '(self.m)\n', (14193, 14201), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((14359, 14424), 'os.path.isfile', 'os.path.isfile', (["(workdir + 'output/' + self.data_fnam + '_rts.pkl')"], {}), "(workdir + 'output/' + self.data_fnam + '_rts.pkl')\n", (14373, 14424), False, 'import os\n'), ((17421, 17473), 'plotfrozen.PlotFrozenDist', 'PlotFrozenDist', (['self.psi_prior'], {'color': '"""green"""', 'ax': 'ax'}), "(self.psi_prior, color='green', ax=ax)\n", (17435, 17473), False, 'from plotfrozen import PlotFrozenDist\n'), ((21043, 21067), 'scipy.stats.erlang', 'erlang', ([], {'a': '(3)', 'scale': '(8 / 3)'}), '(a=3, scale=8 / 3)\n', (21049, 21067), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((22149, 22196), 'numpy.loadtxt', 'loadtxt', (["(workdir + 'data/' + data_fnam + '.csv')"], {}), "(workdir + 'data/' + data_fnam + '.csv')\n", (22156, 22196), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((22601, 22613), 'numpy.flip', 'flip', (['self.w'], {}), '(self.w)\n', (22605, 22613), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((24358, 24371), 'numpy.zeros', 'zeros', (['self.m'], {}), '(self.m)\n', (24363, 24371), False, 'from numpy import arange, diff, loadtxt, zeros, flip, 
array, log, quantile, ones\n'), ((25805, 25827), 'numpy.zeros', 'zeros', (['(self.N + 1, 7)'], {}), '((self.N + 1, 7))\n', (25810, 25827), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((28421, 28447), 'numpy.linspace', 'linspace', (['(0.01)', '(4)'], {'num': '(500)'}), '(0.01, 4, num=500)\n', (28429, 28447), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((32195, 32212), 'datetime.date', 'date', (['(2020)', '(2)', '(27)'], {}), '(2020, 2, 27)\n', (32199, 32212), False, 'from datetime import date, timedelta\n'), ((32273, 32289), 'datetime.date', 'date', (['(2020)', '(3)', '(7)'], {}), '(2020, 3, 7)\n', (32277, 32289), False, 'from datetime import date, timedelta\n'), ((32352, 32368), 'datetime.date', 'date', (['(2020)', '(3)', '(7)'], {}), '(2020, 3, 7)\n', (32356, 32368), False, 'from datetime import date, timedelta\n'), ((32429, 32445), 'datetime.date', 'date', (['(2020)', '(3)', '(2)'], {}), '(2020, 3, 2)\n', (32433, 32445), False, 'from datetime import date, timedelta\n'), ((32507, 32524), 'datetime.date', 'date', (['(2020)', '(3)', '(11)'], {}), '(2020, 3, 11)\n', (32511, 32524), False, 'from datetime import date, timedelta\n'), ((32586, 32602), 'datetime.date', 'date', (['(2020)', '(3)', '(1)'], {}), '(2020, 3, 1)\n', (32590, 32602), False, 'from datetime import date, timedelta\n'), ((32663, 32679), 'datetime.date', 'date', (['(2020)', '(3)', '(1)'], {}), '(2020, 3, 1)\n', (32667, 32679), False, 'from datetime import date, timedelta\n'), ((33111, 33135), 'scipy.stats.erlang', 'erlang', ([], {'a': '(3)', 'scale': '(8 / 3)'}), '(a=3, scale=8 / 3)\n', (33117, 33135), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((33219, 33243), 'scipy.stats.erlang', 'erlang', ([], {'a': '(5)', 'scale': '(9 / 5)'}), '(a=5, scale=9 / 5)\n', (33225, 33243), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((34194, 34231), 
'matplotlib.pyplot.subplots', 'subplots', ([], {'num': '(i + 1)', 'figsize': '(8, 3.5)'}), '(num=i + 1, figsize=(8, 3.5))\n', (34202, 34231), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((35575, 35591), 'datetime.timedelta', 'timedelta', (['tst.m'], {}), '(tst.m)\n', (35584, 35591), False, 'from datetime import date, timedelta\n'), ((36334, 36365), 'numpy.arange', 'arange', (['(m_max - tst.m)', 'm_max', '(1)'], {}), '(m_max - tst.m, m_max, 1)\n', (36340, 36365), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((36420, 36445), 'scipy.stats.gamma', 'gamma', (['a[-1]'], {'scale': 'b[-1]'}), '(a[-1], scale=b[-1])\n', (36425, 36445), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((36497, 36518), 'numpy.arange', 'arange', (['(0.8)', '(1.4)', '(0.2)'], {}), '(0.8, 1.4, 0.2)\n', (36503, 36518), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((2300, 2308), 'numpy.array', 'array', (['q'], {}), '(q)\n', (2305, 2308), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((2531, 2544), 'numpy.zeros', 'zeros', (['(q, n)'], {}), '((q, n))\n', (2536, 2544), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((2577, 2590), 'numpy.zeros', 'zeros', (['(q, n)'], {}), '((q, n))\n', (2582, 2590), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((2669, 2685), 'numpy.arange', 'arange', (['(0)', '(m + 1)'], {}), '(0, m + 1)\n', (2675, 2685), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((3424, 3480), 'scipy.stats.gamma.ppf', 'gamma.ppf', (['q', '(Rt_pr_a + S2)'], {'scale': '(1 / (S1 + 1 / Rt_pr_b))'}), '(q, Rt_pr_a + S2, scale=1 / (S1 + 1 / Rt_pr_b))\n', (3433, 3480), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((5440, 5457), 
'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (5449, 5457), False, 'from datetime import date, timedelta\n'), ((7726, 7734), 'numpy.array', 'array', (['q'], {}), '(q)\n', (7731, 7734), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((8029, 8045), 'numpy.arange', 'arange', (['(0)', '(m + 1)'], {}), '(0, m + 1)\n', (8035, 8045), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((8791, 8834), 'scipy.stats.gamma.logpdf', 'gamma.logpdf', (['R', 'Rt_pr_a'], {'scale': '(1 / Rt_pr_b)'}), '(R, Rt_pr_a, scale=1 / Rt_pr_b)\n', (8803, 8834), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((8907, 8918), 'numpy.cumsum', 'cumsum', (['pdf'], {}), '(pdf)\n', (8913, 8918), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((8963, 8976), 'scipy.stats.uniform.rvs', 'uniform.rvs', ([], {}), '()\n', (8974, 8976), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((11117, 11134), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (11126, 11134), False, 'from datetime import date, timedelta\n'), ((16109, 16121), 'numpy.ones', 'ones', (['self.n'], {}), '(self.n)\n', (16113, 16121), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((17387, 17411), 'matplotlib.pyplot.subplots', 'subplots', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (17395, 17411), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((17641, 17665), 'matplotlib.pyplot.subplots', 'subplots', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (17649, 17665), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((18156, 18174), 'numpy.zeros', 'zeros', (['(q, self.n)'], {}), '((q, self.n))\n', (18161, 18174), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((19021, 19038), 
'datetime.timedelta', 'timedelta', (['self.m'], {}), '(self.m)\n', (19030, 19038), False, 'from datetime import date, timedelta\n'), ((19093, 19128), 'matplotlib.pyplot.subplots', 'subplots', ([], {'figsize': '(self.n / 3, 3.5)'}), '(figsize=(self.n / 3, 3.5))\n', (19101, 19128), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((19953, 19979), 'numpy.arange', 'arange', (['(0.4)', '(3.4)'], {'step': '(0.2)'}), '(0.4, 3.4, step=0.2)\n', (19959, 19979), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((20829, 20933), 'numpy.savetxt', 'savetxt', (['csv_fnam', 'sv'], {'delimiter': '""", """', 'fmt': '"""%.1f"""', 'header': "('year, month, day, ' + q_str)", 'comments': '""""""'}), "(csv_fnam, sv, delimiter=', ', fmt='%.1f', header=\n 'year, month, day, ' + q_str, comments='')\n", (20836, 20933), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((24656, 24678), 'numpy.log', 'log', (['(self.data + 1e-06)'], {}), '(self.data + 1e-06)\n', (24659, 24678), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((24680, 24704), 'numpy.log', 'log', (['(self.Gammak + 1e-06)'], {}), '(self.Gammak + 1e-06)\n', (24683, 24704), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((25518, 25536), 'numpy.zeros', 'zeros', (['(q, self.n)'], {}), '((q, self.n))\n', (25523, 25536), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((25566, 25587), 'numpy.zeros', 'zeros', (['(q, self.pred)'], {}), '((q, self.pred))\n', (25571, 25587), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((27344, 27365), 'numpy.zeros', 'zeros', (['(self.pred, 2)'], {}), '((self.pred, 2))\n', (27349, 27365), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((28360, 28384), 'matplotlib.pyplot.subplots', 
'subplots', ([], {'figsize': '(5, 5)'}), '(figsize=(5, 5))\n', (28368, 28384), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((29344, 29361), 'datetime.timedelta', 'timedelta', (['self.m'], {}), '(self.m)\n', (29353, 29361), False, 'from datetime import date, timedelta\n'), ((29416, 29451), 'matplotlib.pyplot.subplots', 'subplots', ([], {'figsize': '(self.n / 3, 3.5)'}), '(figsize=(self.n / 3, 3.5))\n', (29424, 29451), False, 'from matplotlib.pyplot import subplots, rcParams, close\n'), ((29877, 29906), 'numpy.exp', 'exp', (['self.y[self.m - self.n:]'], {}), '(self.y[self.m - self.n:])\n', (29880, 29906), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((30914, 30958), 'numpy.arange', 'arange', (['plot_area[0]', 'plot_area[1]'], {'step': '(0.2)'}), '(plot_area[0], plot_area[1], step=0.2)\n', (30920, 30958), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((31808, 31945), 'numpy.savetxt', 'savetxt', (["(self.workdir + 'csv/' + csv_fnam + '.csv')", 'sv'], {'delimiter': '""", """', 'fmt': '"""%.1f"""', 'header': "('year, month, day, ' + q_str)", 'comments': '""""""'}), "(self.workdir + 'csv/' + csv_fnam + '.csv', sv, delimiter=', ', fmt=\n '%.1f', header='year, month, day, ' + q_str, comments='')\n", (31815, 31945), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((35380, 35411), 'numpy.arange', 'arange', (['(m_max - tst.m)', 'm_max', '(1)'], {}), '(m_max - tst.m, m_max, 1)\n', (35386, 35411), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((35488, 35513), 'scipy.stats.gamma', 'gamma', (['a[-1]'], {'scale': 'b[-1]'}), '(a[-1], scale=b[-1])\n', (35493, 35513), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((37119, 37143), 'scipy.stats.erlang', 'erlang', ([], {'a': '(5)', 'scale': '(9 / 5)'}), '(a=5, scale=9 / 5)\n', (37125, 37143), False, 'from scipy.stats 
import erlang, gamma, nbinom, uniform, beta\n'), ((3325, 3386), 'scipy.stats.gamma.rvs', 'gamma.rvs', (['(Rt_pr_a + S2)'], {'scale': '(1 / (S1 + 1 / Rt_pr_b))', 'size': 'q'}), '(Rt_pr_a + S2, scale=1 / (S1 + 1 / Rt_pr_b), size=q)\n', (3334, 3386), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((5415, 5427), 'datetime.timedelta', 'timedelta', (['n'], {}), '(n)\n', (5424, 5427), False, 'from datetime import date, timedelta\n'), ((11092, 11104), 'datetime.timedelta', 'timedelta', (['n'], {}), '(n)\n', (11101, 11104), False, 'from datetime import date, timedelta\n'), ((13576, 13597), 'numpy.arange', 'arange', (['(0)', '(self.m + 1)'], {}), '(0, self.m + 1)\n', (13582, 13597), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((16130, 16154), 'scipy.stats.uniform.rvs', 'uniform.rvs', ([], {'size': 'self.n'}), '(size=self.n)\n', (16141, 16154), False, 'from scipy.stats import erlang, gamma, nbinom, uniform, beta\n'), ((17956, 17964), 'numpy.array', 'array', (['q'], {}), '(q)\n', (17961, 17964), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((18519, 18568), 'numpy.quantile', 'quantile', (['self.twalk.Output[self.burnin:, i]'], {'q': 'q'}), '(self.twalk.Output[self.burnin:, i], q=q)\n', (18527, 18568), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((20230, 20247), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (20239, 20247), False, 'from datetime import date, timedelta\n'), ((22531, 22552), 'numpy.arange', 'arange', (['(0)', '(self.m + 1)'], {}), '(0, self.m + 1)\n', (22537, 22552), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((25253, 25261), 'numpy.array', 'array', (['q'], {}), '(q)\n', (25258, 25261), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((28542, 28548), 'numpy.log', 'log', 
(['y'], {}), '(y)\n', (28545, 28548), False, 'from numpy import arange, diff, loadtxt, zeros, flip, array, log, quantile, ones\n'), ((31209, 31226), 'datetime.timedelta', 'timedelta', ([], {'days': '(1)'}), '(days=1)\n', (31218, 31226), False, 'from datetime import date, timedelta\n'), ((36225, 36242), 'datetime.timedelta', 'timedelta', ([], {'days': '(4)'}), '(days=4)\n', (36234, 36242), False, 'from datetime import date, timedelta\n'), ((36902, 36919), 'datetime.timedelta', 'timedelta', ([], {'days': '(4)'}), '(days=4)\n', (36911, 36919), False, 'from datetime import date, timedelta\n'), ((37168, 37185), 'datetime.timedelta', 'timedelta', ([], {'days': '(4)'}), '(days=4)\n', (37177, 37185), False, 'from datetime import date, timedelta\n'), ((20200, 20217), 'datetime.timedelta', 'timedelta', (['self.n'], {}), '(self.n)\n', (20209, 20217), False, 'from datetime import date, timedelta\n'), ((28601, 28627), 'numpy.sqrt', 'sqrt', (['self.hiper[t + 1, 6]'], {}), '(self.hiper[t + 1, 6])\n', (28605, 28627), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((31179, 31196), 'datetime.timedelta', 'timedelta', (['self.n'], {}), '(self.n)\n', (31188, 31196), False, 'from datetime import date, timedelta\n'), ((34039, 34056), 'datetime.timedelta', 'timedelta', ([], {'days': '(4)'}), '(days=4)\n', (34048, 34056), False, 'from datetime import date, timedelta\n'), ((34362, 34379), 'datetime.timedelta', 'timedelta', ([], {'days': '(4)'}), '(days=4)\n', (34371, 34379), False, 'from datetime import date, timedelta\n'), ((35266, 35283), 'datetime.timedelta', 'timedelta', ([], {'days': '(4)'}), '(days=4)\n', (35275, 35283), False, 'from datetime import date, timedelta\n'), ((4895, 4911), 'datetime.timedelta', 'timedelta', (['(n - i)'], {}), '(n - i)\n', (4904, 4911), False, 'from datetime import date, timedelta\n'), ((9007, 9021), 'numpy.where', 'where', (['(cdf < u)'], {}), '(cdf < u)\n', (9012, 9021), False, 'from numpy import savetxt, linspace, exp, 
cumsum, where, append, sqrt\n'), ((10572, 10588), 'datetime.timedelta', 'timedelta', (['(n - i)'], {}), '(n - i)\n', (10581, 10588), False, 'from datetime import date, timedelta\n'), ((35704, 35724), 'datetime.timedelta', 'timedelta', (['(tst.m - i)'], {}), '(tst.m - i)\n', (35713, 35724), False, 'from datetime import date, timedelta\n'), ((36564, 36576), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (36573, 36576), False, 'from datetime import date, timedelta\n'), ((37349, 37361), 'datetime.timedelta', 'timedelta', (['(1)'], {}), '(1)\n', (37358, 37361), False, 'from datetime import date, timedelta\n'), ((9116, 9132), 'numpy.where', 'where', (['(cdf < qua)'], {}), '(cdf < qua)\n', (9121, 9132), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((19626, 19647), 'datetime.timedelta', 'timedelta', (['(self.n - i)'], {}), '(self.n - i)\n', (19635, 19647), False, 'from datetime import date, timedelta\n'), ((30578, 30599), 'datetime.timedelta', 'timedelta', (['(self.n - i)'], {}), '(self.n - i)\n', (30587, 30599), False, 'from datetime import date, timedelta\n'), ((27064, 27090), 'numpy.sqrt', 'sqrt', (['self.hiper[t + 1, 6]'], {}), '(self.hiper[t + 1, 6])\n', (27068, 27090), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((27240, 27266), 'numpy.sqrt', 'sqrt', (['self.hiper[t + 1, 6]'], {}), '(self.hiper[t + 1, 6])\n', (27244, 27266), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((27982, 28009), 'numpy.sqrt', 'sqrt', (['self.pred_hiper[k, 1]'], {}), '(self.pred_hiper[k, 1])\n', (27986, 28009), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n'), ((28159, 28186), 'numpy.sqrt', 'sqrt', (['self.pred_hiper[k, 1]'], {}), '(self.pred_hiper[k, 1])\n', (28163, 28186), False, 'from numpy import savetxt, linspace, exp, cumsum, where, append, sqrt\n')] |
import FWCore.ParameterSet.Config as cms
process = cms.Process("ProcessOne")
process.load("CondCore.DBCommon.CondDBCommon_cfi")
process.CondDBCommon.DBParameters.authenticationPath = '/nfshome0/popcondev/conddb'
#
# Choose the output database
#
process.CondDBCommon.connect = 'oracle://cms_orcon_prod/CMS_COND_42X_ECAL_LASP'
#process.CondDBCommon.connect = 'sqlite_file:DB.db'
process.MessageLogger = cms.Service("MessageLogger",
debugModules = cms.untracked.vstring('*'),
destinations = cms.untracked.vstring('cout')
)
process.source = cms.Source("EmptyIOVSource",
firstValue = cms.uint64(1),
lastValue = cms.uint64(1),
timetype = cms.string('runnumber'),
interval = cms.uint64(1)
)
process.PoolDBESSource = cms.ESSource("PoolDBESSource",
process.CondDBCommon,
timetype = cms.untracked.string('timestamp'),
toGet = cms.VPSet(cms.PSet(
record = cms.string('EcalLaserAPDPNRatiosRcd'),
tag = cms.string('EcalLaserAPDPNRatios_last')
))
)
process.PoolDBOutputService = cms.Service("PoolDBOutputService",
process.CondDBCommon,
logconnect = cms.untracked.string('sqlite_file:DBLog.db'),
timetype = cms.untracked.string('timestamp'),
toPut = cms.VPSet(cms.PSet(
record = cms.string('EcalLaserAPDPNRatiosRcd'),
tag = cms.string('EcalLaserAPDPNRatios_last')
))
)
#
# Be sure to comment the following line while testing
#
#process.PoolDBOutputService.logconnect = cms.untracked.string('oracle://cms_orcon_prod/CMS_COND_31X_POPCONLOG')
process.Test1 = cms.EDAnalyzer("ExTestEcalLaserAnalyzer",
SinceAppendMode = cms.bool(True),
record = cms.string('EcalLaserAPDPNRatiosRcd'),
loggingOn = cms.untracked.bool(True),
Source = cms.PSet(
# maxtime is mandatory
# it can be expressed either as an absolute time with format YYYY-MM-DD HH24:MI:SS
# or as a relative time w.r.t. now, using -N, where N is expressed in units
# of hours
# maxtime = cms.string("-40"),
maxtime = cms.string("2012-12-12 23:59:59"),
sequences = cms.string("16"),
OnlineDBUser = cms.string('CMS_ECAL_LASER_COND'),
# debug must be False for production
debug = cms.bool(False),
# if fake is True, no insertion in the db is performed
fake = cms.bool(True),
OnlineDBPassword = cms.string('<PASSWORD>'),
OnlineDBSID = cms.string('CMS_OMDS_LB')
)
)
process.p = cms.Path(process.Test1)
| [
"FWCore.ParameterSet.Config.string",
"FWCore.ParameterSet.Config.untracked.string",
"FWCore.ParameterSet.Config.bool",
"FWCore.ParameterSet.Config.Process",
"FWCore.ParameterSet.Config.untracked.vstring",
"FWCore.ParameterSet.Config.untracked.bool",
"FWCore.ParameterSet.Config.Path",
"FWCore.Parameter... | [((52, 77), 'FWCore.ParameterSet.Config.Process', 'cms.Process', (['"""ProcessOne"""'], {}), "('ProcessOne')\n", (63, 77), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2957, 2980), 'FWCore.ParameterSet.Config.Path', 'cms.Path', (['process.Test1'], {}), '(process.Test1)\n', (2965, 2980), True, 'import FWCore.ParameterSet.Config as cms\n'), ((483, 509), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""*"""'], {}), "('*')\n", (504, 509), True, 'import FWCore.ParameterSet.Config as cms\n'), ((562, 591), 'FWCore.ParameterSet.Config.untracked.vstring', 'cms.untracked.vstring', (['"""cout"""'], {}), "('cout')\n", (583, 591), True, 'import FWCore.ParameterSet.Config as cms\n'), ((718, 731), 'FWCore.ParameterSet.Config.uint64', 'cms.uint64', (['(1)'], {}), '(1)\n', (728, 731), True, 'import FWCore.ParameterSet.Config as cms\n'), ((773, 786), 'FWCore.ParameterSet.Config.uint64', 'cms.uint64', (['(1)'], {}), '(1)\n', (783, 786), True, 'import FWCore.ParameterSet.Config as cms\n'), ((827, 850), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""runnumber"""'], {}), "('runnumber')\n", (837, 850), True, 'import FWCore.ParameterSet.Config as cms\n'), ((891, 904), 'FWCore.ParameterSet.Config.uint64', 'cms.uint64', (['(1)'], {}), '(1)\n', (901, 904), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1101, 1134), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""timestamp"""'], {}), "('timestamp')\n", (1121, 1134), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1536, 1580), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""sqlite_file:DBLog.db"""'], {}), "('sqlite_file:DBLog.db')\n", (1556, 1580), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1635, 1668), 'FWCore.ParameterSet.Config.untracked.string', 'cms.untracked.string', (['"""timestamp"""'], {}), "('timestamp')\n", (1655, 1668), True, 'import FWCore.ParameterSet.Config as cms\n'), 
((2145, 2159), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(True)'], {}), '(True)\n', (2153, 2159), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2174, 2211), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""EcalLaserAPDPNRatiosRcd"""'], {}), "('EcalLaserAPDPNRatiosRcd')\n", (2184, 2211), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2229, 2253), 'FWCore.ParameterSet.Config.untracked.bool', 'cms.untracked.bool', (['(True)'], {}), '(True)\n', (2247, 2253), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2538, 2571), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""2012-12-12 23:59:59"""'], {}), "('2012-12-12 23:59:59')\n", (2548, 2571), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2593, 2609), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""16"""'], {}), "('16')\n", (2603, 2609), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2636, 2669), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""CMS_ECAL_LASER_COND"""'], {}), "('CMS_ECAL_LASER_COND')\n", (2646, 2669), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2728, 2743), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(False)'], {}), '(False)\n', (2736, 2743), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2819, 2833), 'FWCore.ParameterSet.Config.bool', 'cms.bool', (['(True)'], {}), '(True)\n', (2827, 2833), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2862, 2886), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""<PASSWORD>"""'], {}), "('<PASSWORD>')\n", (2872, 2886), True, 'import FWCore.ParameterSet.Config as cms\n'), ((2910, 2935), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""CMS_OMDS_LB"""'], {}), "('CMS_OMDS_LB')\n", (2920, 2935), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1215, 1252), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""EcalLaserAPDPNRatiosRcd"""'], {}), "('EcalLaserAPDPNRatiosRcd')\n", (1225, 1252), True, 'import FWCore.ParameterSet.Config as cms\n'), 
((1264, 1303), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""EcalLaserAPDPNRatios_last"""'], {}), "('EcalLaserAPDPNRatios_last')\n", (1274, 1303), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1753, 1790), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""EcalLaserAPDPNRatiosRcd"""'], {}), "('EcalLaserAPDPNRatiosRcd')\n", (1763, 1790), True, 'import FWCore.ParameterSet.Config as cms\n'), ((1802, 1841), 'FWCore.ParameterSet.Config.string', 'cms.string', (['"""EcalLaserAPDPNRatios_last"""'], {}), "('EcalLaserAPDPNRatios_last')\n", (1812, 1841), True, 'import FWCore.ParameterSet.Config as cms\n')] |
from algosdk import logic
from algosdk.future.transaction import ApplicationOptInTxn, AssetOptInTxn, ApplicationNoOpTxn, PaymentTxn, AssetTransferTxn
from ..contract_strings import algofi_manager_strings as manager_strings
from .prepend import get_init_txns
from ..utils import TransactionGroup, Transactions, randint, int_to_bytes
OPT_IN_MIN_BALANCE=0.65
def prepare_staking_contract_optin_transactions(manager_app_id, market_app_id, sender, storage_address, suggested_params):
"""Returns a :class:`TransactionGroup` object representing a staking contract opt in
group transaction. The sender and storage account opt in to the staking application
and the storage account is rekeyed to the manager account address, rendering it
unable to be transacted against by the sender and therefore immutable.
:param manager_app_id: id of the manager application
:type manager_app_id: int
:param max_atomic_opt_in_market_app_ids: max opt in market app ids
:type max_atomic_opt_in_market_app_ids: list
:param sender: account address for the sender
:type sender: string
:param storage_address: address of the storage account
:type storage_address: string
:param suggested_params: suggested transaction params
:type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
:return: :class:`TransactionGroup` object representing a manager opt in group transaction
:rtype: :class:`TransactionGroup`
"""
txn_payment = PaymentTxn(
sender=sender,
sp=suggested_params,
receiver=storage_address,
amt=int(OPT_IN_MIN_BALANCE*1e6)
)
txn_market = ApplicationOptInTxn(
sender=storage_address,
sp=suggested_params,
index=market_app_id
)
txn_user_opt_in_manager = ApplicationOptInTxn(
sender=sender,
sp=suggested_params,
index=manager_app_id
)
app_address = logic.get_application_address(manager_app_id)
txn_storage_opt_in_manager = ApplicationOptInTxn(
sender=storage_address,
sp=suggested_params,
index=manager_app_id,
rekey_to=app_address
)
txn_group = TransactionGroup([txn_payment, txn_market, txn_user_opt_in_manager, txn_storage_opt_in_manager])
return txn_group
def prepare_stake_transactions(sender, suggested_params, storage_account, amount, manager_app_id, market_app_id, market_address, oracle_app_id, asset_id=None):
"""Returns a :class:`TransactionGroup` object representing a stake
transaction against the algofi protocol. The sender sends assets to the
staking account and is credited with a stake.
:param sender: account address for the sender
:type sender: string
:param suggested_params: suggested transaction params
:type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
:param storage_account: storage account address for sender
:type storage_account: string
:param amount: amount of asset to supply for minting collateral
:type amount: int
:param manager_app_id: id of the manager application
:type manager_app_id: int
:param market_app_id: id of the asset market application
:type market_app_id: int
:param market_address: account address for the market application
:type market_address: string
:param oracle_app_id: id of the asset market application
:type oracle_app_id: int
:param asset_id: asset id of the asset being supplied, defaults to None (algo)
:type asset_id: int, optional
:return: :class:`TransactionGroup` object representing a mint to collateral group transaction
:rtype: :class:`TransactionGroup`
"""
supported_oracle_app_ids = [oracle_app_id]
supported_market_app_ids = [market_app_id]
prefix_transactions = get_init_txns(
transaction_type=Transactions.MINT_TO_COLLATERAL,
sender=sender,
suggested_params=suggested_params,
manager_app_id=manager_app_id,
supported_market_app_ids=supported_market_app_ids,
supported_oracle_app_ids=supported_oracle_app_ids,
storage_account=storage_account
)
txn0 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=manager_app_id,
app_args=[manager_strings.mint_to_collateral.encode()],
)
txn1 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=market_app_id,
app_args=[manager_strings.mint_to_collateral.encode()],
foreign_apps=[manager_app_id],
accounts=[storage_account]
)
if asset_id:
txn2 = AssetTransferTxn(
sender=sender,
sp=suggested_params,
receiver=market_address,
amt=amount,
index=asset_id
)
else:
txn2 = PaymentTxn(
sender=sender,
sp=suggested_params,
receiver=market_address,
amt=amount
)
txn_group = TransactionGroup(prefix_transactions + [txn0, txn1, txn2])
return txn_group
def prepare_unstake_transactions(sender, suggested_params, storage_account, amount, manager_app_id, market_app_id, oracle_app_id, asset_id=None):
"""Returns a :class:`TransactionGroup` object representing a remove stake
group transaction against the algofi protocol. The sender requests to remove stake
from a stake acount and if successful, the stake is removed.
:param sender: account address for the sender
:type sender: string
:param suggested_params: suggested transaction params
:type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
:param storage_account: storage account address for sender
:type storage_account: string
:param amount: amount of collateral to remove from the market
:type amount: int
:param asset_id: asset id of the asset underlying the collateral
:type asset_id: int
:param manager_app_id: id of the manager application
:type manager_app_id: int
:param market_app_id: id of the market application of the collateral
:type market_app_id: int
:param oracle_app_id: id of the oracle application of the collateral
:type oracle_app_id: int
:return: :class:`TransactionGroup` object representing a remove collateral underlying group transaction
:rtype: :class:`TransactionGroup`
"""
supported_market_app_ids = [market_app_id]
supported_oracle_app_ids = [oracle_app_id]
prefix_transactions = get_init_txns(
transaction_type=Transactions.REMOVE_COLLATERAL_UNDERLYING,
sender=sender,
suggested_params=suggested_params,
manager_app_id=manager_app_id,
supported_market_app_ids=supported_market_app_ids,
supported_oracle_app_ids=supported_oracle_app_ids,
storage_account=storage_account
)
txn0 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=manager_app_id,
app_args=[manager_strings.remove_collateral_underlying.encode(), int_to_bytes(amount)]
)
if asset_id:
txn1 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=market_app_id,
app_args=[manager_strings.remove_collateral_underlying.encode()],
foreign_apps=[manager_app_id],
foreign_assets=[asset_id],
accounts=[storage_account]
)
else:
txn1 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=market_app_id,
app_args=[manager_strings.remove_collateral_underlying.encode()],
foreign_apps=[manager_app_id],
accounts=[storage_account]
)
txn_group = TransactionGroup(prefix_transactions + [txn0, txn1])
return txn_group
def prepare_claim_staking_rewards_transactions(sender, suggested_params, storage_account, manager_app_id, market_app_id, oracle_app_id, foreign_assets):
"""Returns a :class:`TransactionGroup` object representing a claim rewards
underlying group transaction against the algofi protocol. The sender requests
to claim rewards from the manager acount. If not, the account sends
back the user the amount of asset underlying their posted collateral.
:param sender: account address for the sender
:type sender: string
:param suggested_params: suggested transaction params
:type suggested_params: :class:`algosdk.future.transaction.SuggestedParams` object
:param storage_account: storage account address for sender
:type storage_account: string
:param manager_app_id: id of the manager application
:type manager_app_id: int
:param market_app_id: id of the market application of the collateral
:type market_app_id: int
:param oracle_app_id: id of the oracle application
:type oracle_app_id: int
:param foreign_assets: list of rewards assets in the staking contract
:type foreign_assets: list
:return: :class:`TransactionGroup` object representing a claim rewards transaction
:rtype: :class:`TransactionGroup`
"""
supported_market_app_ids = [market_app_id]
supported_oracle_app_ids = [oracle_app_id]
prefix_transactions = get_init_txns(
transaction_type=Transactions.CLAIM_REWARDS,
sender=sender,
suggested_params=suggested_params,
manager_app_id=manager_app_id,
supported_market_app_ids=supported_market_app_ids,
supported_oracle_app_ids=supported_oracle_app_ids,
storage_account=storage_account
)
txn0 = ApplicationNoOpTxn(
sender=sender,
sp=suggested_params,
index=manager_app_id,
app_args=[manager_strings.claim_rewards.encode()],
accounts=[storage_account],
foreign_assets=foreign_assets
)
txn_group = TransactionGroup(prefix_transactions + [txn0])
return txn_group | [
"algosdk.logic.get_application_address",
"algosdk.future.transaction.ApplicationOptInTxn",
"algosdk.future.transaction.PaymentTxn",
"algosdk.future.transaction.AssetTransferTxn"
] | [((1659, 1749), 'algosdk.future.transaction.ApplicationOptInTxn', 'ApplicationOptInTxn', ([], {'sender': 'storage_address', 'sp': 'suggested_params', 'index': 'market_app_id'}), '(sender=storage_address, sp=suggested_params, index=\n market_app_id)\n', (1678, 1749), False, 'from algosdk.future.transaction import ApplicationOptInTxn, AssetOptInTxn, ApplicationNoOpTxn, PaymentTxn, AssetTransferTxn\n'), ((1805, 1882), 'algosdk.future.transaction.ApplicationOptInTxn', 'ApplicationOptInTxn', ([], {'sender': 'sender', 'sp': 'suggested_params', 'index': 'manager_app_id'}), '(sender=sender, sp=suggested_params, index=manager_app_id)\n', (1824, 1882), False, 'from algosdk.future.transaction import ApplicationOptInTxn, AssetOptInTxn, ApplicationNoOpTxn, PaymentTxn, AssetTransferTxn\n'), ((1931, 1976), 'algosdk.logic.get_application_address', 'logic.get_application_address', (['manager_app_id'], {}), '(manager_app_id)\n', (1960, 1976), False, 'from algosdk import logic\n'), ((2010, 2123), 'algosdk.future.transaction.ApplicationOptInTxn', 'ApplicationOptInTxn', ([], {'sender': 'storage_address', 'sp': 'suggested_params', 'index': 'manager_app_id', 'rekey_to': 'app_address'}), '(sender=storage_address, sp=suggested_params, index=\n manager_app_id, rekey_to=app_address)\n', (2029, 2123), False, 'from algosdk.future.transaction import ApplicationOptInTxn, AssetOptInTxn, ApplicationNoOpTxn, PaymentTxn, AssetTransferTxn\n'), ((4622, 4732), 'algosdk.future.transaction.AssetTransferTxn', 'AssetTransferTxn', ([], {'sender': 'sender', 'sp': 'suggested_params', 'receiver': 'market_address', 'amt': 'amount', 'index': 'asset_id'}), '(sender=sender, sp=suggested_params, receiver=\n market_address, amt=amount, index=asset_id)\n', (4638, 4732), False, 'from algosdk.future.transaction import ApplicationOptInTxn, AssetOptInTxn, ApplicationNoOpTxn, PaymentTxn, AssetTransferTxn\n'), ((4823, 4911), 'algosdk.future.transaction.PaymentTxn', 'PaymentTxn', ([], {'sender': 'sender', 'sp': 
'suggested_params', 'receiver': 'market_address', 'amt': 'amount'}), '(sender=sender, sp=suggested_params, receiver=market_address, amt\n =amount)\n', (4833, 4911), False, 'from algosdk.future.transaction import ApplicationOptInTxn, AssetOptInTxn, ApplicationNoOpTxn, PaymentTxn, AssetTransferTxn\n')] |
import rumps
import requests
import json
API_URL = 'https://koinex.in/api/ticker'
UPDATE_INTERVAL = 60
CURRENCIES = {
'Bitcoin': 'BTC',
'Ethereum': 'ETH',
'Ripple': 'XRP',
'Litecoin': 'LTC',
'Bitcoin Cash': 'BCH',
}
class KoinexStatusBarApp(rumps.App):
def __init__(self):
super(KoinexStatusBarApp, self).__init__("Koinex")
self.currencies = CURRENCIES.keys()
self.menu = CURRENCIES.keys()
self.enabled = ['Bitcoin', 'Ripple']
self.prices = {}
# Initialize click handlers
for item in self.menu:
rumps.clicked(item)(self.toggle_currency)
# Add check to menu items which are enabled
for item in self.enabled:
self.menu[item].state = 1
# Add separator
self.menu.add(None)
@rumps.timer(UPDATE_INTERVAL)
def update(self, sender):
response = requests.get(API_URL)
title = ''
if response.status_code == 200:
data = json.loads(response.content)
self.prices = data.get('prices', {})
for currency in self.enabled:
short = CURRENCIES.get(currency)
title += u'{} \u20B9 {} | '.format(short, self.prices.get(short))
self.title = title[:-3] # Last 3 characters will be ' | '
def toggle_currency(self, menuitem):
currency = menuitem.title
if currency in self.enabled:
self.enabled.remove(currency)
menuitem.state = 0
else:
self.enabled.append(currency)
menuitem.state = 1
self.update(None)
if __name__ == "__main__":
KoinexStatusBarApp().run() | [
"rumps.clicked",
"json.loads",
"rumps.timer",
"requests.get"
] | [((819, 847), 'rumps.timer', 'rumps.timer', (['UPDATE_INTERVAL'], {}), '(UPDATE_INTERVAL)\n', (830, 847), False, 'import rumps\n'), ((897, 918), 'requests.get', 'requests.get', (['API_URL'], {}), '(API_URL)\n', (909, 918), False, 'import requests\n'), ((997, 1025), 'json.loads', 'json.loads', (['response.content'], {}), '(response.content)\n', (1007, 1025), False, 'import json\n'), ((593, 612), 'rumps.clicked', 'rumps.clicked', (['item'], {}), '(item)\n', (606, 612), False, 'import rumps\n')] |
import numpy as np
from numpy.core.fromnumeric import mean
from numpy.core.numeric import True_
from numpy.testing._private.utils import rand
from polynomial_regression import PolynomialRegression
from generate_regression_data import generate_regression_data
from metrics import mean_squared_error # mse
from math import log # use if scale too large to see error
from k_nearest_neighbor import KNearestNeighbor
try:
import matplotlib.pyplot as plt
except:
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
if __name__ == '__main__':
# Number 7, split A
degree = 4
N = 100
x, y = generate_regression_data(degree, N, amount_of_noise=0.1)
rand_sampl = np.random.choice(N, N, replace=False) # do not reselect numbers
x_training, y_training = x[rand_sampl[:10]], y[rand_sampl[:10]]
x_test, y_test = x[rand_sampl[10:]], y[rand_sampl[10:]]
plots = []
mse_training = []
mse_test = []
# to 9 degrees
for i in range(9):
poly = PolynomialRegression(i)
poly.fit(x_training, y_training)
poly.visualize(x_training, y_training, path=f"../plots_N7_splitA/training_plot_degree_{i}",
title=f"Training Plot Degree {i}")
# test will be red
poly.visualize(x_test, y_test, path=f"../plots_N7_splitA/test_plot_degree_{i}",
title=f"Test Plot Degree {i}", color='r')
y_hat_training = poly.predict(x_training) # predicted value
mse_training.append(mean_squared_error(y_training, y_hat_training))
y_hat_test = poly.predict(x_test)
mse_test.append(mean_squared_error(y_test, y_hat_test))
plots.append(poly)
plt.clf() # clear figure
plt.figure()
# log was needed to scale
plt.plot(range(9), [log(mse_training[i]) for i in range(9)], label="training error")
plt.plot(range(9), [log(mse_test[i]) for i in range(9)], label="test error")
plt.title("Error as a Function of Degree")
plt.xlabel("degree")
plt.ylabel("error")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N7_splitA/error_as_a_function_of_degree.png")
# get the two lowest errors
low_test_err_degree = mse_test.index(min(mse_test))
low_training_err_degree = mse_training.index(min(mse_training))
plt.clf() # clear figure
plt.figure()
plt.scatter(x_training, y_training)
plt.plot(np.sort(plots[low_training_err_degree].X_training), plots[low_training_err_degree].f, label=f"lowest training error curve with degree = {low_training_err_degree}")
plt.plot(np.sort(plots[low_test_err_degree].X_training), plots[low_test_err_degree].f, label=f"lowest test error curve with degree = {low_test_err_degree}")
plt.title("Lowest Training and Test Errors")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N7_splitA/lowest_training_and_test_error.png")
# Number 10, split A
k = {1, 3, 5, 7, 9}
kplots = []
mse_training_k = []
mse_test_k = []
kx_training = np.reshape(x_training, (-1,2))
ky_training = np.reshape(y_training, (-1,2))
kx_test = np.reshape(x_test, (-1, 2))
ky_test = np.reshape(y_test, (-1,2))
#print(kx_training)
#print(kx_training.shape)
for i in k:
knn = KNearestNeighbor(i, distance_measure="euclidean", aggregator="mean")
knn.fit(kx_training, ky_training)
#print(f"x_training = {x_training.shape}")
k_training = knn.predict(kx_training)
mse_training_k.append(mean_squared_error(ky_training, k_training))
k_test = knn.predict(kx_test)
mse_test_k.append(mean_squared_error(ky_test, k_test))
kplots.append(knn)
plt.clf() # clear figure
plt.figure()
plt.plot(range(5), [(mse_training_k[i]) for i in range(5)], label="training error")
plt.plot(range(5), [(mse_test_k[i]) for i in range(5)], label="test error")
plt.title("Error as a Function of k")
plt.xlabel("k")
plt.ylabel("error")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N10_splitA/error_as_a_function_of_k.png")
low_test_err_k = mse_test_k.index(min(mse_test_k))
plt.clf() # clear figure
plt.figure()
plt.scatter(x_training, y_training)
plt.plot(np.sort(kplots[low_test_err_k]), kplots[low_test_err_k], label=f"lowest test error curve with k = {low_test_err_k}")
plt.title("Lowest Test Error")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N10_splitA/lowest_test_error.png")
# Number 9, split B
rand_sampl = np.random.choice(N, N, replace=False) # do not reselect numbers
x_training, y_training = x[rand_sampl[:50]], y[rand_sampl[:50]]
x_test, y_test = x[rand_sampl[50:]], y[rand_sampl[50:]]
plots = []
mse_training = []
mse_test = []
# to 9 degrees
for i in range(9):
poly = PolynomialRegression(i)
poly.fit(x_training, y_training)
poly.visualize(x_training, y_training, path=f"../plots_N9_splitB/training_plot_degree_{i}",
title=f"Training Plot Degree {i}")
# test will be red
poly.visualize(x_test, y_test, path=f"../plots_N9_splitB/test_plot_degree_{i}",
title=f"Test Plot Degree {i}", color='r')
y_hat_training = poly.predict(x_training) # predicted value
mse_training.append(mean_squared_error(y_training, y_hat_training))
y_hat_test = poly.predict(x_test)
mse_test.append(mean_squared_error(y_test, y_hat_test))
plots.append(poly)
plt.clf() # clear figure
plt.figure()
# log was needed to scale
plt.plot(range(9), [log(mse_training[i]) for i in range(9)], label="training error")
plt.plot(range(9), [log(mse_test[i]) for i in range(9)], label="test error")
plt.title("Error as a Function of Degree")
plt.xlabel("degree")
plt.ylabel("error")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N9_splitB/error_as_a_function_of_degree.png")
# get the two lowest errors
low_test_err_degree = mse_test.index(min(mse_test))
low_training_err_degree = mse_training.index(min(mse_training))
plt.clf() # clear figure
plt.figure()
plt.scatter(x_training, y_training)
plt.plot(np.sort(plots[low_training_err_degree].X_training), plots[low_training_err_degree].f, label=f"lowest training error curve with degree = {low_training_err_degree}")
plt.plot(np.sort(plots[low_test_err_degree].X_training), plots[low_test_err_degree].f, label=f"lowest test error curve with degree = {low_test_err_degree}")
plt.title("Lowest Training and Test Errors")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N9_splitB/lowest_training_and_test_error.png")
# Number 10, split B
k = {1, 3, 5, 7, 9}
kplots = []
mse_training_k = []
mse_test_k = []
kx_training = np.reshape(x_training, (-1,2))
ky_training = np.reshape(y_training, (-1,2))
kx_test = np.reshape(x_test, (-1, 2))
ky_test = np.reshape(y_test, (-1,2))
#print(kx_training)
#print(kx_training.shape)
for i in k:
knn = KNearestNeighbor(i, distance_measure="euclidean", aggregator="mean")
knn.fit(kx_training, ky_training)
#print(f"x_training = {x_training.shape}")
k_training = knn.predict(kx_training)
mse_training_k.append(mean_squared_error(ky_training, k_training))
k_test = knn.predict(kx_test)
mse_test_k.append(mean_squared_error(ky_test, k_test))
kplots.append(poly)
plt.clf() # clear figure
plt.figure()
plt.plot(range(5), [(mse_training_k[i]) for i in range(5)], label="training error")
plt.plot(range(5), [(mse_test_k[i]) for i in range(5)], label="test error")
plt.title("Error as a Function of k")
plt.xlabel("k")
plt.ylabel("error")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N10_splitB/error_as_a_function_of_k.png")
low_test_err_k = mse_test_k.index(min(mse_test_k))
plt.clf() # clear figure
plt.figure()
plt.scatter(x_training, y_training)
plt.plot(np.sort(kplots[low_test_err_k].X_training), kplots[low_test_err_k].f, label=f"lowest test error curve with k = {low_test_err_k}")
plt.title("Lowest Test Error")
plt.xlabel("x")
plt.ylabel("y")
plt.legend()
plt.grid(True)
plt.savefig("../plots_N10_splitB/lowest_test_error.png")
| [
"generate_regression_data.generate_regression_data",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.savefig",
"numpy.reshape",
"matplotlib.pyplot.ylabel",
"numpy.random.choice",
"matplotlib.use",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.clf",
"polynomial_regression.PolynomialRegression",
"nu... | [((637, 693), 'generate_regression_data.generate_regression_data', 'generate_regression_data', (['degree', 'N'], {'amount_of_noise': '(0.1)'}), '(degree, N, amount_of_noise=0.1)\n', (661, 693), False, 'from generate_regression_data import generate_regression_data\n'), ((712, 749), 'numpy.random.choice', 'np.random.choice', (['N', 'N'], {'replace': '(False)'}), '(N, N, replace=False)\n', (728, 749), True, 'import numpy as np\n'), ((1756, 1765), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1763, 1765), True, 'import matplotlib.pyplot as plt\n'), ((1786, 1798), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1796, 1798), True, 'import matplotlib.pyplot as plt\n'), ((2003, 2045), 'matplotlib.pyplot.title', 'plt.title', (['"""Error as a Function of Degree"""'], {}), "('Error as a Function of Degree')\n", (2012, 2045), True, 'import matplotlib.pyplot as plt\n'), ((2050, 2070), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""degree"""'], {}), "('degree')\n", (2060, 2070), True, 'import matplotlib.pyplot as plt\n'), ((2075, 2094), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""error"""'], {}), "('error')\n", (2085, 2094), True, 'import matplotlib.pyplot as plt\n'), ((2099, 2111), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2109, 2111), True, 'import matplotlib.pyplot as plt\n'), ((2116, 2130), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2124, 2130), True, 'import matplotlib.pyplot as plt\n'), ((2135, 2202), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots_N7_splitA/error_as_a_function_of_degree.png"""'], {}), "('../plots_N7_splitA/error_as_a_function_of_degree.png')\n", (2146, 2202), True, 'import matplotlib.pyplot as plt\n'), ((2365, 2374), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2372, 2374), True, 'import matplotlib.pyplot as plt\n'), ((2395, 2407), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2405, 2407), True, 'import matplotlib.pyplot as plt\n'), ((2412, 
2447), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_training', 'y_training'], {}), '(x_training, y_training)\n', (2423, 2447), True, 'import matplotlib.pyplot as plt\n'), ((2790, 2834), 'matplotlib.pyplot.title', 'plt.title', (['"""Lowest Training and Test Errors"""'], {}), "('Lowest Training and Test Errors')\n", (2799, 2834), True, 'import matplotlib.pyplot as plt\n'), ((2839, 2854), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (2849, 2854), True, 'import matplotlib.pyplot as plt\n'), ((2859, 2874), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (2869, 2874), True, 'import matplotlib.pyplot as plt\n'), ((2879, 2891), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2889, 2891), True, 'import matplotlib.pyplot as plt\n'), ((2896, 2910), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (2904, 2910), True, 'import matplotlib.pyplot as plt\n'), ((2915, 2983), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots_N7_splitA/lowest_training_and_test_error.png"""'], {}), "('../plots_N7_splitA/lowest_training_and_test_error.png')\n", (2926, 2983), True, 'import matplotlib.pyplot as plt\n'), ((3113, 3144), 'numpy.reshape', 'np.reshape', (['x_training', '(-1, 2)'], {}), '(x_training, (-1, 2))\n', (3123, 3144), True, 'import numpy as np\n'), ((3162, 3193), 'numpy.reshape', 'np.reshape', (['y_training', '(-1, 2)'], {}), '(y_training, (-1, 2))\n', (3172, 3193), True, 'import numpy as np\n'), ((3207, 3234), 'numpy.reshape', 'np.reshape', (['x_test', '(-1, 2)'], {}), '(x_test, (-1, 2))\n', (3217, 3234), True, 'import numpy as np\n'), ((3249, 3276), 'numpy.reshape', 'np.reshape', (['y_test', '(-1, 2)'], {}), '(y_test, (-1, 2))\n', (3259, 3276), True, 'import numpy as np\n'), ((3781, 3790), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3788, 3790), True, 'import matplotlib.pyplot as plt\n'), ((3811, 3823), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3821, 3823), 
True, 'import matplotlib.pyplot as plt\n'), ((3996, 4033), 'matplotlib.pyplot.title', 'plt.title', (['"""Error as a Function of k"""'], {}), "('Error as a Function of k')\n", (4005, 4033), True, 'import matplotlib.pyplot as plt\n'), ((4038, 4053), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""k"""'], {}), "('k')\n", (4048, 4053), True, 'import matplotlib.pyplot as plt\n'), ((4058, 4077), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""error"""'], {}), "('error')\n", (4068, 4077), True, 'import matplotlib.pyplot as plt\n'), ((4082, 4094), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (4092, 4094), True, 'import matplotlib.pyplot as plt\n'), ((4099, 4113), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4107, 4113), True, 'import matplotlib.pyplot as plt\n'), ((4118, 4181), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots_N10_splitA/error_as_a_function_of_k.png"""'], {}), "('../plots_N10_splitA/error_as_a_function_of_k.png')\n", (4129, 4181), True, 'import matplotlib.pyplot as plt\n'), ((4243, 4252), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4250, 4252), True, 'import matplotlib.pyplot as plt\n'), ((4273, 4285), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4283, 4285), True, 'import matplotlib.pyplot as plt\n'), ((4290, 4325), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_training', 'y_training'], {}), '(x_training, y_training)\n', (4301, 4325), True, 'import matplotlib.pyplot as plt\n'), ((4460, 4490), 'matplotlib.pyplot.title', 'plt.title', (['"""Lowest Test Error"""'], {}), "('Lowest Test Error')\n", (4469, 4490), True, 'import matplotlib.pyplot as plt\n'), ((4495, 4510), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (4505, 4510), True, 'import matplotlib.pyplot as plt\n'), ((4515, 4530), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (4525, 4530), True, 'import matplotlib.pyplot as plt\n'), ((4535, 4547), 'matplotlib.pyplot.legend', 
'plt.legend', ([], {}), '()\n', (4545, 4547), True, 'import matplotlib.pyplot as plt\n'), ((4552, 4566), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (4560, 4566), True, 'import matplotlib.pyplot as plt\n'), ((4571, 4627), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots_N10_splitA/lowest_test_error.png"""'], {}), "('../plots_N10_splitA/lowest_test_error.png')\n", (4582, 4627), True, 'import matplotlib.pyplot as plt\n'), ((4679, 4716), 'numpy.random.choice', 'np.random.choice', (['N', 'N'], {'replace': '(False)'}), '(N, N, replace=False)\n', (4695, 4716), True, 'import numpy as np\n'), ((5723, 5732), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (5730, 5732), True, 'import matplotlib.pyplot as plt\n'), ((5753, 5765), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5763, 5765), True, 'import matplotlib.pyplot as plt\n'), ((5970, 6012), 'matplotlib.pyplot.title', 'plt.title', (['"""Error as a Function of Degree"""'], {}), "('Error as a Function of Degree')\n", (5979, 6012), True, 'import matplotlib.pyplot as plt\n'), ((6017, 6037), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""degree"""'], {}), "('degree')\n", (6027, 6037), True, 'import matplotlib.pyplot as plt\n'), ((6042, 6061), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""error"""'], {}), "('error')\n", (6052, 6061), True, 'import matplotlib.pyplot as plt\n'), ((6066, 6078), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6076, 6078), True, 'import matplotlib.pyplot as plt\n'), ((6083, 6097), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6091, 6097), True, 'import matplotlib.pyplot as plt\n'), ((6102, 6169), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots_N9_splitB/error_as_a_function_of_degree.png"""'], {}), "('../plots_N9_splitB/error_as_a_function_of_degree.png')\n", (6113, 6169), True, 'import matplotlib.pyplot as plt\n'), ((6332, 6341), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (6339, 6341), 
True, 'import matplotlib.pyplot as plt\n'), ((6362, 6374), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (6372, 6374), True, 'import matplotlib.pyplot as plt\n'), ((6379, 6414), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_training', 'y_training'], {}), '(x_training, y_training)\n', (6390, 6414), True, 'import matplotlib.pyplot as plt\n'), ((6757, 6801), 'matplotlib.pyplot.title', 'plt.title', (['"""Lowest Training and Test Errors"""'], {}), "('Lowest Training and Test Errors')\n", (6766, 6801), True, 'import matplotlib.pyplot as plt\n'), ((6806, 6821), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (6816, 6821), True, 'import matplotlib.pyplot as plt\n'), ((6826, 6841), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (6836, 6841), True, 'import matplotlib.pyplot as plt\n'), ((6846, 6858), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (6856, 6858), True, 'import matplotlib.pyplot as plt\n'), ((6863, 6877), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (6871, 6877), True, 'import matplotlib.pyplot as plt\n'), ((6882, 6950), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots_N9_splitB/lowest_training_and_test_error.png"""'], {}), "('../plots_N9_splitB/lowest_training_and_test_error.png')\n", (6893, 6950), True, 'import matplotlib.pyplot as plt\n'), ((7076, 7107), 'numpy.reshape', 'np.reshape', (['x_training', '(-1, 2)'], {}), '(x_training, (-1, 2))\n', (7086, 7107), True, 'import numpy as np\n'), ((7125, 7156), 'numpy.reshape', 'np.reshape', (['y_training', '(-1, 2)'], {}), '(y_training, (-1, 2))\n', (7135, 7156), True, 'import numpy as np\n'), ((7170, 7197), 'numpy.reshape', 'np.reshape', (['x_test', '(-1, 2)'], {}), '(x_test, (-1, 2))\n', (7180, 7197), True, 'import numpy as np\n'), ((7212, 7239), 'numpy.reshape', 'np.reshape', (['y_test', '(-1, 2)'], {}), '(y_test, (-1, 2))\n', (7222, 7239), True, 'import numpy as np\n'), ((7745, 7754), 
'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (7752, 7754), True, 'import matplotlib.pyplot as plt\n'), ((7775, 7787), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (7785, 7787), True, 'import matplotlib.pyplot as plt\n'), ((7960, 7997), 'matplotlib.pyplot.title', 'plt.title', (['"""Error as a Function of k"""'], {}), "('Error as a Function of k')\n", (7969, 7997), True, 'import matplotlib.pyplot as plt\n'), ((8002, 8017), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""k"""'], {}), "('k')\n", (8012, 8017), True, 'import matplotlib.pyplot as plt\n'), ((8022, 8041), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""error"""'], {}), "('error')\n", (8032, 8041), True, 'import matplotlib.pyplot as plt\n'), ((8046, 8058), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8056, 8058), True, 'import matplotlib.pyplot as plt\n'), ((8063, 8077), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (8071, 8077), True, 'import matplotlib.pyplot as plt\n'), ((8082, 8145), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots_N10_splitB/error_as_a_function_of_k.png"""'], {}), "('../plots_N10_splitB/error_as_a_function_of_k.png')\n", (8093, 8145), True, 'import matplotlib.pyplot as plt\n'), ((8207, 8216), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (8214, 8216), True, 'import matplotlib.pyplot as plt\n'), ((8237, 8249), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (8247, 8249), True, 'import matplotlib.pyplot as plt\n'), ((8254, 8289), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_training', 'y_training'], {}), '(x_training, y_training)\n', (8265, 8289), True, 'import matplotlib.pyplot as plt\n'), ((8437, 8467), 'matplotlib.pyplot.title', 'plt.title', (['"""Lowest Test Error"""'], {}), "('Lowest Test Error')\n", (8446, 8467), True, 'import matplotlib.pyplot as plt\n'), ((8472, 8487), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (8482, 8487), True, 'import matplotlib.pyplot 
as plt\n'), ((8492, 8507), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (8502, 8507), True, 'import matplotlib.pyplot as plt\n'), ((8512, 8524), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (8522, 8524), True, 'import matplotlib.pyplot as plt\n'), ((8529, 8543), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (8537, 8543), True, 'import matplotlib.pyplot as plt\n'), ((8548, 8604), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""../plots_N10_splitB/lowest_test_error.png"""'], {}), "('../plots_N10_splitB/lowest_test_error.png')\n", (8559, 8604), True, 'import matplotlib.pyplot as plt\n'), ((488, 509), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (502, 509), False, 'import matplotlib\n'), ((1020, 1043), 'polynomial_regression.PolynomialRegression', 'PolynomialRegression', (['i'], {}), '(i)\n', (1040, 1043), False, 'from polynomial_regression import PolynomialRegression\n'), ((2461, 2511), 'numpy.sort', 'np.sort', (['plots[low_training_err_degree].X_training'], {}), '(plots[low_training_err_degree].X_training)\n', (2468, 2511), True, 'import numpy as np\n'), ((2638, 2684), 'numpy.sort', 'np.sort', (['plots[low_test_err_degree].X_training'], {}), '(plots[low_test_err_degree].X_training)\n', (2645, 2684), True, 'import numpy as np\n'), ((3361, 3429), 'k_nearest_neighbor.KNearestNeighbor', 'KNearestNeighbor', (['i'], {'distance_measure': '"""euclidean"""', 'aggregator': '"""mean"""'}), "(i, distance_measure='euclidean', aggregator='mean')\n", (3377, 3429), False, 'from k_nearest_neighbor import KNearestNeighbor\n'), ((4339, 4370), 'numpy.sort', 'np.sort', (['kplots[low_test_err_k]'], {}), '(kplots[low_test_err_k])\n', (4346, 4370), True, 'import numpy as np\n'), ((4987, 5010), 'polynomial_regression.PolynomialRegression', 'PolynomialRegression', (['i'], {}), '(i)\n', (5007, 5010), False, 'from polynomial_regression import PolynomialRegression\n'), ((6428, 6478), 'numpy.sort', 
'np.sort', (['plots[low_training_err_degree].X_training'], {}), '(plots[low_training_err_degree].X_training)\n', (6435, 6478), True, 'import numpy as np\n'), ((6605, 6651), 'numpy.sort', 'np.sort', (['plots[low_test_err_degree].X_training'], {}), '(plots[low_test_err_degree].X_training)\n', (6612, 6651), True, 'import numpy as np\n'), ((7324, 7392), 'k_nearest_neighbor.KNearestNeighbor', 'KNearestNeighbor', (['i'], {'distance_measure': '"""euclidean"""', 'aggregator': '"""mean"""'}), "(i, distance_measure='euclidean', aggregator='mean')\n", (7340, 7392), False, 'from k_nearest_neighbor import KNearestNeighbor\n'), ((8303, 8345), 'numpy.sort', 'np.sort', (['kplots[low_test_err_k].X_training'], {}), '(kplots[low_test_err_k].X_training)\n', (8310, 8345), True, 'import numpy as np\n'), ((1570, 1616), 'metrics.mean_squared_error', 'mean_squared_error', (['y_training', 'y_hat_training'], {}), '(y_training, y_hat_training)\n', (1588, 1616), False, 'from metrics import mean_squared_error\n'), ((1684, 1722), 'metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_hat_test'], {}), '(y_test, y_hat_test)\n', (1702, 1722), False, 'from metrics import mean_squared_error\n'), ((1853, 1873), 'math.log', 'log', (['mse_training[i]'], {}), '(mse_training[i])\n', (1856, 1873), False, 'from math import log\n'), ((1942, 1958), 'math.log', 'log', (['mse_test[i]'], {}), '(mse_test[i])\n', (1945, 1958), False, 'from math import log\n'), ((3599, 3642), 'metrics.mean_squared_error', 'mean_squared_error', (['ky_training', 'k_training'], {}), '(ky_training, k_training)\n', (3617, 3642), False, 'from metrics import mean_squared_error\n'), ((3708, 3743), 'metrics.mean_squared_error', 'mean_squared_error', (['ky_test', 'k_test'], {}), '(ky_test, k_test)\n', (3726, 3743), False, 'from metrics import mean_squared_error\n'), ((5537, 5583), 'metrics.mean_squared_error', 'mean_squared_error', (['y_training', 'y_hat_training'], {}), '(y_training, y_hat_training)\n', (5555, 5583), False, 
'from metrics import mean_squared_error\n'), ((5651, 5689), 'metrics.mean_squared_error', 'mean_squared_error', (['y_test', 'y_hat_test'], {}), '(y_test, y_hat_test)\n', (5669, 5689), False, 'from metrics import mean_squared_error\n'), ((5820, 5840), 'math.log', 'log', (['mse_training[i]'], {}), '(mse_training[i])\n', (5823, 5840), False, 'from math import log\n'), ((5909, 5925), 'math.log', 'log', (['mse_test[i]'], {}), '(mse_test[i])\n', (5912, 5925), False, 'from math import log\n'), ((7562, 7605), 'metrics.mean_squared_error', 'mean_squared_error', (['ky_training', 'k_training'], {}), '(ky_training, k_training)\n', (7580, 7605), False, 'from metrics import mean_squared_error\n'), ((7671, 7706), 'metrics.mean_squared_error', 'mean_squared_error', (['ky_test', 'k_test'], {}), '(ky_test, k_test)\n', (7689, 7706), False, 'from metrics import mean_squared_error\n')] |
# EG10-20 Twinkle Twinkle classes
import time
import snaps
class Note:
def __init__(self, note, duration):
self.__note = note
self.__duration = duration
def play(self):
snaps.play_note(self.__note)
time.sleep(self.__duration)
tune = [Note(note=0, duration=0.4), Note(note=0, duration=0.4),
Note(note=7, duration=0.4), Note(note=7, duration=0.4),
Note(note=9, duration=0.4), Note(note=9, duration=0.4),
Note(note=7, duration=0.8), Note(note=5, duration=0.4),
Note(note=5, duration=0.4), Note(note=4, duration=0.4),
Note(note=4, duration=0.4), Note(note=2, duration=0.4),
Note(note=2, duration=0.4), Note(note=0, duration=0.8)]
for note in tune:
note.play()
| [
"time.sleep",
"snaps.play_note"
] | [((204, 232), 'snaps.play_note', 'snaps.play_note', (['self.__note'], {}), '(self.__note)\n', (219, 232), False, 'import snaps\n'), ((241, 268), 'time.sleep', 'time.sleep', (['self.__duration'], {}), '(self.__duration)\n', (251, 268), False, 'import time\n')] |
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
# LavaVu conf based on conf.py from underworld2
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import os, sys
from os.path import dirname, join, abspath
sys.path.insert(0, abspath(join(dirname(__file__), '..')))
sys.path.insert(0, abspath(join(dirname(__file__), '..', '..')))
import setup as lsetup
# -- Project information -----------------------------------------------------
project = 'LavaVu'
copyright = '2020, Monash University'
author = '<NAME>, Monash University'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = lsetup.version
print('BUILDING LAVAVU DOCS FOR VERSION', release)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'sphinx.ext.autosummary',
'sphinx.ext.githubpages',
'sphinx_markdown_tables',
'myst_parser',
# 'nbsphinx',
]
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_init_with_doc = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
napoleon_use_keyword = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = ['.rst', '.md']
#source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
#html_theme = 'pyramid'
#import sphinx_rtd_theme
#html_theme = "sphinx_rtd_theme"
#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
# Set the width of the content area. Defaults to '900px'
'sidebar_width': '300px',
'page_width': '90%',
#'fixed_sidebar': 'true', #Need to scroll for full table of contents
'font_family': 'sans',
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
#html_sidebars = {}
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'LavaVudoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'LavaVu.tex', 'LavaVu Documentation',
'<NAME>', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'lavavu', 'LavaVu Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'LavaVu', 'LavaVu Documentation',
author, 'LavaVu', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# setup mock classes so no building is required
# generate rst files
import os
import sys
# add current directory for `generate_api_documentation`
sys.path.append(os.path.dirname(__name__))
# add top project directory as well
sys.path.insert(0, os.path.join(os.path.dirname(__name__),'../../lavavu'))
try:
import lavavu
import convert
import points
import tracers
import control
except (Exception) as e:
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return MagicMock()
MOCK_MODULES = ['scipy', 'numpy', '_LavaVuPython']
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
import generate_api_documentation
import subprocess
subprocess.call("./run-nb-to-rst.sh", shell=True)
| [
"mock.Mock",
"os.path.dirname",
"subprocess.call"
] | [((7645, 7694), 'subprocess.call', 'subprocess.call', (['"""./run-nb-to-rst.sh"""'], {'shell': '(True)'}), "('./run-nb-to-rst.sh', shell=True)\n", (7660, 7694), False, 'import subprocess\n'), ((7045, 7070), 'os.path.dirname', 'os.path.dirname', (['__name__'], {}), '(__name__)\n', (7060, 7070), False, 'import os\n'), ((7140, 7165), 'os.path.dirname', 'os.path.dirname', (['__name__'], {}), '(__name__)\n', (7155, 7165), False, 'import os\n'), ((771, 788), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (778, 788), False, 'from os.path import dirname, join, abspath\n'), ((830, 847), 'os.path.dirname', 'dirname', (['__file__'], {}), '(__file__)\n', (837, 847), False, 'from os.path import dirname, join, abspath\n'), ((7453, 7464), 'mock.Mock', 'MagicMock', ([], {}), '()\n', (7462, 7464), True, 'from mock import Mock as MagicMock\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import collections
import mkkm_mr
import networkx as nx
from sklearn.cluster import KMeans, SpectralClustering
from snf_simple import SNF
from pamogk import config
from pamogk import label_mapper
from pamogk.data_processor import rnaseq_processor as rp, synapse_rppa_processor as rpp
from pamogk.gene_mapper import uniprot_mapper
from pamogk.kernels.lmkkmeans_train import lmkkmeans_train
from pamogk.kernels.pamogk import kernel
from pamogk.lib.sutils import *
from pamogk.pathway_reader import cx_pathway_reader as cx_pw
# see https://www.mathworks.com/help/matlab/matlab_external/install-the-matlab-engine-for-python.html
from pamogk.result_processor.label_analysis import LabelAnalysis
# import sys
# sys.path.insert(0, '/Users/fma/dev/bilkent/research/snf')
# sys.path.insert(0, '/Users/fma/dev/bilkent/research/mkkm-mr')
parser = argparse.ArgumentParser(description='Run PAMOGK-mut algorithms on pathways')
parser.add_argument('--run-id', '-rid', metavar='run-id', dest='run_id', type=str, help='Unique Run ID')
parser.add_argument('--rs-patient-data', '-rs', metavar='file-path', dest='rnaseq_patient_data', type=str2path,
help='rnaseq pathway ID list',
default=config.DATA_DIR / 'kirc_data/unc.edu_KIRC_IlluminaHiSeq_RNASeqV2.geneExp.whitelist_tumor.txt')
parser.add_argument('--rp-patient-data', '-rp', metavar='file-path', dest='rppa_patient_data', type=str2path,
help='rppa pathway ID list', default=config.DATA_DIR / 'kirc_data/kirc_rppa_data')
parser.add_argument('--som-patient-data', '-s', metavar='file-path', dest='som_patient_data', type=str2path,
help='som mut pathway ID list',
default=config.DATA_DIR / 'kirc_data/kirc_somatic_mutation_data.csv')
parser.add_argument('--label', '-m', metavar='label', dest='label', type=str, default='th196',
help='Label value that will be smoothed')
# used values: [0, 0.01, 0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
parser.add_argument('--smoothing-alpha', '-a', metavar='alpha', dest='smoothing_alpha', type=float, default=0.01,
help='Smoothing alpha in range of 0-1')
parser.add_argument('--drop-percent', '-p', metavar='drop-percent', dest='drop_percent', type=int, default=1,
help='Drop percentage in range of 0-100')
parser.add_argument('--threshold', '-t', metavar='threshold', dest='threshold', type=float, default=1.96,
help='Cut off threshold')
parser.add_argument('--continuous', '-c', metavar='bool', dest='continuous', type=str2bool, default=True,
help='Whether to produce continuous values for under/over expressed')
parser.add_argument('--normalize-kernels', '-nk', dest='kernel_normalization', type=str2bool, default=True,
help='Kernel Normalization')
args = {}
class Experiment1(object):
def __init__(self, args):
"""
Parameters
----------
args:
arguments
"""
self.args = args
self.label = args.label
self.smoothing_alpha = args.smoothing_alpha
self.kernel_normalization = args.kernel_normalization
self.drop_percent = args.drop_percent
self.threshold = args.threshold
self.log2_lambdas = list(range(-15, 16, 3))
# these are kernel related params
# each experiment may have different methods to build kernels
exp_subdir = f'{Path(__file__).stem}-{self.__class__.__name__}'
param_dir = f'label={self.label}-smoothing_alpha={self.smoothing_alpha}-kr_norm={self.kernel_normalization}'
run_suffix = ''
if self.args.run_id is not None:
run_suffix = f'-run={self.args.run_id}'
self.data_dir = config.DATA_DIR / 'pamogk_kirc' / exp_subdir / param_dir
self.result_dir = self.data_dir / ('results' + run_suffix)
self.kernel_dir = self.data_dir / 'kernels'
self.label_analyzer = None
# this will create with all roots
safe_create_dir(self.result_dir)
safe_create_dir(self.kernel_dir)
# change log and create log file
change_log_path(self.data_dir / 'run.log')
log('exp_data_dir:', self.data_dir)
self.get_rnaseq_pw_path = lambda \
pw_id: self.kernel_dir / f'rnaseq-over-under-expressed-pw_id={pw_id}.gpickle'
self.get_rppa_pw_path = lambda \
pw_id: self.kernel_dir / f'rppa-over-under-expressed-pw_id={pw_id}.gpickle'
self.get_som_pw_path = lambda \
pw_id: self.kernel_dir / f'pamogk-som-expressed-pw_id={pw_id}.gpickle'
@timeit
def read_rnaseq_data(self):
# Real Data #
# process RNA-seq expression data
gene_exp, gene_name_map = rp.process(self.args.rnaseq_patient_data, self.args.continuous, self.args.threshold)
# convert entrez gene id to uniprot id
pat_ids = gene_exp.columns.values # patient TCGA ids
ent_ids = gene_exp.index.values # gene entrez ids
return gene_exp.values, pat_ids, ent_ids
@timeit
def read_rppa_data(self):
# Real Data #
# process RNA-seq expression data
gene_exp = rpp.process(self.args.rppa_patient_data, self.args.continuous, self.args.threshold)
# convert entrez gene id to uniprot id
pat_ids = gene_exp.columns.values # patient TCGA ids
ent_ids = gene_exp.index.values # gene entrez ids
return gene_exp.values, pat_ids, ent_ids
@timeit
def read_som_data(self):
"""
Returns
-------
mapping of patient to mutations by entrez ids
"""
# Real Data #
# process RNA-seq expression data
patients = {}
with open(config.get_safe_data_file(self.args.som_patient_data)) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
pat_id = row['Patient ID']
ent_id = row['Entrez Gene ID']
if pat_id not in patients:
patients[pat_id] = {ent_id}
else:
patients[pat_id].add(ent_id)
return collections.OrderedDict(sorted(patients.items()))
@timeit
def find_intersection_patients(self, rs_GE, rs_pat, rp_GE, rp_pat, som_pat):
rs_pat_list = simplify_pat_ids(rs_pat)
rp_pat_list = simplify_pat_ids(rp_pat)
som_pat_list = simplify_pat_ids(som_pat.keys())
intersection_list = list(set(rs_pat_list).intersection(rp_pat_list, som_pat_list))
intersection_list.sort()
intersect_loc = self.data_dir / 'patients.csv'
save_csv(intersect_loc, [[pid] for pid in intersection_list])
def clean_patient_list_and_ge_data(patients, ge, whitelist):
pat_list = simplify_pat_ids(patients)
to_del = [idx for idx, value in enumerate(pat_list) if value not in whitelist]
return np.delete(patients, to_del), np.delete(ge, to_del, axis=1)
rs_pat, rs_GE = clean_patient_list_and_ge_data(rs_pat, rs_GE, intersection_list)
rp_pat, rp_GE = clean_patient_list_and_ge_data(rp_pat, rp_GE, intersection_list)
som_pat_deleted_list = [pid for pid in som_pat.keys() if pid not in intersection_list]
for item in som_pat_deleted_list:
som_pat.pop(item, None)
return rs_GE, rs_pat, rp_GE, rp_pat, som_pat
@timeit
def preprocess_seq_patient_data(self, GE, all_ent_ids):
# get the dictionary of gene id mappers
uni2ent, ent2uni = uniprot_mapper.json_to_dict()
found_ent_ids = [eid in ent2uni for eid in all_ent_ids]
ent_ids = np.array([eid for eid in all_ent_ids if eid in ent2uni])
uni_ids = np.array([ent2uni[eid] for eid in ent_ids], dtype=object)
log('uni_ids:', len(uni_ids))
log('miss_ent_ids:', len(all_ent_ids) - sum(found_ent_ids))
# prune genes whose uniprot id is not found
GE = GE[found_ent_ids]
return GE, uni_ids
@timeit
def preprocess_som_patient_data(self, patients):
# get the dictionary of gene id mappers
uni2ent, ent2uni = uniprot_mapper.json_to_dict()
res = []
num_empty = 0
for pat_id, ent_ids in patients.items():
# uni_ids = [uid for eid in ent_ids if eid in ent2uni for uid in ent2uni[eid]]
uni_ids = [uid for eid in ent_ids if eid in ent2uni for uid in ent2uni[eid]]
# if there are any matches map them
res.append({
'pat_id': pat_id,
'mutated_nodes': uni_ids,
})
log('removed patients:', num_empty)
return res
@timeit
def read_pathways(self):
# get all pathways
return cx_pw.read_pathways()
def rnaseq_pathways_save_valid(self, all_pw_map):
return np.all([self.get_rnaseq_pw_path(pw_id).exists() for pw_id in all_pw_map])
def rppa_pathways_save_valid(self, all_pw_map):
return np.all([self.get_rppa_pw_path(pw_id).exists() for pw_id in all_pw_map])
def som_pathways_save_valid(self, all_pw_map):
return np.all([self.get_som_pw_path(pw_id).exists() for pw_id in all_pw_map])
@timeit
def restore_rnaseq_pathways(self, all_pw_map):
num_pw = len(all_pw_map)
res_pw_map = collections.OrderedDict()
for ind, pw_id in enumerate(all_pw_map.keys()):
path = self.get_rnaseq_pw_path(pw_id)
logr(f'Loading over/under rnaseq expressed data {ind + 1:3}/{num_pw} pw_id={pw_id}')
res_pw_map[pw_id] = nx.read_gpickle(path)
log()
return res_pw_map
@timeit
def restore_rppa_pathways(self, all_pw_map):
num_pw = len(all_pw_map)
res_pw_map = collections.OrderedDict()
for ind, pw_id in enumerate(all_pw_map.keys()):
path = self.get_rppa_pw_path(pw_id)
logr(f'Loading over/under rppa expressed data {ind + 1:3}/{num_pw} pw_id={pw_id}')
res_pw_map[pw_id] = nx.read_gpickle(path)
log()
return res_pw_map
@timeit
def restore_som_pathways(self, all_pw_map):
num_pw = len(all_pw_map)
res_pw_map = collections.OrderedDict()
for ind, pw_id in enumerate(all_pw_map.keys()):
path = self.get_som_pw_path(pw_id)
logr(f'Loading somatic mutation data {ind + 1:3}/{num_pw} pw_id={pw_id}')
res_pw_map[pw_id] = nx.read_gpickle(path)
log()
return res_pw_map
@timeit
def save_rnaseq_pathways(self, all_pw_map):
num_pw = len(all_pw_map)
for ind, (pw_id, pw) in enumerate(all_pw_map.items()):
path = self.get_rnaseq_pw_path(pw_id)
logr(f'Saving over/under rnaseq expressed data {ind + 1:3}/{num_pw} pw_id={pw_id}')
nx.write_gpickle(pw, path)
log()
@timeit
def save_rppa_pathways(self, all_pw_map):
num_pw = len(all_pw_map)
for ind, (pw_id, pw) in enumerate(all_pw_map.items()):
path = self.get_rppa_pw_path(pw_id)
logr(f'Saving over/under rppa expressed data {ind + 1:3}/{num_pw} pw_id={pw_id}')
nx.write_gpickle(pw, path)
log()
@timeit
def save_som_pathways(self, all_pw_map):
num_pw = len(all_pw_map)
for ind, (pw_id, pw) in enumerate(all_pw_map.items()):
path = self.get_som_pw_path(pw_id)
logr(f'Saving somatic mutation data {ind + 1:3}/{num_pw} pw_id={pw_id}')
nx.write_gpickle(pw, path)
log()
@timeit
def label_rnaseq_patient_genes(self, all_pw_map, pat_ids, GE, uni_ids):
"""Labels all patients with matching level of expression
Parameters
----------
all_pw_map: :obj:`list` of :obj:`networkx.classes.graph.Graph`
a dictionary of all pathways we are using
pat_ids: :obj:`list` of :obj:`str`
list of patient ids
GE: :obj:`numpy.ndarray`
Gene expression data array in shape of genes by patients
uni_ids: :obj:`numpy.ndarray`
mapping from uniprot to gene
"""
# check if we already stored all over/under expression pathway data if so restore them
if self.rnaseq_pathways_save_valid(all_pw_map):
return self.restore_rnaseq_pathways(all_pw_map)
num_pat = pat_ids.shape[0]
# if there are missing ones calculate all of them
log('RNAseq Over and under expressed patient pathway labeling')
for ind, pid in enumerate(pat_ids):
if self.args.continuous:
gene_vals = (GE[..., pat_ids == pid]).flatten() # over expressed genes
logr(f'RNAseq Checking patient for over-expressed {ind + 1:4}/{num_pat} pid={pid}')
label_mapper.mark_cont_label_on_pathways('oe', pid, all_pw_map, uni_ids, gene_vals)
label_mapper.mark_extra_label_on_pathways(f'oe-{self.label}', pid, all_pw_map, 'oe', self.threshold)
logr(f'RNAseq Checking patient for under-expressed {ind + 1:4}/{num_pat} pid={pid}')
label_mapper.mark_cont_label_on_pathways('ue', pid, all_pw_map, uni_ids, gene_vals)
label_mapper.mark_extra_label_on_pathways(f'ue-{self.label}', pid, all_pw_map, 'ue', self.threshold)
else:
logr(f'RNAseq Checking patient for over-expressed {ind + 1:4}/{num_pat} pid={pid}')
gene_ind = (GE[..., pat_ids == pid] == 1).flatten() # over expressed genes
genes = uni_ids[gene_ind] # get uniprot gene ids from indices
label_mapper.mark_label_on_pathways('oe', pid, all_pw_map, genes, self.label)
logr(f'RNAseq Checking patient for under-expressed {ind + 1:4}/{num_pat} pid={pid}')
gene_ind = (GE[..., pat_ids == pid] == -1).flatten() # under expressed genes
genes = uni_ids[gene_ind] # get uniprot gene ids from indices
label_mapper.mark_label_on_pathways('ue', pid, all_pw_map, genes, self.label)
log()
self.save_rnaseq_pathways(all_pw_map)
return all_pw_map
@timeit
def label_rppa_patient_genes(self, all_pw_map, pat_ids, GE, uni_ids):
"""Labels all patients with matching level of expression
Parameters
----------
all_pw_map: :obj:`list` of :obj:`networkx.classes.graph.Graph`
a dictionary of all pathways we are using
pat_ids: :obj:`list` of :obj:`str`
list of patient ids
GE: :obj:`numpy.ndarray`
Gene expression data array in shape of genes by patients
uni_ids: :obj:`numpy.ndarray`
mapping from uniprot to gene
"""
# check if we already stored all over/under expression pathway data if so restore them
if self.rppa_pathways_save_valid(all_pw_map):
return self.restore_rppa_pathways(all_pw_map)
num_pat = pat_ids.shape[0]
# if there are missing ones calculate all of them
log('RPPA Over and under expressed patient pathway labeling')
for ind, pid in enumerate(pat_ids):
if self.args.continuous:
gene_vals = (GE[..., pat_ids == pid]).flatten() # over expressed genes
logr(f'RPPA Checking patient for over-expressed {ind + 1:4}/{num_pat} pid={pid}')
label_mapper.mark_cont_label_on_pathways('oe', pid, all_pw_map, uni_ids, gene_vals)
label_mapper.mark_extra_label_on_pathways(f'oe-{self.label}', pid, all_pw_map, 'oe', self.threshold)
logr(f'RPPA Checking patient for under-expressed {ind + 1:4}/{num_pat} pid={pid}')
label_mapper.mark_cont_label_on_pathways('ue', pid, all_pw_map, uni_ids, gene_vals)
label_mapper.mark_extra_label_on_pathways(f'ue-{self.label}', pid, all_pw_map, 'ue', self.threshold)
else:
logr(f'RPPA Checking patient for rppa over-expressed {ind + 1:4}/{num_pat} pid={pid}')
gene_ind = (GE[..., pat_ids == pid] == 1).flatten() # over expressed genes
genes = uni_ids[gene_ind] # get uniprot gene ids from indices
label_mapper.mark_label_on_pathways('oe', pid, all_pw_map, genes, self.label)
logr(f'RPPA Checking patient for rppa under-expressed {ind + 1:4}/{num_pat} pid={pid}')
gene_ind = (GE[..., pat_ids == pid] == -1).flatten() # under expressed genes
genes = uni_ids[gene_ind] # get uniprot gene ids from indices
label_mapper.mark_label_on_pathways('ue', pid, all_pw_map, genes, self.label)
log()
self.save_rppa_pathways(all_pw_map)
return all_pw_map
def label_som_patient_genes(self, all_pw_map, patients):
"""Labels all patients with matching level of expression
Parameters
----------
all_pw_map: :obj:`list` of :obj:`networkx.classes.graph.Graph`
a dictionary of all pathways we are using
patients: :obj:`list`
list of patients with mutation mappings
"""
# check if we already stored all over/under expression pathway data if so restore them
if self.som_pathways_save_valid(all_pw_map):
return self.restore_som_pathways(all_pw_map)
num_pat = len(patients)
# if there are missing ones calculate all of them
log('Somatic mutation patient pathway labeling')
for ind, patient in enumerate(patients):
pid = patient['pat_id']
genes = patient['mutated_nodes'] # get uniprot gene ids from indices
genes = np.array([genes])
logr(f'Checking patient for somatic mutation {ind + 1:4}/{num_pat} pid={pid}')
label_mapper.mark_label_on_pathways('som', pid, all_pw_map, genes, self.label)
log()
self.save_som_pathways(all_pw_map)
return all_pw_map
@timeit
def create_seq_kernels(self, all_pw_map, pat_ids, kms_file_name):
# experiment variables
num_pat = pat_ids.shape[0]
num_pw = len(all_pw_map)
kms_path = self.kernel_dir / f'{kms_file_name}.npz'
if kms_path.exists(): return np_load_data(kms_path, key='kms')
# calculate kernel matrices for over expressed genes
over_exp_kms = np.zeros((num_pw, num_pat, num_pat))
for ind, (pw_id, pw) in enumerate(all_pw_map.items()): # for each pathway
over_exp_kms[ind] = kernel(pat_ids, pw, label_key=f'label-oe-{self.label}', alpha=self.smoothing_alpha,
normalization=self.kernel_normalization)
logr(f'Calculating oe pathway kernel={kms_file_name} {ind + 1:4}/{num_pw} pw_id={pw_id}')
log()
# calculate kernel matrices for under expressed genes
under_exp_kms = np.zeros((num_pw, num_pat, num_pat))
for ind, (pw_id, pw) in enumerate(all_pw_map.items()): # for each pathway
under_exp_kms[ind] = kernel(pat_ids, pw, label_key=f'label-ue-{self.label}', alpha=self.smoothing_alpha,
normalization=self.kernel_normalization)
logr(f'Calculating ue pathway kernel={kms_file_name} {ind + 1:4}/{num_pw} pw_id={pw_id}')
log()
kms = np.vstack([over_exp_kms, under_exp_kms]) # stack all kernels
np.savez_compressed(kms_path, kms=kms) # save kernels
return kms
@timeit
def create_som_kernels(self, all_pw_map, patients):
# experiment variables
num_pat = len(patients)
num_pw = len(all_pw_map)
kms_path = self.kernel_dir / 'som-kms.npz'
if kms_path.exists(): return np_load_data(kms_path, key='kms')
# calculate kernel matrices for over expressed genes
kms = np.zeros((num_pw, num_pat, num_pat))
pat_ids = np.array([pat['pat_id'] for pat in patients])
for ind, (pw_id, pw) in enumerate(all_pw_map.items()): # for each pathway
kms[ind] = kernel(pat_ids, pw, label_key='label-som', alpha=self.smoothing_alpha,
normalization=self.kernel_normalization)
logr(f'Calculating som mut pathway kernel {ind + 1:4}/{num_pat} pw_id={pw_id}')
log()
np.savez_compressed(kms_path, kms=kms) # save kernels
return kms
@staticmethod
def kmeans_cluster(U, n_clusters):
U_normalized = mkkm_mr.lib.normalize_unit_row(U)
return KMeans(n_clusters=n_clusters, max_iter=100, n_init=50).fit_predict(U_normalized)
def cluster_cont(self, kernels, n_clusters):
snf_K = 20 # number of neighbors, usually (10~30)
snf_t = 20 # number of iterations, usually (10~20)
# SNF
# W = snf_compute.snf(*kernels, K=snf_K, t=snf_t)
W = SNF(kernels, K=snf_K, t=snf_t)
# KMeans
labels = self.kmeans_cluster(W, n_clusters)
np_save_npz(self.result_dir / f'pamogk-snf-kmeans-k={n_clusters}', labels=labels)
# Spectral
labels = SpectralClustering(n_clusters, affinity='precomputed').fit_predict(W)
np_save_npz(self.result_dir / f'pamogk-snf-spectral-k={n_clusters}', labels=labels)
KH = mkkm_mr.lib.kernel_centralize(kernels)
KH = mkkm_mr.lib.kernel_normalize(KH)
num_ker = kernels.shape[0]
gamma0 = np.ones((num_ker, 1)) / num_ker
avgKer = mkkm_mr.lib.combine_kernels(KH, gamma0)
H = mkkm_mr.lib.kernel_kmeans_iter(avgKer, n_clusters)
labels = self.kmeans_cluster(H, n_clusters)
np_save_npz(self.result_dir / f'pamogk-kmeans-k={n_clusters}.csv', labels=labels)
# AAAI - 16 - MKKM-MR
M = mkkm_mr.lib.calM(KH)
lambdas = np.power(2., self.log2_lambdas)
for log2_lambda, lambda_ in zip(self.log2_lambdas, lambdas):
log(f'running for n_clusters={n_clusters} log2_lambda={log2_lambda}')
[H, weights, obj] = mkkm_mr.mkkm_mr(KH, M, n_clusters, lambda_)
labels = self.kmeans_cluster(H, n_clusters)
out_file = self.result_dir / f'pamogk-mkkm-k={n_clusters}-log2_lambda={log2_lambda}'
np_save_npz(out_file, labels=labels, weights=weights, obj=obj)
def cluster_discrete(self, kernels, n_clusters):
save_path = self.result_dir / f'labels_dropped={self.drop_percent}' / f'pamogk-all-lmkkmeans-k={n_clusters}'
if save_path.exists():
with np.load(save_path) as data:
return data['labels', 'weights']
labels, weights = lmkkmeans_train(kernels, cluster_count=n_clusters, iteration_count=5)
ensure_file_dir(save_path)
np_save_npz(f'{save_path}-weights', labels=labels, weights=weights)
return labels, weights
@timeit
def cluster(self, kernels, n_clusters):
if self.args.continuous:
return self.cluster_cont(kernels, n_clusters)
else:
return self.cluster_discrete(kernels, n_clusters)
@timeit
def run(self):
# Patient part
# RnaSeq Data
rs_GE, rs_pat_ids, rs_ent_ids = self.read_rnaseq_data()
# Rppa Data
rp_GE, rp_pat_ids, rp_ent_ids = self.read_rppa_data()
# Somatic mutation data
som_patients = self.read_som_data()
# Find intersect
rs_GE, rs_pat_ids, rp_GE, rp_pat_ids, som_patients = self.find_intersection_patients(rs_GE, rs_pat_ids, rp_GE,
rp_pat_ids, som_patients)
# Kernel part
# RnaSeq Data
rs_GE, rs_uni_ids = self.preprocess_seq_patient_data(rs_GE, rs_ent_ids)
all_rs_pw_map = self.read_pathways()
labeled_all_rs_pw_map = self.label_rnaseq_patient_genes(all_rs_pw_map, rs_pat_ids, rs_GE, rs_uni_ids)
rs_kernels = self.create_seq_kernels(labeled_all_rs_pw_map, rs_pat_ids, 'rnaseq-kms')
# Rppa Data
rp_GE, rp_uni_ids = self.preprocess_seq_patient_data(rp_GE, rp_ent_ids)
all_rp_pw_map = self.read_pathways()
labeled_all_rp_pw_map = self.label_rppa_patient_genes(all_rp_pw_map, rp_pat_ids, rp_GE, rp_uni_ids)
rp_kernels = self.create_seq_kernels(labeled_all_rp_pw_map, rp_pat_ids, 'rppa-kms')
# Somatic mutation data
som_patients = self.preprocess_som_patient_data(som_patients)
all_som_pw_map = self.read_pathways()
labeled_all_som_pw_map = self.label_som_patient_genes(all_som_pw_map, som_patients)
som_kernels = self.create_som_kernels(labeled_all_som_pw_map, som_patients)
kernels = np.concatenate((rs_kernels, rp_kernels, som_kernels))
total = kernels.shape[1] * kernels.shape[2]
limit = (self.drop_percent * total) / 100.0
valid_kernels = kernels[np.count_nonzero(kernels, axis=(1, 2)) >= limit]
log(f'kernel_count={kernels.shape[0]} valid_kernel_count={valid_kernels.shape[0]}')
cluster_sizes = [2, 3, 4, 5]
for k in cluster_sizes:
log(f'Running clustering for k={k}')
self.cluster(valid_kernels, k)
self.label_analyzer = LabelAnalysis(results_dir=self.result_dir, methods=['mkkm', 'kmeans'],
cluster_sizes=cluster_sizes, log2_lambdas=self.log2_lambdas)
self.label_analyzer.run()
def create_experiment(*nargs):
global args
if __name__ == '__main__': # if running directly use command line arguments
args = parser.parse_args()
else: # otherwise use user given arguments
args = parser.parse_args(nargs)
print_args(args)
return Experiment1(args)
if __name__ == '__main__':
create_experiment().run()
| [
"sklearn.cluster.SpectralClustering",
"snf_simple.SNF",
"pamogk.label_mapper.mark_cont_label_on_pathways",
"mkkm_mr.mkkm_mr",
"mkkm_mr.lib.combine_kernels",
"pamogk.pathway_reader.cx_pathway_reader.read_pathways",
"mkkm_mr.lib.normalize_unit_row",
"pamogk.result_processor.label_analysis.LabelAnalysis"... | [((4840, 4929), 'pamogk.data_processor.rnaseq_processor.process', 'rp.process', (['self.args.rnaseq_patient_data', 'self.args.continuous', 'self.args.threshold'], {}), '(self.args.rnaseq_patient_data, self.args.continuous, self.args.\n threshold)\n', (4850, 4929), True, 'from pamogk.data_processor import rnaseq_processor as rp, synapse_rppa_processor as rpp\n'), ((5270, 5358), 'pamogk.data_processor.synapse_rppa_processor.process', 'rpp.process', (['self.args.rppa_patient_data', 'self.args.continuous', 'self.args.threshold'], {}), '(self.args.rppa_patient_data, self.args.continuous, self.args.\n threshold)\n', (5281, 5358), True, 'from pamogk.data_processor import rnaseq_processor as rp, synapse_rppa_processor as rpp\n'), ((7628, 7657), 'pamogk.gene_mapper.uniprot_mapper.json_to_dict', 'uniprot_mapper.json_to_dict', ([], {}), '()\n', (7655, 7657), False, 'from pamogk.gene_mapper import uniprot_mapper\n'), ((8233, 8262), 'pamogk.gene_mapper.uniprot_mapper.json_to_dict', 'uniprot_mapper.json_to_dict', ([], {}), '()\n', (8260, 8262), False, 'from pamogk.gene_mapper import uniprot_mapper\n'), ((8844, 8865), 'pamogk.pathway_reader.cx_pathway_reader.read_pathways', 'cx_pw.read_pathways', ([], {}), '()\n', (8863, 8865), True, 'from pamogk.pathway_reader import cx_pathway_reader as cx_pw\n'), ((9406, 9431), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (9429, 9431), False, 'import collections\n'), ((9845, 9870), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (9868, 9870), False, 'import collections\n'), ((10279, 10304), 'collections.OrderedDict', 'collections.OrderedDict', ([], {}), '()\n', (10302, 10304), False, 'import collections\n'), ((20544, 20577), 'mkkm_mr.lib.normalize_unit_row', 'mkkm_mr.lib.normalize_unit_row', (['U'], {}), '(U)\n', (20574, 20577), False, 'import mkkm_mr\n'), ((20928, 20958), 'snf_simple.SNF', 'SNF', (['kernels'], {'K': 'snf_K', 't': 
'snf_t'}), '(kernels, K=snf_K, t=snf_t)\n', (20931, 20958), False, 'from snf_simple import SNF\n'), ((21333, 21371), 'mkkm_mr.lib.kernel_centralize', 'mkkm_mr.lib.kernel_centralize', (['kernels'], {}), '(kernels)\n', (21362, 21371), False, 'import mkkm_mr\n'), ((21385, 21417), 'mkkm_mr.lib.kernel_normalize', 'mkkm_mr.lib.kernel_normalize', (['KH'], {}), '(KH)\n', (21413, 21417), False, 'import mkkm_mr\n'), ((21519, 21558), 'mkkm_mr.lib.combine_kernels', 'mkkm_mr.lib.combine_kernels', (['KH', 'gamma0'], {}), '(KH, gamma0)\n', (21546, 21558), False, 'import mkkm_mr\n'), ((21572, 21622), 'mkkm_mr.lib.kernel_kmeans_iter', 'mkkm_mr.lib.kernel_kmeans_iter', (['avgKer', 'n_clusters'], {}), '(avgKer, n_clusters)\n', (21602, 21622), False, 'import mkkm_mr\n'), ((21808, 21828), 'mkkm_mr.lib.calM', 'mkkm_mr.lib.calM', (['KH'], {}), '(KH)\n', (21824, 21828), False, 'import mkkm_mr\n'), ((22658, 22727), 'pamogk.kernels.lmkkmeans_train.lmkkmeans_train', 'lmkkmeans_train', (['kernels'], {'cluster_count': 'n_clusters', 'iteration_count': '(5)'}), '(kernels, cluster_count=n_clusters, iteration_count=5)\n', (22673, 22727), False, 'from pamogk.kernels.lmkkmeans_train import lmkkmeans_train\n'), ((25249, 25384), 'pamogk.result_processor.label_analysis.LabelAnalysis', 'LabelAnalysis', ([], {'results_dir': 'self.result_dir', 'methods': "['mkkm', 'kmeans']", 'cluster_sizes': 'cluster_sizes', 'log2_lambdas': 'self.log2_lambdas'}), "(results_dir=self.result_dir, methods=['mkkm', 'kmeans'],\n cluster_sizes=cluster_sizes, log2_lambdas=self.log2_lambdas)\n", (25262, 25384), False, 'from pamogk.result_processor.label_analysis import LabelAnalysis\n'), ((9667, 9688), 'networkx.read_gpickle', 'nx.read_gpickle', (['path'], {}), '(path)\n', (9682, 9688), True, 'import networkx as nx\n'), ((10102, 10123), 'networkx.read_gpickle', 'nx.read_gpickle', (['path'], {}), '(path)\n', (10117, 10123), True, 'import networkx as nx\n'), ((10526, 10547), 'networkx.read_gpickle', 'nx.read_gpickle', (['path'], 
{}), '(path)\n', (10541, 10547), True, 'import networkx as nx\n'), ((10903, 10929), 'networkx.write_gpickle', 'nx.write_gpickle', (['pw', 'path'], {}), '(pw, path)\n', (10919, 10929), True, 'import networkx as nx\n'), ((11253, 11279), 'networkx.write_gpickle', 'nx.write_gpickle', (['pw', 'path'], {}), '(pw, path)\n', (11269, 11279), True, 'import networkx as nx\n'), ((11592, 11618), 'networkx.write_gpickle', 'nx.write_gpickle', (['pw', 'path'], {}), '(pw, path)\n', (11608, 11618), True, 'import networkx as nx\n'), ((17889, 17967), 'pamogk.label_mapper.mark_label_on_pathways', 'label_mapper.mark_label_on_pathways', (['"""som"""', 'pid', 'all_pw_map', 'genes', 'self.label'], {}), "('som', pid, all_pw_map, genes, self.label)\n", (17924, 17967), False, 'from pamogk import label_mapper\n'), ((18601, 18730), 'pamogk.kernels.pamogk.kernel', 'kernel', (['pat_ids', 'pw'], {'label_key': 'f"""label-oe-{self.label}"""', 'alpha': 'self.smoothing_alpha', 'normalization': 'self.kernel_normalization'}), "(pat_ids, pw, label_key=f'label-oe-{self.label}', alpha=self.\n smoothing_alpha, normalization=self.kernel_normalization)\n", (18607, 18730), False, 'from pamogk.kernels.pamogk import kernel\n'), ((19121, 19250), 'pamogk.kernels.pamogk.kernel', 'kernel', (['pat_ids', 'pw'], {'label_key': 'f"""label-ue-{self.label}"""', 'alpha': 'self.smoothing_alpha', 'normalization': 'self.kernel_normalization'}), "(pat_ids, pw, label_key=f'label-ue-{self.label}', alpha=self.\n smoothing_alpha, normalization=self.kernel_normalization)\n", (19127, 19250), False, 'from pamogk.kernels.pamogk import kernel\n'), ((20131, 20246), 'pamogk.kernels.pamogk.kernel', 'kernel', (['pat_ids', 'pw'], {'label_key': '"""label-som"""', 'alpha': 'self.smoothing_alpha', 'normalization': 'self.kernel_normalization'}), "(pat_ids, pw, label_key='label-som', alpha=self.smoothing_alpha,\n normalization=self.kernel_normalization)\n", (20137, 20246), False, 'from pamogk.kernels.pamogk import kernel\n'), ((22062, 22105), 
'mkkm_mr.mkkm_mr', 'mkkm_mr.mkkm_mr', (['KH', 'M', 'n_clusters', 'lambda_'], {}), '(KH, M, n_clusters, lambda_)\n', (22077, 22105), False, 'import mkkm_mr\n'), ((5828, 5881), 'pamogk.config.get_safe_data_file', 'config.get_safe_data_file', (['self.args.som_patient_data'], {}), '(self.args.som_patient_data)\n', (5853, 5881), False, 'from pamogk import config\n'), ((12882, 12969), 'pamogk.label_mapper.mark_cont_label_on_pathways', 'label_mapper.mark_cont_label_on_pathways', (['"""oe"""', 'pid', 'all_pw_map', 'uni_ids', 'gene_vals'], {}), "('oe', pid, all_pw_map, uni_ids,\n gene_vals)\n", (12922, 12969), False, 'from pamogk import label_mapper\n'), ((12982, 13086), 'pamogk.label_mapper.mark_extra_label_on_pathways', 'label_mapper.mark_extra_label_on_pathways', (['f"""oe-{self.label}"""', 'pid', 'all_pw_map', '"""oe"""', 'self.threshold'], {}), "(f'oe-{self.label}', pid,\n all_pw_map, 'oe', self.threshold)\n", (13023, 13086), False, 'from pamogk import label_mapper\n'), ((13201, 13288), 'pamogk.label_mapper.mark_cont_label_on_pathways', 'label_mapper.mark_cont_label_on_pathways', (['"""ue"""', 'pid', 'all_pw_map', 'uni_ids', 'gene_vals'], {}), "('ue', pid, all_pw_map, uni_ids,\n gene_vals)\n", (13241, 13288), False, 'from pamogk import label_mapper\n'), ((13301, 13405), 'pamogk.label_mapper.mark_extra_label_on_pathways', 'label_mapper.mark_extra_label_on_pathways', (['f"""ue-{self.label}"""', 'pid', 'all_pw_map', '"""ue"""', 'self.threshold'], {}), "(f'ue-{self.label}', pid,\n all_pw_map, 'ue', self.threshold)\n", (13342, 13405), False, 'from pamogk import label_mapper\n'), ((13708, 13785), 'pamogk.label_mapper.mark_label_on_pathways', 'label_mapper.mark_label_on_pathways', (['"""oe"""', 'pid', 'all_pw_map', 'genes', 'self.label'], {}), "('oe', pid, all_pw_map, genes, self.label)\n", (13743, 13785), False, 'from pamogk import label_mapper\n'), ((14077, 14154), 'pamogk.label_mapper.mark_label_on_pathways', 'label_mapper.mark_label_on_pathways', (['"""ue"""', 'pid', 
'all_pw_map', 'genes', 'self.label'], {}), "('ue', pid, all_pw_map, genes, self.label)\n", (14112, 14154), False, 'from pamogk import label_mapper\n'), ((15481, 15568), 'pamogk.label_mapper.mark_cont_label_on_pathways', 'label_mapper.mark_cont_label_on_pathways', (['"""oe"""', 'pid', 'all_pw_map', 'uni_ids', 'gene_vals'], {}), "('oe', pid, all_pw_map, uni_ids,\n gene_vals)\n", (15521, 15568), False, 'from pamogk import label_mapper\n'), ((15581, 15685), 'pamogk.label_mapper.mark_extra_label_on_pathways', 'label_mapper.mark_extra_label_on_pathways', (['f"""oe-{self.label}"""', 'pid', 'all_pw_map', '"""oe"""', 'self.threshold'], {}), "(f'oe-{self.label}', pid,\n all_pw_map, 'oe', self.threshold)\n", (15622, 15685), False, 'from pamogk import label_mapper\n'), ((15798, 15885), 'pamogk.label_mapper.mark_cont_label_on_pathways', 'label_mapper.mark_cont_label_on_pathways', (['"""ue"""', 'pid', 'all_pw_map', 'uni_ids', 'gene_vals'], {}), "('ue', pid, all_pw_map, uni_ids,\n gene_vals)\n", (15838, 15885), False, 'from pamogk import label_mapper\n'), ((15898, 16002), 'pamogk.label_mapper.mark_extra_label_on_pathways', 'label_mapper.mark_extra_label_on_pathways', (['f"""ue-{self.label}"""', 'pid', 'all_pw_map', '"""ue"""', 'self.threshold'], {}), "(f'ue-{self.label}', pid,\n all_pw_map, 'ue', self.threshold)\n", (15939, 16002), False, 'from pamogk import label_mapper\n'), ((16308, 16385), 'pamogk.label_mapper.mark_label_on_pathways', 'label_mapper.mark_label_on_pathways', (['"""oe"""', 'pid', 'all_pw_map', 'genes', 'self.label'], {}), "('oe', pid, all_pw_map, genes, self.label)\n", (16343, 16385), False, 'from pamogk import label_mapper\n'), ((16680, 16757), 'pamogk.label_mapper.mark_label_on_pathways', 'label_mapper.mark_label_on_pathways', (['"""ue"""', 'pid', 'all_pw_map', 'genes', 'self.label'], {}), "('ue', pid, all_pw_map, genes, self.label)\n", (16715, 16757), False, 'from pamogk import label_mapper\n'), ((20593, 20647), 'sklearn.cluster.KMeans', 'KMeans', ([], 
{'n_clusters': 'n_clusters', 'max_iter': '(100)', 'n_init': '(50)'}), '(n_clusters=n_clusters, max_iter=100, n_init=50)\n', (20599, 20647), False, 'from sklearn.cluster import KMeans, SpectralClustering\n'), ((21157, 21211), 'sklearn.cluster.SpectralClustering', 'SpectralClustering', (['n_clusters'], {'affinity': '"""precomputed"""'}), "(n_clusters, affinity='precomputed')\n", (21175, 21211), False, 'from sklearn.cluster import KMeans, SpectralClustering\n')] |
from .constants import SPECIAL_TOKENS
try:
import re2 as re
except ImportError:
import re
def twitter_sentiment_token_matching(token):
    """Map a raw token to a special placeholder for twitter sentiment data.

    Rules are tried in order: URLs, positive emoticons, negative
    emoticons, @-mentions, then heart emoticons. A rule only fires when
    its placeholder is present in SPECIAL_TOKENS. Returns the matching
    SPECIAL_TOKENS entry, or None when no rule applies.
    """
    rules = (
        ('URL_TOKEN', r'https?:\/\/[^\s]+'),
        ('POS_EM_TOKEN', r':-?(\)|D|p)+'),
        ('NEG_EM_TOKEN', r':-?(\(|\\|/)+'),
        ('USER_TOKEN', r'(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)'),
        ('HEART_TOKEN', r'<3+'),
    )
    for placeholder, pattern in rules:
        if placeholder in SPECIAL_TOKENS and re.match(pattern, token):
            return SPECIAL_TOKENS[placeholder]
    return None
| [
"re.match"
] | [((256, 295), 're.match', 're.match', (['"""https?:\\\\/\\\\/[^\\\\s]+"""', 'token'], {}), "('https?:\\\\/\\\\/[^\\\\s]+', token)\n", (264, 295), False, 'import re\n'), ((382, 414), 're.match', 're.match', (['""":-?(\\\\)|D|p)+"""', 'token'], {}), "(':-?(\\\\)|D|p)+', token)\n", (390, 414), False, 'import re\n'), ((506, 541), 're.match', 're.match', (['""":-?(\\\\(|\\\\\\\\|/)+"""', 'token'], {}), "(':-?(\\\\(|\\\\\\\\|/)+', token)\n", (514, 541), False, 'import re\n'), ((629, 701), 're.match', 're.match', (['"""(?<=^|(?<=[^a-zA-Z0-9-_\\\\.]))@([A-Za-z]+[A-Za-z0-9]+)"""', 'token'], {}), "('(?<=^|(?<=[^a-zA-Z0-9-_\\\\.]))@([A-Za-z]+[A-Za-z0-9]+)', token)\n", (637, 701), False, 'import re\n'), ((803, 825), 're.match', 're.match', (['"""<3+"""', 'token'], {}), "('<3+', token)\n", (811, 825), False, 'import re\n')] |
#
# Solver class using Scipy's adaptive time stepper
#
import casadi
import pybamm
import scipy.integrate as it
import numpy as np
class ScipySolver(pybamm.BaseSolver):
    """Solve a discretised model, using scipy._integrate.solve_ivp.
    Parameters
    ----------
    method : str, optional
        The method to use in solve_ivp (default is "BDF")
    rtol : float, optional
        The relative tolerance for the solver (default is 1e-6).
    atol : float, optional
        The absolute tolerance for the solver (default is 1e-6).
    """

    def __init__(self, method="BDF", rtol=1e-6, atol=1e-6):
        super().__init__(method, rtol, atol)
        # Marks this solver as ODE-only for the base class.
        # NOTE(review): semantics inferred from the attribute name — confirm
        # against pybamm.BaseSolver.
        self.ode_solver = True
        self.name = "Scipy solver ({})".format(method)
        pybamm.citations.register("virtanen2020scipy")

    def _integrate(self, model, t_eval, inputs=None):
        """
        Solve a model defined by dydt with initial conditions y0.
        Parameters
        ----------
        model : :class:`pybamm.BaseModel`
            The model whose solution to calculate.
        t_eval : :class:`numpy.array`, size (k,)
            The times at which to compute the solution
        inputs : dict, optional
            Any input parameters to pass to the model when solving
        Returns
        -------
        object
            An object containing the times and values of the solution, as well as
            various diagnostic messages.
        """
        if model.convert_to_format == "casadi":
            # casadi-converted models take inputs as one stacked vector
            # rather than a dict.
            inputs = casadi.vertcat(*[x for x in inputs.values()])
        extra_options = {"rtol": self.rtol, "atol": self.atol}
        # check for user-supplied Jacobian: only the implicit methods below
        # can make use of one.
        implicit_methods = ["Radau", "BDF", "LSODA"]
        if np.any([self.method in implicit_methods]):
            if model.jacobian_eval:
                extra_options.update(
                    {"jac": lambda t, y: model.jacobian_eval(t, y, inputs)}
                )
        # make events terminal so that the solver stops when they are reached
        if model.terminate_events_eval:
            # Each wrapper binds `inputs` and sets the `terminal` flag that
            # solve_ivp reads to stop integration at a zero-crossing.
            def event_wrapper(event):
                def event_fn(t, y):
                    return event(t, y, inputs)
                event_fn.terminal = True
                return event_fn
            events = [event_wrapper(event) for event in model.terminate_events_eval]
            extra_options.update({"events": events})
        sol = it.solve_ivp(
            lambda t, y: model.rhs_eval(t, y, inputs),
            (t_eval[0], t_eval[-1]),
            model.y0,
            t_eval=t_eval,
            method=self.method,
            dense_output=True,  # needed to evaluate the solution at event times
            **extra_options
        )
        if sol.success:
            # Set the reason for termination by matching scipy's exact
            # success messages.
            if sol.message == "A termination event occurred.":
                termination = "event"
                # Collect the latest time reached by any event function and
                # evaluate the dense solution there.
                t_event = []
                for time in sol.t_events:
                    if time.size > 0:
                        t_event = np.append(t_event, np.max(time))
                t_event = np.array([np.max(t_event)])
                y_event = sol.sol(t_event)
            elif sol.message.startswith("The solver successfully reached the end"):
                termination = "final time"
                t_event = None
                y_event = np.array(None)
            return pybamm.Solution(sol.t, sol.y, t_event, y_event, termination)
        else:
            raise pybamm.SolverError(sol.message)
| [
"pybamm.SolverError",
"pybamm.citations.register",
"numpy.any",
"numpy.max",
"numpy.array",
"pybamm.Solution"
] | [((748, 794), 'pybamm.citations.register', 'pybamm.citations.register', (['"""virtanen2020scipy"""'], {}), "('virtanen2020scipy')\n", (773, 794), False, 'import pybamm\n'), ((1734, 1775), 'numpy.any', 'np.any', (['[self.method in implicit_methods]'], {}), '([self.method in implicit_methods])\n', (1740, 1775), True, 'import numpy as np\n'), ((3332, 3392), 'pybamm.Solution', 'pybamm.Solution', (['sol.t', 'sol.y', 't_event', 'y_event', 'termination'], {}), '(sol.t, sol.y, t_event, y_event, termination)\n', (3347, 3392), False, 'import pybamm\n'), ((3425, 3456), 'pybamm.SolverError', 'pybamm.SolverError', (['sol.message'], {}), '(sol.message)\n', (3443, 3456), False, 'import pybamm\n'), ((3298, 3312), 'numpy.array', 'np.array', (['None'], {}), '(None)\n', (3306, 3312), True, 'import numpy as np\n'), ((3053, 3068), 'numpy.max', 'np.max', (['t_event'], {}), '(t_event)\n', (3059, 3068), True, 'import numpy as np\n'), ((3003, 3015), 'numpy.max', 'np.max', (['time'], {}), '(time)\n', (3009, 3015), True, 'import numpy as np\n')] |
import datetime
from dateutil.parser import parse
from mongoengine import DateTimeField, FileField
from mongoengine.connection import DEFAULT_CONNECTION_NAME
#from mongoengine.python_support import str_types
from six import string_types as str_types
import io
from django.conf import settings
if settings.FILE_DB == settings.S3:
import crits.core.s3_tools as S3
class CritsDateTimeField(DateTimeField):
    """
    Custom MongoEngine DateTimeField. Utilizes a transform such that if the
    value passed in is a string we will convert it to a datetime.datetime
    object, or if it is set to None we will use the current datetime (useful
    when instantiating new objects and wanting the default dates to all be the
    current datetime).
    """
    def __set__(self, instance, value):
        # Normalize the value before delegating to MongoEngine's descriptor.
        value = self.transform(value)
        return super(CritsDateTimeField, self).__set__(instance, value)

    def transform(self, value):
        """
        Coerce ``value`` into a datetime:

        - strings are parsed (fuzzily) into datetimes,
        - falsy values (None, '', 0) become the current datetime,
        - anything else is returned unchanged.
        """
        # BUGFIX/compat: use the six alias imported at module level instead of
        # ``basestring``, which does not exist on Python 3. On Python 2
        # six.string_types is (basestring,), so behavior is unchanged there.
        if value and isinstance(value, str_types):
            return parse(value, fuzzy=True)
        elif not value:
            return datetime.datetime.now()
        else:
            return value
class S3Proxy(object):
    """
    Custom proxy for MongoEngine which uses S3 to store binaries instead of
    GridFS.

    Mirrors the proxy interface S3FileField expects: get/put/read/delete
    plus attribute delegation to the wrapped file object.
    """
    def __init__(self, grid_id=None, key=None, instance=None,
                 db_alias=DEFAULT_CONNECTION_NAME, collection_name='fs'):
        self.grid_id = grid_id                 # Store id for file
        self.key = key                         # Field name on the owning document
        self.instance = instance               # Owning document instance
        self.db_alias = db_alias
        self.collection_name = collection_name # Passed through to the S3 helpers
        self.newfile = None                    # Used for partial writes
        self.gridout = None                    # Cached BytesIO from the last get()

    def __getattr__(self, name):
        # Serve the proxy's own bookkeeping attributes directly (this also
        # avoids recursion while they are being assigned); delegate anything
        # else to the wrapped file object.
        attrs = ('_fs', 'grid_id', 'key', 'instance', 'db_alias',
                 'collection_name', 'newfile', 'gridout')
        if name in attrs:
            return self.__getattribute__(name)
        obj = self.get()
        if name in dir(obj):
            return getattr(obj, name)
        raise AttributeError

    def __get__(self, instance, value):
        return self

    def __repr__(self):
        return '<%s: %s>' % (self.__class__.__name__, self.grid_id)

    def delete(self):
        # Delete file from S3, FileField still remains
        S3.delete_file_s3(self.grid_id,self.collection_name)
        self.grid_id = None
        self.gridout = None
        self._mark_as_changed()

    def get(self, id=None):
        """Fetch the file from S3 (cached after the first successful read).

        Returns a file-like object, or None if there is no file or the
        download fails.
        """
        if id:
            self.grid_id = id
        if self.grid_id is None:
            return None
        try:
            if self.gridout is None:
                self.gridout = io.BytesIO(S3.get_file_s3(self.grid_id, self.collection_name))
            return self.gridout
        except:
            # NOTE(review): bare except deliberately maps any S3/IO failure
            # to "no file" (best-effort read); kept as-is.
            return None

    def put(self, file_obj, **kwargs):
        """Upload ``file_obj`` to S3; raises if a file is already attached."""
        if self.grid_id:
            raise Exception('This document already has a file. Either delete '
                            'it or call replace to overwrite it')
        self.grid_id = S3.put_file_s3(file_obj, self.collection_name)
        self._mark_as_changed()

    def read(self, size=-1):
        """Read up to ``size`` bytes (everything by default); None if no file."""
        gridout = self.get()
        if gridout is None:
            return None
        else:
            try:
                return gridout.read(size)
            except:
                return ""

    def _mark_as_changed(self):
        """Inform the instance that `self.key` has been changed"""
        if self.instance:
            self.instance._mark_as_changed(self.key)
class S3FileField(FileField):
    """
    Custom FileField for MongoEngine which utilizes S3.

    Behaves like FileField but stores the binary through S3Proxy instead
    of GridFS.
    """
    def __init__(self, db_alias=DEFAULT_CONNECTION_NAME, collection_name="fs",
                 **kwargs):
        super(S3FileField, self).__init__(db_alias, collection_name, **kwargs)
        # Swap in the S3-backed proxy for all file access on this field.
        self.proxy_class = S3Proxy

    def __set__(self, instance, value):
        """Assign file-like or string content to the field.

        Raw content (anything with ``read`` that is not already a proxy, or
        a string) is uploaded via the proxy; an existing file is replaced.
        Any other value (e.g. an existing proxy) is stored as-is.
        """
        key = self.name
        if ((hasattr(value, 'read') and not
             isinstance(value, self.proxy_class)) or isinstance(value, str_types)):
            # using "FileField() = file/string" notation
            grid_file = instance._data.get(self.name)
            # If a file already exists, delete it
            if grid_file:
                try:
                    grid_file.delete()
                except:
                    # Best-effort delete: a failure here must not block the
                    # replacement upload below.
                    pass
                # Create a new file with the new data
                grid_file.put(value)
            else:
                # Create a new proxy object as we don't already have one
                instance._data[key] = self.proxy_class(key=key, instance=instance,
                        collection_name=self.collection_name)
                instance._data[key].put(value)
        else:
            instance._data[key] = value
        instance._mark_as_changed(key)
instance._mark_as_changed(key)
def getFileField(db_alias=DEFAULT_CONNECTION_NAME, collection_name="fs", **kwargs):
    """
    Select the FileField implementation matching the configured binary
    store: plain GridFS-backed FileField or the S3-backed variant.
    Returns None for any other FILE_DB setting.
    """
    if settings.FILE_DB == settings.GRIDFS:
        return FileField(db_alias, collection_name, **kwargs)
    if settings.FILE_DB == settings.S3:
        return S3FileField(db_alias, collection_name, **kwargs)
    return None
| [
"mongoengine.FileField",
"dateutil.parser.parse",
"crits.core.s3_tools.delete_file_s3",
"datetime.datetime.now",
"crits.core.s3_tools.put_file_s3",
"crits.core.s3_tools.get_file_s3"
] | [((2267, 2320), 'crits.core.s3_tools.delete_file_s3', 'S3.delete_file_s3', (['self.grid_id', 'self.collection_name'], {}), '(self.grid_id, self.collection_name)\n', (2284, 2320), True, 'import crits.core.s3_tools as S3\n'), ((2992, 3038), 'crits.core.s3_tools.put_file_s3', 'S3.put_file_s3', (['file_obj', 'self.collection_name'], {}), '(file_obj, self.collection_name)\n', (3006, 3038), True, 'import crits.core.s3_tools as S3\n'), ((5051, 5097), 'mongoengine.FileField', 'FileField', (['db_alias', 'collection_name'], {}), '(db_alias, collection_name, **kwargs)\n', (5060, 5097), False, 'from mongoengine import DateTimeField, FileField\n'), ((1010, 1034), 'dateutil.parser.parse', 'parse', (['value'], {'fuzzy': '(True)'}), '(value, fuzzy=True)\n', (1015, 1034), False, 'from dateutil.parser import parse\n'), ((1078, 1101), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1099, 1101), False, 'import datetime\n'), ((2632, 2682), 'crits.core.s3_tools.get_file_s3', 'S3.get_file_s3', (['self.grid_id', 'self.collection_name'], {}), '(self.grid_id, self.collection_name)\n', (2646, 2682), True, 'import crits.core.s3_tools as S3\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import unittest
COLOR = "shiny gold"
FNAME = "input.txt"
N_ITER = 1e7
TEST_FNAME = "test_input.txt"
def main():
    """Run both puzzle parts on the real input, then the unit tests."""
    puzzle_input = load_input(FNAME)
    part1(puzzle_input)
    part2(puzzle_input)
    print("\nUnittests")
    unittest.main()
def part1(data):
    """Solution to day 7, part 1."""
    for rule in data:
        Bag(rule)
    count = Bag.n_bags_containing_specific_bag(COLOR)
    print(f"{count} bags can contain at least one {COLOR} bag.")
    return count
def part2(data):
    """Solution to day 7, part 2."""
    for rule in data:
        Bag(rule)
    count = Bag.n_bags_inside(COLOR)
    print(f"One {COLOR} bag contains {count} other bags.")
    return count
def load_input(fname):
    """Read the input file; return its lines with newlines stripped."""
    with open(fname, "r") as handle:
        return [line.strip("\n") for line in handle]
class Bag:
    """One bag rule: its color, its direct contents, and recursive queries
    over the global registry of all bags created so far."""

    # Registry of every Bag instance, keyed by color.
    all_bags = {}

    # Safety cap on the recursion counters below; guards against a cycle
    # in the bag rules, which would otherwise recurse forever.
    _MAX_ITER = 1e7

    def __init__(self, rule):
        self.color, self.descendants = self.init_bag(rule)
        self.no_descendants = not bool(self.descendants)
        Bag.all_bags[self.color] = self

    def init_bag(self, rule):
        """Get the color of the bag and its descendants.
        Parameters
        ----------
        rule : str
            Contains the rule defining the bag, e.g.:
            shiny gold bags contain 1 dark olive bag, 2 vibrant plum bags.
        Returns
        -------
        color : str
            The color of the bag, e.g., `dark olive`
        descendants_dict : dict or None
            A dictionary mapping each contained bag color to the amount of
            bags of that color, or None when the bag contains no other bags.
        """
        color, descendants = rule.split(" bags contain ")
        descendants_dict = {}
        for desc in descendants.split(","):
            match = re.match(r"(\d+) ([a-z]+ [a-z]+) bags?",
                             desc.strip())
            if match is None:
                # "no other bags" (or anything unparseable): leaf bag.
                return color, None
            else:
                amount = int(match.group(1))
                descendant_color = match.group(2)
                descendants_dict[descendant_color] = amount
        return color, descendants_dict

    def bag_in_descendants(self, bag_color, n_iter):
        """Check if bag_color is this bag's color or occurs anywhere in its
        descendants (recursively)."""
        # Prevent an infinite loop on cyclic rules.
        if n_iter > Bag._MAX_ITER:
            raise RuntimeError("Exceeded maximum number of iterations!")
        if self.color == bag_color:
            return True
        if self.no_descendants:
            return False
        for descendant_bag_color in self.descendants.keys():
            descendant_bag = Bag.all_bags[descendant_bag_color]
            if descendant_bag.bag_in_descendants(bag_color, n_iter + 1):
                return True
        return False

    def n_bags_in_descendants(self, n_iter):
        """Return the number of bags in the descendants of this bag.
        Note
        ----
        This includes the bag itself, e.g., consider one red bag
        containing four green bags. In that case, the function would
        return 5 (and not 4).
        """
        # Prevent an infinite loop on cyclic rules.
        if n_iter > Bag._MAX_ITER:
            raise RuntimeError("Exceeded maximum number of iterations!")
        if self.no_descendants:
            return 0
        n_iter += 1
        bags_inside = 0
        for descendant_color, descendant_num in self.descendants.items():
            descendant_bag = Bag.all_bags[descendant_color]
            if descendant_bag.no_descendants:
                bags_inside += descendant_num
            else:
                bags_inside += (
                    descendant_num
                    * descendant_bag.n_bags_in_descendants(n_iter))
        bags_inside += 1  # Count this bag itself (see docstring).
        return bags_inside

    @classmethod
    def n_bags_containing_specific_bag(cls, bag_color):
        """Return the number of bags containing at least one `bag_color` bag."""
        n_bags = 0
        for bag in cls.all_bags.values():
            if bag is cls.all_bags[bag_color]:
                continue
            # BUGFIX: search for the requested color. The original used the
            # module-level COLOR constant, silently ignoring the parameter.
            n_bags += int(bag.bag_in_descendants(bag_color, 0))
        return n_bags

    @classmethod
    def n_bags_inside(cls, bag_color):
        """Return the number of bags inside the bag `bag_color`."""
        n_bags = cls.all_bags[bag_color].n_bags_in_descendants(0)
        n_bags -= 1  # Subtract the bag itself.
        return n_bags
class TestMethods(unittest.TestCase):
    """Check both parts against the example input and its known answers."""

    def setUp(self):
        Bag.all_bags = {}
        self.data = load_input(TEST_FNAME)

    def test_part1(self):
        self.assertEqual(part1(self.data), 4)

    def test_part2(self):
        self.assertEqual(part2(self.data), 32)
if __name__=="__main__":
main()
| [
"unittest.main"
] | [((292, 307), 'unittest.main', 'unittest.main', ([], {}), '()\n', (305, 307), False, 'import unittest\n')] |
#!/usr/bin/env python3
#coding: UTF-8
import os
import sys
import time
import json
import argparse
from os.path import join, exists, dirname
from upgrade import check_upgrade
from utils import call, get_conf, get_script, get_command_output, get_install_dir
installdir = get_install_dir()
topdir = dirname(installdir)
def watch_controller():
    """Poll every 5s for the seafile controller / GC processes; once both
    have been absent for several consecutive checks, report and exit(1)."""
    max_misses = 4
    misses = 0
    while misses < max_misses:
        controller = get_command_output('ps aux | grep seafile-controller | grep -v grep || true').strip()
        collector = get_command_output('ps aux | grep /scripts/gc.sh | grep -v grep || true').strip()
        if controller or collector:
            misses = 0
        else:
            misses += 1
        time.sleep(5)
    print('seafile controller exited unexpectedly.')
    sys.exit(1)
def main(args):
    """Bring up the seafile services, then block watching the controller.

    Writes admin credentials (from the environment, with placeholder
    defaults) to a temp file for seafile's startup scripts, starts
    seafile/seahub (plus background tasks in 'backend' mode), always
    removes the credentials file again, and finally polls the controller
    until it dies or the user interrupts.
    """
    call('/scripts/create_data_links.sh')
    # check_upgrade()
    os.chdir(installdir)
    call('service nginx start &')
    admin_pw = {
        'email': get_conf('SEAFILE_ADMIN_EMAIL', '<EMAIL>'),
        'password': get_conf('SEAFILE_ADMIN_PASSWORD', '<PASSWORD>'),
    }
    password_file = join(topdir, 'conf', 'admin.txt')
    with open(password_file, 'w+') as fp:
        json.dump(admin_pw, fp)
    try:
        call('{} start'.format(get_script('seafile.sh')))
        call('{} start'.format(get_script('seahub.sh')))
        if args.mode == 'backend':
            call('{} start'.format(get_script('seafile-background-tasks.sh')))
    finally:
        # Always remove the plaintext credentials file, even if startup fails.
        if exists(password_file):
            os.unlink(password_file)
    print('seafile server is running now.')
    try:
        watch_controller()
    except KeyboardInterrupt:
        print('Stopping seafile server.')
        sys.exit(0)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Seafile cluster start script')
parser.add_argument('--mode')
main(parser.parse_args())
| [
"os.path.exists",
"utils.get_conf",
"argparse.ArgumentParser",
"os.path.join",
"time.sleep",
"os.chdir",
"os.path.dirname",
"utils.get_command_output",
"utils.get_script",
"os.unlink",
"sys.exit",
"utils.call",
"json.dump",
"utils.get_install_dir"
] | [((273, 290), 'utils.get_install_dir', 'get_install_dir', ([], {}), '()\n', (288, 290), False, 'from utils import call, get_conf, get_script, get_command_output, get_install_dir\n'), ((300, 319), 'os.path.dirname', 'dirname', (['installdir'], {}), '(installdir)\n', (307, 319), False, 'from os.path import join, exists, dirname\n'), ((828, 839), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (836, 839), False, 'import sys\n'), ((861, 898), 'utils.call', 'call', (['"""/scripts/create_data_links.sh"""'], {}), "('/scripts/create_data_links.sh')\n", (865, 898), False, 'from utils import call, get_conf, get_script, get_command_output, get_install_dir\n'), ((925, 945), 'os.chdir', 'os.chdir', (['installdir'], {}), '(installdir)\n', (933, 945), False, 'import os\n'), ((950, 979), 'utils.call', 'call', (['"""service nginx start &"""'], {}), "('service nginx start &')\n", (954, 979), False, 'from utils import call, get_conf, get_script, get_command_output, get_install_dir\n'), ((1155, 1188), 'os.path.join', 'join', (['topdir', '"""conf"""', '"""admin.txt"""'], {}), "(topdir, 'conf', 'admin.txt')\n", (1159, 1188), False, 'from os.path import join, exists, dirname\n'), ((1801, 1868), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Seafile cluster start script"""'}), "(description='Seafile cluster start script')\n", (1824, 1868), False, 'import argparse\n'), ((757, 770), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (767, 770), False, 'import time\n'), ((1015, 1057), 'utils.get_conf', 'get_conf', (['"""SEAFILE_ADMIN_EMAIL"""', '"""<EMAIL>"""'], {}), "('SEAFILE_ADMIN_EMAIL', '<EMAIL>')\n", (1023, 1057), False, 'from utils import call, get_conf, get_script, get_command_output, get_install_dir\n'), ((1079, 1127), 'utils.get_conf', 'get_conf', (['"""SEAFILE_ADMIN_PASSWORD"""', '"""<PASSWORD>"""'], {}), "('SEAFILE_ADMIN_PASSWORD', '<PASSWORD>')\n", (1087, 1127), False, 'from utils import call, get_conf, get_script, get_command_output, 
get_install_dir\n'), ((1239, 1262), 'json.dump', 'json.dump', (['admin_pw', 'fp'], {}), '(admin_pw, fp)\n', (1248, 1262), False, 'import json\n'), ((1527, 1548), 'os.path.exists', 'exists', (['password_file'], {}), '(password_file)\n', (1533, 1548), False, 'from os.path import join, exists, dirname\n'), ((1562, 1586), 'os.unlink', 'os.unlink', (['password_file'], {}), '(password_file)\n', (1571, 1586), False, 'import os\n'), ((1748, 1759), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (1756, 1759), False, 'import sys\n'), ((429, 506), 'utils.get_command_output', 'get_command_output', (['"""ps aux | grep seafile-controller | grep -v grep || true"""'], {}), "('ps aux | grep seafile-controller | grep -v grep || true')\n", (447, 506), False, 'from utils import call, get_conf, get_script, get_command_output, get_install_dir\n'), ((547, 620), 'utils.get_command_output', 'get_command_output', (['"""ps aux | grep /scripts/gc.sh | grep -v grep || true"""'], {}), "('ps aux | grep /scripts/gc.sh | grep -v grep || true')\n", (565, 620), False, 'from utils import call, get_conf, get_script, get_command_output, get_install_dir\n'), ((1305, 1329), 'utils.get_script', 'get_script', (['"""seafile.sh"""'], {}), "('seafile.sh')\n", (1315, 1329), False, 'from utils import call, get_conf, get_script, get_command_output, get_install_dir\n'), ((1363, 1386), 'utils.get_script', 'get_script', (['"""seahub.sh"""'], {}), "('seahub.sh')\n", (1373, 1386), False, 'from utils import call, get_conf, get_script, get_command_output, get_install_dir\n'), ((1459, 1500), 'utils.get_script', 'get_script', (['"""seafile-background-tasks.sh"""'], {}), "('seafile-background-tasks.sh')\n", (1469, 1500), False, 'from utils import call, get_conf, get_script, get_command_output, get_install_dir\n')] |
import sys, os
import nltk
import numpy as np
class Patch():
    """Container for one decode result: the original (parent) code, the
    target (child) code, generated candidate patches, and evaluation
    outcomes for the tree- and token-based models."""

    def __init__(self):
        self.id = -1
        self.parent_code = ''
        self.child_code = ''
        self.patches = []
        self.verdict = False
        self.distance = 0
        self.verdict_token = False

    def __repr__(self):
        parts = [
            str(self.id),
            ' '.join(self.parent_code),
            ' '.join(self.child_code),
            str(self.distance),
            str(self.verdict),
        ]
        return '\n'.join(parts)
def read_patch(file_path, size):
    """Parse a decode-result file into an array of Patch objects.

    Each record spans ``size * 2 + 9`` lines. Within a record (0-based):
    line 0 is the patch id, line 1 the parent code, line 3 the child code,
    lines 7..(7 + 2*size - 1) alternate generated-patch text and an
    integer score, and the second-to-last line is the 'True'/'False'
    verdict. NOTE(review): layout inferred from the indices used below —
    confirm against the writer of these files.

    ``distance`` is the token-level edit distance between parent and
    child code. Returns a numpy array of Patch instances.
    """
    num_line_per_patch = size * 2 + 9
    patches_lines = []
    with open(file_path) as f:
        patch = []
        for ln, line in enumerate(f):
            line = line.strip()
            # A new record starts every num_line_per_patch lines.
            if (ln % num_line_per_patch == 0) and (ln != 0):
                patches_lines.append([l for l in patch])
                patch = []
            patch.append(line)
        patches_lines.append(patch)
    patches = []
    for lines in patches_lines:
        ex = Patch()
        ex.id = int(lines[0])
        ex.parent_code = [token.strip() for token in lines[1].split()]
        ex.child_code = [token.strip() for token in lines[3].split()]
        ex.patches = []
        for gen_idx in range(size):
            cidx = gen_idx * 2
            didx = cidx + 1
            # Each candidate is [patch text line, integer score line].
            ex.patches.append([lines[cidx + 7], int(lines[didx + 7])])
        verdict = lines[-2].strip()
        if verdict == 'True':
            ex.verdict = True
        else:
            ex.verdict = False
        # print(verdict)
        ex.distance = nltk.edit_distance([token.strip() for token in ex.parent_code],
                                           [token.strip() for token in ex.child_code])
        patches.append(ex)
    return np.asarray(patches)
def de_duplicate_patches(patches):
    """Return indices of one representative patch per unique
    (parent_code, child_code) pair, preferring a successful
    (verdict=True) patch — the last such one — when a group has any;
    otherwise the group's last member is kept."""
    groups = {}
    for position, patch in enumerate(patches):
        signature = ' '.join(patch.parent_code) + ' '.join(patch.child_code)
        groups.setdefault(signature, []).append((patch, position))
    representatives = []
    for members in groups.values():
        chosen = members[-1][1]
        for member, position in members:
            if member.verdict:
                chosen = position
        representatives.append(chosen)
    return representatives
if __name__ == '__main__':
result_base = '/home/sc2nf/codit-clone'
option = 'token' # 'token
size = 10
# if option == 'tree':
# file_name = 'codit-all-concrete_' + str(size) + '.2_' + str(2*size) + '_decode_res.txt'
# else:
# file_name = 'codit.all.token.top.' + str(size) + '_' + str(size) + '_decode_res.txt'
file_name_tree = 'codit-all-concrete_' + str(size) + '.2_' + str(2 * size) + '_decode_res.txt'
file_path_tree = result_base + '/' + file_name_tree
patches_tree = read_patch(file_path_tree, size)
unique_indices = de_duplicate_patches(patches_tree)
# unique_patches_tree = patches_tree[unique_indices]
# unique_count = len(unique_patches_tree)
file_name_token = 'codit.all.token.top.' + str(size) + '_' + str(size) + '_decode_res.txt'
file_path_token = result_base + '/' + file_name_token
patches_token = read_patch(file_path_token, size)
# unique_patches = patches_token[unique_indices]
unified_patches = []
for idx, (p_tree, p_token) in enumerate(zip(patches_tree, patches_token)):
if idx in unique_indices:
assert isinstance(p_tree, Patch) and isinstance(p_token, Patch)
p_tree.verdict_token = p_token.verdict
unified_patches.append(p_tree)
tree_count = np.sum([1 if p.verdict else 0 for p in unified_patches])
token_count = np.sum([1 if p.verdict_token else 0 for p in unified_patches])
tree_indices = set()
token_indices = set()
for i, p in enumerate(unified_patches):
if p.verdict:
tree_indices.add(i)
if p.verdict_token:
token_indices.add(i)
only_tree = tree_indices.difference(token_indices)
only_token = token_indices.difference(tree_indices)
common = tree_indices.intersection(token_indices)
print(tree_count, token_count, len(only_token), len(only_tree), len(common), len(unified_patches))
#
# total_success_tree = np.sum([1 if p.verdict else 0 for p in unique_patches])
# print(unique_patches, total_success_tree)
# tree_success_indices_in_unique = set()
# for idx, p in enumerate(unique_patches):
# if p.verdict:
# tree_success_indices_in_unique.add(idx)
#
#
#
# total_success_token = np.sum([1 if p.verdict else 0 for p in unique_patches])
# print(tree_count, total_success_token)
| [
"numpy.sum",
"numpy.asarray"
] | [((1726, 1745), 'numpy.asarray', 'np.asarray', (['patches'], {}), '(patches)\n', (1736, 1745), True, 'import numpy as np\n'), ((3701, 3759), 'numpy.sum', 'np.sum', (['[(1 if p.verdict else 0) for p in unified_patches]'], {}), '([(1 if p.verdict else 0) for p in unified_patches])\n', (3707, 3759), True, 'import numpy as np\n'), ((3776, 3840), 'numpy.sum', 'np.sum', (['[(1 if p.verdict_token else 0) for p in unified_patches]'], {}), '([(1 if p.verdict_token else 0) for p in unified_patches])\n', (3782, 3840), True, 'import numpy as np\n')] |
# Copyright (c) 2016-2018 <NAME>. All rights reserved. A
# copyright license for redistribution and use in source and binary forms,
# with or without modification, is hereby granted for non-commercial,
# experimental and research purposes, provided that the following conditions
# are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimers.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimers in the
# documentation and/or other materials provided with the distribution. If
# you wish to use this software commercially, kindly contact
# <EMAIL> to obtain a commercial license.
#
# This license extends only to copyright and does not include or grant any
# patent license or other license whatsoever.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import subprocess
import sys
import pysnark.options
def run(eksize, pksize, genmk=False):
    """
    Run the qapgen tool (Python 2 module — note the print-statement syntax).

    :param eksize: Desired master evaluation key size
    :param pksize: Desired master public key size
    :param genmk: True if a new master secret key should be generated, False otherwise
    :raises IOError: when enlarging was requested but no master secret key exists
    :return: None (exits the process with status 2 if qapgen fails)
    """
    mskfile = pysnark.options.get_mskey_file()
    mkeyfile = pysnark.options.get_mkey_file()
    mpkeyfile = pysnark.options.get_mpkey_file()
    # Enlarging existing key material requires the master secret key.
    if not genmk and not os.path.isfile(mskfile):
        # BUGFIX: corrected "materiak" -> "material" in the error message.
        raise IOError("Could not enlarge master key material: master secret key missing")
    print >> sys.stderr, "*** " + ("Generating" if genmk else "Enlarging") + " master key material"
    # qapgen args: max exponent for the evaluation key, max exponent for the
    # public key (both clamped to >= 0), then the three key files.
    if subprocess.call([pysnark.options.get_qaptool_exe("qapgen"), str(max(pksize,eksize,0)), str(max(pksize,0)),
                        mskfile, mkeyfile, mpkeyfile]) != 0:
        sys.exit(2)
def get_mekey_size():
    """
    Get the size (maximal exponent) of the current master evaluation key
    :return: Size, or -1 if key does not exist
    """
    try:
        mekf = open(pysnark.options.get_mkey_file())
        # Third space-separated field of the first line is taken as the size.
        # NOTE(review): Python 2 file iterator (.next()); field layout assumed
        # from qapgen's output — confirm against the key-file format.
        curmk = int(mekf.next().strip().split(" ")[2])
        mekf.close()
        return curmk
    except IOError:
        return -1
def get_mpkey_size():
    """
    Get the size (maximal exponent) of the current master public key
    :return: Size, or -1 if key does not exist
    """
    try:
        mpkf = open(pysnark.options.get_mpkey_file())
        # Third space-separated field of the first line is taken as the size.
        # NOTE(review): Python 2 file iterator (.next()); field layout assumed
        # from qapgen's output — confirm against the key-file format.
        curmpk = int(mpkf.next().strip().split(" ")[2])
        mpkf.close()
        return curmpk
    except IOError:
        return -1
def ensure_mkey(eksize, pksize):
    """
    Ensures that there are master evaluation and public keys of the given sizes.
    If master evaluation/public keys exist but are too small, and there is no
    master secret key, this raises an error.
    If there is no key material at all, a fresh master secret key will be
    generated.
    :param eksize: Minimal evaluation key size (-1 if not needed)
    :param pksize: Minimal public key size (-1 if not needed)
    :return: Actual evaluation key, public key size after key generation
    """
    curek = get_mekey_size()
    curpk = get_mpkey_size()
    havemsk = os.path.isfile(pysnark.options.get_mskey_file())
    havekeys = os.path.isfile(pysnark.options.get_mpkey_file()) or os.path.isfile(pysnark.options.get_mkey_file())
    if curek < eksize or curpk < pksize:
        if havemsk:
            # Secret key available: enlarge the existing material in place.
            run(max(curek, eksize), max(curpk, pksize), False)
            return (max(curek, eksize), max(curpk, pksize))
        elif havekeys:
            # Public material exists but cannot be enlarged without the
            # master secret key.
            raise IOError("Key material too small ("+str(curek)+","+str(curpk)+
                          ")<("+str(eksize)+","+str(pksize)+") and missing master secret key")
        else:
            # No key material at all: generate a fresh master secret key.
            run(eksize, pksize, True)
            return (eksize,pksize)
    else:
        return (curek,curpk)
if __name__ == "__main__":
if len(sys.argv)<3:
print >>sys.stderr, "*** Usage:", sys.argv[0], "<eksize>", "<pksize>"
sys.exit(2)
argeksize = int(sys.argv[1])
argpksize = int(sys.argv[2])
run(argeksize, argpksize, not os.path.isfile(pysnark.options.get_mskey_file()))
| [
"os.path.isfile",
"sys.exit"
] | [((2574, 2585), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (2582, 2585), False, 'import sys\n'), ((4734, 4745), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (4742, 4745), False, 'import sys\n'), ((2175, 2198), 'os.path.isfile', 'os.path.isfile', (['mskfile'], {}), '(mskfile)\n', (2189, 2198), False, 'import os\n')] |
import os
import platform
import unittest
# ZODB >= 3.9. The blob directory can be a private cache.
shared_blob_dir_choices = (False, True)
RUNNING_ON_TRAVIS = os.environ.get('TRAVIS')
RUNNING_ON_APPVEYOR = os.environ.get('APPVEYOR')
RUNNING_ON_CI = RUNNING_ON_TRAVIS or RUNNING_ON_APPVEYOR
def _do_not_skip(reason): # pylint:disable=unused-argument
def dec(f):
return f
return dec
if RUNNING_ON_CI:
skipOnCI = unittest.skip
else:
skipOnCI = _do_not_skip
if RUNNING_ON_APPVEYOR:
skipOnAppveyor = unittest.skip
else:
skipOnAppveyor = _do_not_skip
CACHE_SERVERS = None
CACHE_MODULE_NAME = None
if RUNNING_ON_TRAVIS:
# We expect to have access to a local memcache server
# on travis. Use it if we can import drivers.
# pylint:disable=unused-import
try:
import pylibmc
CACHE_SERVERS = ["localhost:11211"]
CACHE_MODULE_NAME = 'relstorage.pylibmc_wrapper'
except ImportError:
try:
import memcache
CACHE_SERVERS = ["localhost:11211"]
CACHE_MODULE_NAME = 'memcache'
except ImportError:
pass
USE_SMALL_BLOBS = ((RUNNING_ON_CI # slow here
or platform.system() == 'Darwin' # interactive testing
or os.environ.get("RS_SMALL_BLOB")) # define
and not os.environ.get('RS_LARGE_BLOB'))
# mysqlclient (aka MySQLdb) and possibly other things that
# use libmysqlclient.so will try to connect over the
# default Unix socket that was established when that
# library was compiled if no host is given. But that
# server may not be running, or may not be the one we want
# to use for testing, so explicitly ask it to use TCP
# socket by giving an IP address (using 'localhost' will
# still try to use the socket.) (The TCP port can be bound
# by non-root, but the default Unix socket often requires
# root permissions to open.)
STANDARD_DATABASE_SERVER_HOST = '127.0.0.1'
DEFAULT_DATABASE_SERVER_HOST = os.environ.get('RS_DB_HOST',
STANDARD_DATABASE_SERVER_HOST)
TEST_UNAVAILABLE_DRIVERS = not bool(os.environ.get('RS_SKIP_UNAVAILABLE_DRIVERS'))
if RUNNING_ON_CI:
TEST_UNAVAILABLE_DRIVERS = False
class MinimalTestLayer(object):
    """Bare-bones test layer: carries a name and implements the four
    lifecycle hooks as no-ops."""

    __bases__ = ()
    __module__ = ''

    def __init__(self, name):
        self.__name__ = name

    def setUp(self):
        """No layer setup required."""

    def tearDown(self):
        """No layer teardown required."""

    def testSetUp(self):
        """No per-test setup required."""

    def testTearDown(self):
        """No per-test teardown required."""
class _Availability(object):
"""
Has a boolean value telling whether the driver or database is available,
and a string explaining why it is/is not.
"""
def __init__(self, factory, drivers, max_priority, use_adapter, db_name):
from relstorage.adapters.interfaces import DriverNotAvailableError
self.driver_name = factory.driver_name
self.escaped_driver_name = self.driver_name.replace(' ', '').replace('/', '_')
try:
self.driver = drivers.select_driver(self.driver_name)
except DriverNotAvailableError:
self.driver = None
self._available = self.driver is not None and self.driver.priority <= max_priority
if not self._available:
if self.driver is None:
msg = 'Driver %s is not installed' % (self.driver_name,)
else:
msg = 'Driver %s has test priority %d >= max %d' % (
self.driver_name, self.driver.priority, max_priority
)
else:
msg = 'Driver %s is installed' % (self.driver_name,)
self._msg = msg
if self.driver is not None:
type(self.driver).STRICT = True
if self._available:
# See if we can connect.
self.__check_db_access(use_adapter, db_name)
def __str__(self):
return self._msg
def __bool__(self):
return self._available
__nonzero__ = __bool__
def __check_db_access_cb(self, _conn, _cursor):
"Does nothing"
__check_db_access_cb.transaction_read_only = True
def __check_db_access(self, use_adapter, db_name):
# We need to get an adapter to get a connmanager to try to connect.
from relstorage.options import Options
options = Options(driver=self.driver_name)
adapter_maker = use_adapter()
adapter_maker.driver_name = self.driver_name
adapter = adapter_maker.make_adapter(options, db_name)
try:
adapter.connmanager.open_and_call(self.__check_db_access_cb)
except (TypeError, AttributeError):
raise
except Exception as e: # pylint:disable=broad-except
self._available = False
self._msg = "%s: Failed to connect: %r %s" % (self._msg, type(e), e)
class AbstractTestSuiteBuilder(object):
__name__ = None # PostgreSQL, MySQL, Oracle
# Drivers with a priority over this amount won't be part of the
# test run even if installed.
MAX_PRIORITY = int(os.environ.get('RS_MAX_TEST_PRIORITY', '100'))
# Ask the drivers to be in their strictest possible mode.
STRICT_DRIVER = True
def __init__(self, driver_options, use_adapter, extra_test_classes=()):
"""
:param driver_options: The ``IDBDriverOptions``
:param use_adapter: A mixin class implementing the abstract methods
defined by ``StorageCreatingMixin``.
"""
self.drivers = driver_options
self.extra_test_classes = extra_test_classes
self.base_dbname = os.environ.get('RELSTORAGETEST_DBNAME', 'relstoragetest')
self.db_names = {
'data': self.base_dbname,
'1': self.base_dbname,
'2': self.base_dbname + '2',
'dest': self.base_dbname + '2',
}
self.use_adapter = use_adapter
use_adapter.base_dbname = self.base_dbname
self.large_blob_size = self._compute_large_blob_size(USE_SMALL_BLOBS)
def _compute_large_blob_size(self, use_small_blobs):
raise NotImplementedError
def test_suite(self):
from .reltestbase import AbstractIDBDriverTest
from .reltestbase import AbstractIDBOptionsTest
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(type(
self.__name__ + 'DBOptionsTest',
(AbstractIDBOptionsTest,),
{'db_options': self.drivers}
)))
for factory in self.drivers.known_driver_factories():
available = _Availability(
factory, self.drivers, self.MAX_PRIORITY,
self.use_adapter,
self.db_names['data']
)
# On CI, we don't even add tests for unavailable drivers to the
# list of tests; this makes the output much shorter and easier to read,
# but it does make zope-testrunner's discovery options less useful.
if available or TEST_UNAVAILABLE_DRIVERS:
# Checking the driver is just a unit test, it doesn't connect or
# need a layer
suite.addTest(unittest.makeSuite(
self.__skipping_if_not_available(
type(
self.__name__ + 'DBDriverTest_' + available.escaped_driver_name,
(AbstractIDBDriverTest,),
{'driver': available.driver}
),
available)))
# We put the various drivers into a zope.testrunner layer
# for ease of selection by name, e.g.,
# zope-testrunner --layer PG8000Driver
driver_suite = unittest.TestSuite()
layer_name = '%s%s' % (
self.__name__,
available.escaped_driver_name,
)
driver_suite.layer = MinimalTestLayer(layer_name)
driver_suite.layer.__module__ = self.__module__
self._add_driver_to_suite(driver_suite, layer_name, available)
suite.addTest(driver_suite)
return suite
def _default_make_check_class(self, bases, name, klass_dict=None):
klass = type(
name,
(self.use_adapter,) + bases,
klass_dict or {}
)
return klass
def _make_check_classes(self):
# The classes that inherit from ZODB tests and use 'check' instead of 'test_'
# This class is sadly not super() cooperative, so we must
# try to explicitly put it last in the MRO.
from ZODB.tests.util import TestCase as ZODBTestCase
from .hftestbase import HistoryFreeFromFileStorage
from .hftestbase import HistoryFreeToFileStorage
from .hftestbase import HistoryFreeRelStorageTests
from .hptestbase import HistoryPreservingFromFileStorage
from .hptestbase import HistoryPreservingToFileStorage
from .hptestbase import HistoryPreservingRelStorageTests
classes = []
for _, bases in (
('HF', (HistoryFreeFromFileStorage,
HistoryFreeToFileStorage,
HistoryFreeRelStorageTests)),
('HP', (HistoryPreservingFromFileStorage,
HistoryPreservingToFileStorage,
HistoryPreservingRelStorageTests))
):
for base in bases:
name = self.__name__ + base.__name__
maker = getattr(self, '_make_check_class_' + base.__name__,
self._default_make_check_class)
__traceback_info__ = maker, base
klass = maker((base, ZODBTestCase), name)
klass.__module__ = self.__module__
klass.__name__ = name
classes.append(klass)
return classes
def _make_zodbconvert_classes(self):
from .reltestbase import AbstractRSDestZodbConvertTests
from .reltestbase import AbstractRSSrcZodbConvertTests
classes = []
for base in (AbstractRSSrcZodbConvertTests, AbstractRSDestZodbConvertTests):
klass = type(
self.__name__ + base.__name__[8:],
(self.use_adapter, base),
{}
)
klass.__module__ = self.__module__
classes.append(klass)
return classes
def __skipping_if_not_available(self, klass, availability):
klass.__module__ = self.__module__
klass = unittest.skipUnless(
availability,
str(availability))(klass)
return klass
def _new_class_for_driver(self, base, driver_available):
klass = type(
base.__name__ + '_' + driver_available.escaped_driver_name,
(base,),
{'driver_name': driver_available.driver_name}
)
return self.__skipping_if_not_available(klass, driver_available)
def _add_driver_to_suite(self, suite, layer_prefix, driver_available):
for klass in self._make_check_classes():
klass = self._new_class_for_driver(klass, driver_available)
suite.addTest(unittest.makeSuite(klass, "check"))
for klass in self._make_zodbconvert_classes():
suite.addTest(unittest.makeSuite(
self._new_class_for_driver(klass,
driver_available)))
for klass in self.extra_test_classes:
suite.addTest(unittest.makeSuite(
self._new_class_for_driver(klass,
driver_available)))
from relstorage.tests.blob.testblob import storage_reusable_suite
from relstorage.options import Options
from relstorage.storage import RelStorage
for shared_blob_dir in shared_blob_dir_choices:
for keep_history in (False, True):
# TODO: Make any of the tests that are needing this
# subclass StorageCreatingMixin so we unify where
# that's handled.
def create_storage(name, blob_dir,
shared_blob_dir=shared_blob_dir,
keep_history=keep_history, **kw):
if not driver_available:
raise unittest.SkipTest(str(driver_available))
assert 'driver' not in kw
kw['driver'] = driver_available.driver_name
db = self.db_names[name]
if not keep_history:
db += '_hf'
options = Options(
keep_history=keep_history,
shared_blob_dir=shared_blob_dir,
blob_dir=os.path.abspath(blob_dir),
**kw)
adapter_maker = self.use_adapter()
adapter_maker.driver_name = driver_available.driver_name
adapter = adapter_maker.make_adapter(options, db)
__traceback_info__ = adapter, options
storage = RelStorage(adapter, name=name, options=options)
storage.zap_all()
return storage
prefix = '%s_%s%s' % (
layer_prefix,
'Shared' if shared_blob_dir else 'Unshared',
'HistoryPreserving' if keep_history else 'HistoryFree',
)
# If the blob directory is a cache, don't test packing,
# since packing can not remove blobs from all caches.
test_packing = shared_blob_dir
suite.addTest(storage_reusable_suite(
prefix, create_storage,
keep_history=keep_history,
test_blob_storage_recovery=True,
test_packing=test_packing,
test_undo=keep_history,
test_blob_cache=(not shared_blob_dir),
# PostgreSQL blob chunks are max 2GB in size
large_blob_size=(not shared_blob_dir) and (self.large_blob_size) + 100,
storage_is_available=driver_available
))
return suite
| [
"unittest.TestSuite",
"unittest.makeSuite",
"relstorage.tests.blob.testblob.storage_reusable_suite",
"os.environ.get",
"relstorage.storage.RelStorage",
"relstorage.options.Options",
"platform.system",
"os.path.abspath"
] | [((163, 187), 'os.environ.get', 'os.environ.get', (['"""TRAVIS"""'], {}), "('TRAVIS')\n", (177, 187), False, 'import os\n'), ((210, 236), 'os.environ.get', 'os.environ.get', (['"""APPVEYOR"""'], {}), "('APPVEYOR')\n", (224, 236), False, 'import os\n'), ((1989, 2048), 'os.environ.get', 'os.environ.get', (['"""RS_DB_HOST"""', 'STANDARD_DATABASE_SERVER_HOST'], {}), "('RS_DB_HOST', STANDARD_DATABASE_SERVER_HOST)\n", (2003, 2048), False, 'import os\n'), ((1277, 1308), 'os.environ.get', 'os.environ.get', (['"""RS_SMALL_BLOB"""'], {}), "('RS_SMALL_BLOB')\n", (1291, 1308), False, 'import os\n'), ((1346, 1377), 'os.environ.get', 'os.environ.get', (['"""RS_LARGE_BLOB"""'], {}), "('RS_LARGE_BLOB')\n", (1360, 1377), False, 'import os\n'), ((2133, 2178), 'os.environ.get', 'os.environ.get', (['"""RS_SKIP_UNAVAILABLE_DRIVERS"""'], {}), "('RS_SKIP_UNAVAILABLE_DRIVERS')\n", (2147, 2178), False, 'import os\n'), ((4314, 4346), 'relstorage.options.Options', 'Options', ([], {'driver': 'self.driver_name'}), '(driver=self.driver_name)\n', (4321, 4346), False, 'from relstorage.options import Options\n'), ((5045, 5090), 'os.environ.get', 'os.environ.get', (['"""RS_MAX_TEST_PRIORITY"""', '"""100"""'], {}), "('RS_MAX_TEST_PRIORITY', '100')\n", (5059, 5090), False, 'import os\n'), ((5581, 5638), 'os.environ.get', 'os.environ.get', (['"""RELSTORAGETEST_DBNAME"""', '"""relstoragetest"""'], {}), "('RELSTORAGETEST_DBNAME', 'relstoragetest')\n", (5595, 5638), False, 'import os\n'), ((6249, 6269), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (6267, 6269), False, 'import unittest\n'), ((1202, 1219), 'platform.system', 'platform.system', ([], {}), '()\n', (1217, 1219), False, 'import platform\n'), ((7724, 7744), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (7742, 7744), False, 'import unittest\n'), ((11222, 11256), 'unittest.makeSuite', 'unittest.makeSuite', (['klass', '"""check"""'], {}), "(klass, 'check')\n", (11240, 11256), False, 'import unittest\n'), 
((13188, 13235), 'relstorage.storage.RelStorage', 'RelStorage', (['adapter'], {'name': 'name', 'options': 'options'}), '(adapter, name=name, options=options)\n', (13198, 13235), False, 'from relstorage.storage import RelStorage\n'), ((13763, 14082), 'relstorage.tests.blob.testblob.storage_reusable_suite', 'storage_reusable_suite', (['prefix', 'create_storage'], {'keep_history': 'keep_history', 'test_blob_storage_recovery': '(True)', 'test_packing': 'test_packing', 'test_undo': 'keep_history', 'test_blob_cache': '(not shared_blob_dir)', 'large_blob_size': '(not shared_blob_dir and self.large_blob_size + 100)', 'storage_is_available': 'driver_available'}), '(prefix, create_storage, keep_history=keep_history,\n test_blob_storage_recovery=True, test_packing=test_packing, test_undo=\n keep_history, test_blob_cache=not shared_blob_dir, large_blob_size=not\n shared_blob_dir and self.large_blob_size + 100, storage_is_available=\n driver_available)\n', (13785, 14082), False, 'from relstorage.tests.blob.testblob import storage_reusable_suite\n'), ((12840, 12865), 'os.path.abspath', 'os.path.abspath', (['blob_dir'], {}), '(blob_dir)\n', (12855, 12865), False, 'import os\n')] |
'''
MIT License
Copyright (c) 2020 Autonomous Vision Group (AVG), Max Planck Institute for Intelligent Systems Tübingen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
# Copyright (c) 2020,21 NVIDIA CORPORATION & AFFILIATES.. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
The functions in file is mostly borrowed from
https://github.com/autonomousvision/differentiable_volumetric_rendering/blob/11542ed5ac4e7e4c19c5c74eba7929c1333f3896/im2mesh/dvr/models/__init__.py
with some modifications.
Codes released under MIT license
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from .decoder import Decoder
from .conv import Resnet18
import numpy as np
########################################################
class DVR(nn.Module):
''' DVR model class.
Args:
decoder (nn.Module): decoder network
encoder (nn.Module): encoder network
template (torch.FloatTensor): of shape (num_vertices, 3), template mesh
'''
def __init__(self, template):
super(DVR, self).__init__()
decoder = Decoder(dim=3,
c_dim=256,
leaky=True,
out_dim=6,
res0=True,
res0ini=torch.ones)
encoder = Resnet18(c_dim=256, normalize=True, use_linear=True)
self.decoder = decoder
self.encoder = encoder
self.template = nn.Parameter(template, requires_grad=False)
# learn the delta
residual_coef = torch.zeros(1)
self.residual_coef = nn.Parameter(residual_coef)
def forward(self, inputs_bx3xhxw):
# encode inputs
c_bxc = self.encoder(inputs_bx3xhxw)
pred_bxpxk = self.decoder(self.template, c=c_bxc)
rgb = pred_bxpxk[:, :, :3]
rgb = F.sigmoid(rgb)
delta = pred_bxpxk[:, :, 3:6]
p = self.template + self.residual_coef * delta
return p, delta, rgb
| [
"torch.nn.Parameter",
"torch.zeros",
"torch.nn.functional.sigmoid"
] | [((2935, 2978), 'torch.nn.Parameter', 'nn.Parameter', (['template'], {'requires_grad': '(False)'}), '(template, requires_grad=False)\n', (2947, 2978), True, 'import torch.nn as nn\n'), ((3030, 3044), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (3041, 3044), False, 'import torch\n'), ((3074, 3101), 'torch.nn.Parameter', 'nn.Parameter', (['residual_coef'], {}), '(residual_coef)\n', (3086, 3101), True, 'import torch.nn as nn\n'), ((3320, 3334), 'torch.nn.functional.sigmoid', 'F.sigmoid', (['rgb'], {}), '(rgb)\n', (3329, 3334), True, 'import torch.nn.functional as F\n')] |
"""
Slixmpp: The Slick XMPP Library
Copyright (C) 2012 <NAME>, <NAME>
This file is part of Slixmpp.
See the file LICENSE for copying permission.
"""
import logging
from slixmpp.xmlstream import register_stanza_plugin
from slixmpp.plugins.base import BasePlugin, register_plugin
log = logging.getLogger(__name__)
class XEP_0223(BasePlugin):
"""
XEP-0223: Persistent Storage of Private Data via PubSub
"""
name = 'xep_0223'
description = 'XEP-0223: Persistent Storage of Private Data via PubSub'
dependencies = {'xep_0163', 'xep_0060', 'xep_0004'}
profile = {'pubsub#persist_items': True,
'pubsub#access_model': 'whitelist'}
def configure(self, node, ifrom=None, callback=None, timeout=None):
"""
Update a node's configuration to match the public storage profile.
"""
# TODO: that cannot possibly work, why is this here?
config = self.xmpp['xep_0004'].Form()
config['type'] = 'submit'
for field, value in self.profile.items():
config.add_field(var=field, value=value)
return self.xmpp['xep_0060'].set_node_config(None, node, config,
ifrom=ifrom,
callback=callback,
timeout=timeout)
def store(self, stanza, node=None, id=None, ifrom=None, options=None,
callback=None, timeout=None, timeout_callback=None):
"""
Store private data via PEP.
This is just a (very) thin wrapper around the XEP-0060 publish()
method to set the defaults expected by PEP.
Arguments:
stanza -- The private content to store.
node -- The node to publish the content to. If not specified,
the stanza's namespace will be used.
id -- Optionally specify the ID of the item.
options -- Publish options to use, which will be modified to
fit the persistent storage option profile.
ifrom -- Specify the sender's JID.
timeout -- The length of time (in seconds) to wait for a response
before exiting the send call if blocking is used.
Defaults to slixmpp.xmlstream.RESPONSE_TIMEOUT
callback -- Optional reference to a stream handler function. Will
be executed when a reply stanza is received.
"""
if not options:
options = self.xmpp['xep_0004'].stanza.Form()
options['type'] = 'submit'
options.add_field(
var='FORM_TYPE',
ftype='hidden',
value='http://jabber.org/protocol/pubsub#publish-options')
fields = options['fields']
for field, value in self.profile.items():
if field not in fields:
options.add_field(var=field)
options.get_fields()[field]['value'] = value
return self.xmpp['xep_0163'].publish(stanza, node, options=options,
ifrom=ifrom, callback=callback,
timeout=timeout,
timeout_callback=timeout_callback)
def retrieve(self, node, id=None, item_ids=None, ifrom=None,
callback=None, timeout=None, timeout_callback=None):
"""
Retrieve private data via PEP.
This is just a (very) thin wrapper around the XEP-0060 publish()
method to set the defaults expected by PEP.
Arguments:
node -- The node to retrieve content from.
id -- Optionally specify the ID of the item.
item_ids -- Specify a group of IDs. If id is also specified, it
will be included in item_ids.
ifrom -- Specify the sender's JID.
timeout -- The length of time (in seconds) to wait for a response
before exiting the send call if blocking is used.
Defaults to slixmpp.xmlstream.RESPONSE_TIMEOUT
callback -- Optional reference to a stream handler function. Will
be executed when a reply stanza is received.
"""
if item_ids is None:
item_ids = []
if id is not None:
item_ids.append(id)
return self.xmpp['xep_0060'].get_items(None, node,
item_ids=item_ids, ifrom=ifrom,
callback=callback, timeout=timeout,
timeout_callback=timeout_callback)
register_plugin(XEP_0223)
| [
"logging.getLogger",
"slixmpp.plugins.base.register_plugin"
] | [((305, 332), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (322, 332), False, 'import logging\n'), ((4807, 4832), 'slixmpp.plugins.base.register_plugin', 'register_plugin', (['XEP_0223'], {}), '(XEP_0223)\n', (4822, 4832), False, 'from slixmpp.plugins.base import BasePlugin, register_plugin\n')] |
#! python3
# Combines all the pafs in the current working directory into a single pdf
import PyPDF2, os, sys, logging
class Merge ():
"""
Merge all pdfs in the current folder, or specific list of files,
by name, into a single pdf file
"""
def __init__ (self, file_output = "", replace = False, debug = False):
"""
Constructor of class. Generate empty list of files an get dir path and file ouput
"""
# Debug configuration
logging.basicConfig( level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s' )
if not debug:
logging.disable()
self.pdfFiles = []
self.fileOutput = file_output
self.replace = replace
self.__verify_outputh_file()
def merge_file_list (self, file_list):
"""
Merge a specific list of pdf files inside the output file
"""
# verify attribs
if type (file_list) != list:
raise AttributeError (file_list)
self.pdfFiles = file_list
# Short files
self.pdfFiles.sort(key = str.lower)
self.__make_file()
def merge_folder (self, folder):
"""
Merge all files from a specific folder and save inside the output file
"""
# Verify is folder exist
if not os.path.isdir (folder):
raise FileNotFoundError(folder)
# Get files
for filename in os.listdir(folder):
if filename.endswith('.pdf'):
self.pdfFiles.append(os.path.join(folder, filename))
# Order files
self.pdfFiles.sort(key = str.lower)
self.__make_file()
def __verify_outputh_file (self):
"""
Verify the name of the output file and if the file will be replace or not
"""
# verify path and make file name
if os.path.isdir (self.fileOutput):
self.fileOutput = os.path.join(self.fileOutput, 'mergeFiles.pdf')
else:
if not self.fileOutput.endswith('.pdf'):
self.fileOutput += '.pdf'
# Verify replca outputh file
if os.path.isfile(self.fileOutput):
if self.replace:
logging.debug ("Replacing file")
else:
self.fileOutput = 'File "{}" already exist'.format (self.fileOutput)
raise ValueError(self.fileOutput)
def __make_file (self):
"""
Make pdf output file with each page of the file list
"""
pdfWriter = PyPDF2.PdfFileWriter()
# loop through all the pdf files
if self.pdfFiles:
for currentFile in self.pdfFiles:
pdfFileObj = open (currentFile, 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
# loop through all the pages (except the first) and add them
logging.debug ("Merging {}... ".format (currentFile))
if pdfReader.numPages:
for pageNum in range (0, pdfReader.numPages):
pageObj = pdfReader.getPage(pageNum)
pdfWriter.addPage (pageObj)
# Save the resulting pdf to a file
pdfOutput = open (self.fileOutput, 'wb')
pdfWriter.write(pdfOutput)
pdfOutput.close()
logging.debug ('Done. Pages are now in {} file'.format (os.path.basename(self.fileOutput)))
else:
logging.debug ("Dosent exist pdf files in this folder.")
| [
"logging.basicConfig",
"os.listdir",
"logging.debug",
"os.path.join",
"os.path.isfile",
"os.path.isdir",
"os.path.basename",
"PyPDF2.PdfFileWriter",
"PyPDF2.PdfFileReader",
"logging.disable"
] | [((488, 586), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '""" %(asctime)s - %(levelname)s - %(message)s"""'}), "(level=logging.DEBUG, format=\n ' %(asctime)s - %(levelname)s - %(message)s')\n", (507, 586), False, 'import PyPDF2, os, sys, logging\n'), ((1462, 1480), 'os.listdir', 'os.listdir', (['folder'], {}), '(folder)\n', (1472, 1480), False, 'import PyPDF2, os, sys, logging\n'), ((1896, 1926), 'os.path.isdir', 'os.path.isdir', (['self.fileOutput'], {}), '(self.fileOutput)\n', (1909, 1926), False, 'import PyPDF2, os, sys, logging\n'), ((2168, 2199), 'os.path.isfile', 'os.path.isfile', (['self.fileOutput'], {}), '(self.fileOutput)\n', (2182, 2199), False, 'import PyPDF2, os, sys, logging\n'), ((2570, 2592), 'PyPDF2.PdfFileWriter', 'PyPDF2.PdfFileWriter', ([], {}), '()\n', (2590, 2592), False, 'import PyPDF2, os, sys, logging\n'), ((619, 636), 'logging.disable', 'logging.disable', ([], {}), '()\n', (634, 636), False, 'import PyPDF2, os, sys, logging\n'), ((1340, 1361), 'os.path.isdir', 'os.path.isdir', (['folder'], {}), '(folder)\n', (1353, 1361), False, 'import PyPDF2, os, sys, logging\n'), ((1960, 2007), 'os.path.join', 'os.path.join', (['self.fileOutput', '"""mergeFiles.pdf"""'], {}), "(self.fileOutput, 'mergeFiles.pdf')\n", (1972, 2007), False, 'import PyPDF2, os, sys, logging\n'), ((3517, 3572), 'logging.debug', 'logging.debug', (['"""Dosent exist pdf files in this folder."""'], {}), "('Dosent exist pdf files in this folder.')\n", (3530, 3572), False, 'import PyPDF2, os, sys, logging\n'), ((2247, 2278), 'logging.debug', 'logging.debug', (['"""Replacing file"""'], {}), "('Replacing file')\n", (2260, 2278), False, 'import PyPDF2, os, sys, logging\n'), ((2791, 2823), 'PyPDF2.PdfFileReader', 'PyPDF2.PdfFileReader', (['pdfFileObj'], {}), '(pdfFileObj)\n', (2811, 2823), False, 'import PyPDF2, os, sys, logging\n'), ((1562, 1592), 'os.path.join', 'os.path.join', (['folder', 'filename'], {}), '(folder, filename)\n', 
(1574, 1592), False, 'import PyPDF2, os, sys, logging\n'), ((3454, 3487), 'os.path.basename', 'os.path.basename', (['self.fileOutput'], {}), '(self.fileOutput)\n', (3470, 3487), False, 'import PyPDF2, os, sys, logging\n')] |
"""Index related entity names
Revision ID: 323f8d77567b
Revises: 82b34e2<PASSWORD>
Create Date: 2016-11-16 13:00:25.782487
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '82b34e2777a4'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_index(op.f('ix_related_entity_name'), 'related_entity', ['name'], unique=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_related_entity_name'), table_name='related_entity')
### end Alembic commands ###
| [
"alembic.op.f"
] | [((370, 400), 'alembic.op.f', 'op.f', (['"""ix_related_entity_name"""'], {}), "('ix_related_entity_name')\n", (374, 400), False, 'from alembic import op\n'), ((578, 608), 'alembic.op.f', 'op.f', (['"""ix_related_entity_name"""'], {}), "('ix_related_entity_name')\n", (582, 608), False, 'from alembic import op\n')] |
import random
high_score = 0
def dice_game():
global high_score
while True:
print("Current High Score: ", high_score)
print("1) Roll Dice")
print("2) Leave Game")
choice = input("Enter your choice: ")
if choice == "2":
print("Goodbye")
break
elif choice == "1":
die1 = random.randint(1, 6)
die2 = random.randint(1, 6)
total = die1 + die2
print("You roll a... ", die1)
print("You roll a... ", die2)
print("You have rolled a total of: ", total)
if total > high_score:
high_score = total
print("New high score!")
else:
continue
dice_game()
| [
"random.randint"
] | [((367, 387), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (381, 387), False, 'import random\n'), ((407, 427), 'random.randint', 'random.randint', (['(1)', '(6)'], {}), '(1, 6)\n', (421, 427), False, 'import random\n')] |
# Author <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BertClassifier."""
from mptb import BertClassifier
def classification(
config_path='config/bert_base.json',
train_dataset_path='tests/sample_text_class.txt',
eval_dataset_path='tests/sample_text_class.txt',
pretrain_path='pretrain/bert.pt',
tf_pretrain_path=None,
model_path=None,
vocab_path='tests/sample_text.vocab',
sp_model_path='tests/sample_text.model',
save_dir='classifier/',
log_dir=None,
batch_size=2,
max_pos=128,
lr=5e-5,
warmup_proportion=0.1, # warmup_steps = len(dataset) / batch_size * epoch * warmup_proportion
epochs=5,
per_save_epochs=1,
mode='train',
label_num=-1,
balance_weight=False,
balance_sample=False,
under_sampling=False,
under_sampling_cycle=False,
tokenizer_name='google',
read_head=False,
fp16=False,
task='class',
device=None,
quantize=False,
model_name='bert',
optimizer='bert',
encoder_json_path=None,
vocab_bpe_path=None,
sw_log_dir='runs'
):
if under_sampling_cycle:
under_sampling = True
if mode == 'train':
estimator = BertClassifier(
config_path=config_path,
max_pos=max_pos,
vocab_path=vocab_path,
sp_model_path=sp_model_path,
pretrain_path=pretrain_path,
tf_pretrain_path=tf_pretrain_path,
dataset_path=train_dataset_path,
header_skip=not read_head,
label_num=label_num,
tokenizer_name=tokenizer_name,
under_sampling=under_sampling,
fp16=fp16,
task=task,
device=device,
quantize=quantize,
model_name=model_name,
encoder_json_path=encoder_json_path,
vocab_bpe_path=vocab_bpe_path,
sw_log_dir=sw_log_dir
)
estimator.train(
traing_model_path=model_path,
batch_size=batch_size,
epochs=epochs,
lr=lr, warmup_proportion=warmup_proportion,
balance_weight=balance_weight,
balance_sample=balance_sample,
under_sampling_cycle=under_sampling_cycle,
save_dir=save_dir,
per_save_epochs=per_save_epochs,
optimizer_name=optimizer
)
if eval_dataset_path is None:
return
eval_data_set = estimator.get_dataset(
dataset_path=eval_dataset_path, header_skip=not read_head)
score = estimator.evaluate(dataset=eval_data_set, batch_size=batch_size, log_dir=log_dir)
print(score)
else:
estimator = BertClassifier(
config_path=config_path,
max_pos=max_pos,
vocab_path=vocab_path,
sp_model_path=sp_model_path,
model_path=model_path,
dataset_path=eval_dataset_path,
header_skip=not read_head,
label_num=label_num,
tokenizer_name=tokenizer_name,
under_sampling=under_sampling,
fp16=fp16,
device=device,
quantize=quantize,
model_name=model_name,
encoder_json_path=encoder_json_path,
vocab_bpe_path=vocab_bpe_path,
)
score = estimator.evaluate(batch_size=batch_size, log_dir=log_dir)
print(score)
if __name__ == '__main__':
    import argparse

    # CLI specification: (flag, keyword arguments for ``add_argument``).
    # Declaring the options as a data table keeps them easy to scan and diff.
    _OPTIONS = [
        ('--config_path', dict(help='JSON file path for defines networks.', nargs='?',
                               type=str, default='config/bert_base.json')),
        ('--train_dataset_path', dict(help='Training Dataset file (TSV file) path for classification.',
                                      nargs='?', type=str, default=None)),
        ('--eval_dataset_path', dict(help='Evaluate Dataset file (TSV file) path for classification.',
                                     nargs='?', type=str, default=None)),
        ('--pretrain_path', dict(help='Pre-training PyTorch model path.', nargs='?',
                                 type=str, default=None)),
        ('--tf_pretrain_path', dict(help='Pre-training TensorFlow(Google) model path.', nargs='?',
                                    type=str, default=None)),
        ('--model_path', dict(help='Classifier PyTorch model path.', nargs='?',
                              type=str, default=None)),
        ('--vocab_path', dict(help='Vocabulary file path for BERT to pre-training.', nargs='?',
                              required=True, type=str)),
        ('--sp_model_path', dict(help='Trained SentencePiece model path.', nargs='?',
                                 type=str, default=None)),
        ('--save_dir', dict(help='Classification model saving directory path.', nargs='?',
                            type=str, default='classifier/')),
        ('--log_dir', dict(help='Logging file path.', nargs='?', type=str, default=None)),
        ('--batch_size', dict(help='Batch size', nargs='?', type=int, default=4)),
        ('--max_pos', dict(help='The maximum sequence length for BERT (slow as big).', nargs='?',
                           type=int, default=512)),
        ('--lr', dict(help='Learning rate', nargs='?', type=float, default=2e-5)),
        ('--warmup_steps', dict(help='Warm-up steps proportion.', nargs='?',
                                type=float, default=0.1)),
        ('--epochs', dict(help='Epochs', nargs='?', type=int, default=10)),
        ('--per_save_epochs', dict(
            help='Saving training model timing is the number divided by the epoch number',
            nargs='?', type=int, default=1)),
        ('--mode', dict(help='train or eval', nargs='?', type=str, default='train')),
        ('--label_num', dict(help='labels number', nargs='?', type=int, default=-1)),
        ('--balance_weight', dict(action='store_true', help='Use automatically adjust weights')),
        ('--balance_sample', dict(action='store_true', help='Use automatically adjust samples(random)')),
        ('--under_sampling', dict(action='store_true', help='Use automatically adjust under samples')),
        ('--under_sampling_cycle', dict(action='store_true',
                                        help='Use automatically adjust under samples cycle peer')),
        ('--tokenizer', dict(nargs='?', type=str, default='google',
                             help='Select from the following name groups tokenizer that uses only vocabulary files.(mecab, juman)')),
        ('--read_head', dict(action='store_true', help='Use not include header TSV file')),
        ('--fp16', dict(action='store_true', help='Use nVidia fp16 (require apex module)')),
        ('--task', dict(nargs='?', type=str, default='class', help='Target Task (class or choice)')),
        ('--device', dict(nargs='?', type=str, default=None, help='Target Runing device name.')),
        ('--quantize', dict(action='store_true', help='Use quantized bert (testing),')),
        ('--model_name', dict(nargs='?', type=str, default='bert',
                              help='Select from the following name groups model. (bert, proj, albert)')),
        ('--optimizer', dict(nargs='?', type=str, default='bert',
                             help='Select from the following name groups optimizer. (bert, adamw, lamb)')),
        ('--encoder_json_path', dict(help='GPT2 encoder JSON file path.', nargs='?', type=str)),
        ('--vocab_bpe_path', dict(help='GPT2 encoder bpe file path.', nargs='?', type=str)),
        ('--sw_log_dir', dict(help='TensorBoard lgo_dir path.', nargs='?', type=str, default='runs')),
    ]

    parser = argparse.ArgumentParser(description='BERT classification.', usage='%(prog)s [options]')
    for flag, spec in _OPTIONS:
        parser.add_argument(flag, **spec)
    args = parser.parse_args()

    # Forward every CLI option to the driver.  Note the two renames:
    # --warmup_steps feeds ``warmup_proportion`` and --tokenizer feeds
    # ``tokenizer_name``.
    classification(
        config_path=args.config_path,
        train_dataset_path=args.train_dataset_path,
        eval_dataset_path=args.eval_dataset_path,
        pretrain_path=args.pretrain_path,
        tf_pretrain_path=args.tf_pretrain_path,
        model_path=args.model_path,
        vocab_path=args.vocab_path,
        sp_model_path=args.sp_model_path,
        save_dir=args.save_dir,
        log_dir=args.log_dir,
        batch_size=args.batch_size,
        max_pos=args.max_pos,
        lr=args.lr,
        warmup_proportion=args.warmup_steps,
        epochs=args.epochs,
        per_save_epochs=args.per_save_epochs,
        mode=args.mode,
        label_num=args.label_num,
        balance_weight=args.balance_weight,
        balance_sample=args.balance_sample,
        under_sampling=args.under_sampling,
        under_sampling_cycle=args.under_sampling_cycle,
        tokenizer_name=args.tokenizer,
        read_head=args.read_head,
        fp16=args.fp16,
        task=args.task,
        device=args.device,
        quantize=args.quantize,
        model_name=args.model_name,
        optimizer=args.optimizer,
        encoder_json_path=args.encoder_json_path,
        vocab_bpe_path=args.vocab_bpe_path,
        sw_log_dir=args.sw_log_dir
    )
| [
"mptb.BertClassifier",
"argparse.ArgumentParser"
] | [((3956, 4048), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""BERT classification."""', 'usage': '"""%(prog)s [options]"""'}), "(description='BERT classification.', usage=\n '%(prog)s [options]')\n", (3979, 4048), False, 'import argparse\n'), ((1685, 2200), 'mptb.BertClassifier', 'BertClassifier', ([], {'config_path': 'config_path', 'max_pos': 'max_pos', 'vocab_path': 'vocab_path', 'sp_model_path': 'sp_model_path', 'pretrain_path': 'pretrain_path', 'tf_pretrain_path': 'tf_pretrain_path', 'dataset_path': 'train_dataset_path', 'header_skip': '(not read_head)', 'label_num': 'label_num', 'tokenizer_name': 'tokenizer_name', 'under_sampling': 'under_sampling', 'fp16': 'fp16', 'task': 'task', 'device': 'device', 'quantize': 'quantize', 'model_name': 'model_name', 'encoder_json_path': 'encoder_json_path', 'vocab_bpe_path': 'vocab_bpe_path', 'sw_log_dir': 'sw_log_dir'}), '(config_path=config_path, max_pos=max_pos, vocab_path=\n vocab_path, sp_model_path=sp_model_path, pretrain_path=pretrain_path,\n tf_pretrain_path=tf_pretrain_path, dataset_path=train_dataset_path,\n header_skip=not read_head, label_num=label_num, tokenizer_name=\n tokenizer_name, under_sampling=under_sampling, fp16=fp16, task=task,\n device=device, quantize=quantize, model_name=model_name,\n encoder_json_path=encoder_json_path, vocab_bpe_path=vocab_bpe_path,\n sw_log_dir=sw_log_dir)\n', (1699, 2200), False, 'from mptb import BertClassifier\n'), ((3185, 3616), 'mptb.BertClassifier', 'BertClassifier', ([], {'config_path': 'config_path', 'max_pos': 'max_pos', 'vocab_path': 'vocab_path', 'sp_model_path': 'sp_model_path', 'model_path': 'model_path', 'dataset_path': 'eval_dataset_path', 'header_skip': '(not read_head)', 'label_num': 'label_num', 'tokenizer_name': 'tokenizer_name', 'under_sampling': 'under_sampling', 'fp16': 'fp16', 'device': 'device', 'quantize': 'quantize', 'model_name': 'model_name', 'encoder_json_path': 'encoder_json_path', 'vocab_bpe_path': 
'vocab_bpe_path'}), '(config_path=config_path, max_pos=max_pos, vocab_path=\n vocab_path, sp_model_path=sp_model_path, model_path=model_path,\n dataset_path=eval_dataset_path, header_skip=not read_head, label_num=\n label_num, tokenizer_name=tokenizer_name, under_sampling=under_sampling,\n fp16=fp16, device=device, quantize=quantize, model_name=model_name,\n encoder_json_path=encoder_json_path, vocab_bpe_path=vocab_bpe_path)\n', (3199, 3616), False, 'from mptb import BertClassifier\n')] |
"""URL routes for the ``dev`` app."""
from django.urls import path

from dev.views import FindMyIp, FindMyGps

app_name = 'dev'

urlpatterns = [
    # path('', Main.as_view(), name='index'),
    path('findmyip', FindMyIp.as_view(), name='findmyip'),
    path('findmygps', FindMyGps.as_view(), name='findmygps'),
]
"dev.views.FindMyGps.as_view",
"dev.views.FindMyIp.as_view"
] | [((174, 192), 'dev.views.FindMyIp.as_view', 'FindMyIp.as_view', ([], {}), '()\n', (190, 192), False, 'from dev.views import FindMyIp, FindMyGps\n'), ((236, 255), 'dev.views.FindMyGps.as_view', 'FindMyGps.as_view', ([], {}), '()\n', (253, 255), False, 'from dev.views import FindMyIp, FindMyGps\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Topic: 处理html和xml文本
Desc :
"""
import html
def html_xml():
    """Demonstrate HTML escaping and ASCII-safe encoding of text."""
    sample = 'Elements are written as "<tag>text</tag>".'
    print(sample)
    # Escape &, <, > and (by default) quote characters.
    print(html.escape(sample))
    # Disable escaping of quotes
    print(html.escape(sample, quote=False))
    accented = 'Spicy Jalapeño'
    # Non-ASCII characters become XML character references (&#...;).
    print(accented.encode('ascii', errors='xmlcharrefreplace'))


if __name__ == '__main__':
    html_xml()
| [
"html.escape"
] | [((190, 204), 'html.escape', 'html.escape', (['s'], {}), '(s)\n', (201, 204), False, 'import html\n'), ((250, 277), 'html.escape', 'html.escape', (['s'], {'quote': '(False)'}), '(s, quote=False)\n', (261, 277), False, 'import html\n')] |
# Build per-class picture indexes and matching/target matrices for a
# whale-identification training set, and dump them as CSV files.
# NOTE: Python 2 script (uses ``xrange``).
import csv, pylab as pl, re

DB = dict();     # class name -> list of picture file names
BD = dict();     # picture file name -> class name
whales_ = [];    # numeric picture ids, in file order
classes = [];    # numeric class ids, in first-seen order
line_num = 0;
with open('data/train.csv', 'rb') as train_class_data:
    data = csv.reader(train_class_data, delimiter=',');
    for line in data:
        # Skip the header row.
        if (line_num == 0):
            line_num += 1;
            continue;
        keys = DB.keys();
        # NOTE(review): ``syek`` is never used afterwards.
        syek = BD.keys();
        pic_name = line[0];
        class_name = line[1];
        # Strip the 'w_' prefix and '.jpg' suffix to get the numeric id.
        # NOTE(review): the '.' in re.sub('.jpg', ...) is an unescaped
        # regex wildcard; it happens to work because it matches the literal
        # dot preceding 'jpg'.
        whales_.append(int(re.sub('w_','',re.sub('.jpg','',pic_name))));
        if (class_name not in keys):
            DB[class_name] = [pic_name];
            classes.append(int(re.sub('whale_','',class_name)));
        else:
            DB[class_name].append(pic_name);
        BD[pic_name] = class_name;
# Number of pictures per class, plotted as a frequency curve.
keys = DB.keys();
N = len(keys);
frequency_table = [0 for i in xrange(N)];
for i in xrange(N):
    frequency_table[i] = len(DB[keys[i]]);
pl.plot(frequency_table);
# match_table: rows = pictures, columns = classes, with the numeric ids
# written into row 0 / column 0 as headers; cell (j+1, i+1) is 1 when
# picture j belongs to class i.
M = len(whales_);
match_table = [[0 for j in xrange(N+1)] for i in xrange(M+1)];
for j in xrange(N):
    match_table[0][j+1] = classes[j];
for i in xrange(M):
    match_table[i+1][0] = whales_[i];
for i in xrange(N):
    for j in xrange(M):
        strWhale = 'w_'+str(whales_[j])+'.jpg';
        # Rebuild the zero-padded class name (class numbers are padded to
        # five digits).
        num_zero = 0;
        if (classes[i] < 10):
            num_zero += 4;
        elif (classes[i] < 100):
            num_zero += 3;
        elif (classes[i] < 1000):
            num_zero += 2;
        elif (classes[i] < 10000):
            num_zero += 1;
        zeros = num_zero*'0';
        strClass = 'whale_'+zeros+str(classes[i]);
        if (strWhale in DB[strClass]):
            match_table[j+1][i+1] = 1;
match_table = pl.array(match_table);
pl.savetxt('data/match_table.csv', match_table, delimiter=',');
# target_matrix: M x M pairwise same-class indicator over pictures.
target_matrix = pl.array([[0 for j in xrange(M)] for j in xrange(M)]);
i = 0;
for pic_name_i in whales_:
    j = 0;
    for pic_name_j in whales_:
        class_of_i = BD['w_'+str(pic_name_i)+'.jpg'];
        class_of_j = BD['w_'+str(pic_name_j)+'.jpg'];
        if (class_of_i == class_of_j):
            target_matrix[i,j] = 1;
        j += 1;
    # The diagonal is always 1 (a picture matches itself).
    target_matrix[i,i] = 1;
    i += 1;
# new_train_numerical: one (picture id, numeric class id) row per picture.
new_train_numerical = pl.array([[0 for it1 in xrange(2)] for it2 in xrange(M)]);
for i in xrange(M):
    whale = whales_[i];
    new_train_numerical[i,0] = whale;
    class_ = class_of_i = BD['w_'+str(whale)+'.jpg'];
    new_train_numerical[i,1] = int(re.sub('whale_','',class_));
pl.savetxt('data/target_matrix.csv', target_matrix, delimiter=',');
pl.savetxt('data/train_numer.csv', new_train_numerical, delimiter=',');
"pylab.plot",
"pylab.array",
"pylab.savetxt",
"re.sub",
"csv.reader"
] | [((880, 904), 'pylab.plot', 'pl.plot', (['frequency_table'], {}), '(frequency_table)\n', (887, 904), True, 'import csv, pylab as pl, re\n'), ((1637, 1658), 'pylab.array', 'pl.array', (['match_table'], {}), '(match_table)\n', (1645, 1658), True, 'import csv, pylab as pl, re\n'), ((1660, 1722), 'pylab.savetxt', 'pl.savetxt', (['"""data/match_table.csv"""', 'match_table'], {'delimiter': '""","""'}), "('data/match_table.csv', match_table, delimiter=',')\n", (1670, 1722), True, 'import csv, pylab as pl, re\n'), ((2403, 2469), 'pylab.savetxt', 'pl.savetxt', (['"""data/target_matrix.csv"""', 'target_matrix'], {'delimiter': '""","""'}), "('data/target_matrix.csv', target_matrix, delimiter=',')\n", (2413, 2469), True, 'import csv, pylab as pl, re\n'), ((2471, 2541), 'pylab.savetxt', 'pl.savetxt', (['"""data/train_numer.csv"""', 'new_train_numerical'], {'delimiter': '""","""'}), "('data/train_numer.csv', new_train_numerical, delimiter=',')\n", (2481, 2541), True, 'import csv, pylab as pl, re\n'), ((164, 207), 'csv.reader', 'csv.reader', (['train_class_data'], {'delimiter': '""","""'}), "(train_class_data, delimiter=',')\n", (174, 207), False, 'import csv, pylab as pl, re\n'), ((2373, 2401), 're.sub', 're.sub', (['"""whale_"""', '""""""', 'class_'], {}), "('whale_', '', class_)\n", (2379, 2401), False, 'import csv, pylab as pl, re\n'), ((460, 488), 're.sub', 're.sub', (['""".jpg"""', '""""""', 'pic_name'], {}), "('.jpg', '', pic_name)\n", (466, 488), False, 'import csv, pylab as pl, re\n'), ((600, 632), 're.sub', 're.sub', (['"""whale_"""', '""""""', 'class_name'], {}), "('whale_', '', class_name)\n", (606, 632), False, 'import csv, pylab as pl, re\n')] |
# Plot ODE-solver snapshot output against the known exact solutions:
# solutions, absolute error, and relative error for y1 (top row) and
# y2 (bottom row).
import numpy as np
import matplotlib.pyplot as plt

# Exact solutions for the available test problems; uncomment the pair
# matching the snapshot data being plotted.
#Dahlquist test
#sol1ex = lambda t: np.exp(-t)
#sol2ex = lambda t: np.exp(-2*t)
#oscillator 1
sol1ex = lambda t: np.cos(t**2/2)
sol2ex = lambda t: np.sin(t**2/2)
#oscillator 2
#sol1ex = lambda t: np.exp(np.sin(t**2))
#sol2ex = lambda t: np.exp(np.cos(t**2))

# Load the snapshot times and the two solution components from the raw
# binary files written by the solver (one file per snapshot).
name = 'Osc1'
t = np.fromfile('../out/%s_snap_t' % name)
nsnap = len(t)
sol1 = np.zeros((nsnap,))
sol2 = sol1.copy()
for i in range(nsnap):
    s = np.fromfile('../out/%s_snap_%d' % (name,i))
    sol1[i] = s[0]
    sol2[i] = s[1]

# 2x3 grid: columns are (solution, abs error, rel error); flatten so the
# axes can be indexed 0..2 (top row) and 3..5 (bottom row).
fig, axs = plt.subplots(2, 3, figsize=(10,5))
axs = [item for sublist in axs for item in sublist]
tdense = np.linspace(min(t), max(t), 2500)
axs[0].plot(tdense, sol1ex(tdense), 'k', linewidth=0.5, label='$y_1$ exact')
axs[0].plot(t, sol1, 'C0.', label='$y_1$ numerical')
axs[0].set_title('Solutions')
axs[0].set_ylabel('$y_1$')
axs[0].legend()
axs[3].plot(tdense, sol2ex(tdense), 'k', linewidth=0.5, label='$y_2$ exact')
axs[3].plot(t, sol2, 'C1.', label='$y_2$ numerical')
axs[3].set_ylabel('$y_2$')
axs[3].legend()
axs[1].semilogy(t, np.abs(sol1 - sol1ex(t)), 'C0.', label='$y_1$ abs err')
axs[4].semilogy(t, np.abs(sol2 - sol2ex(t)), 'C1.', label='$y_2$ abs err')
axs[1].set_title('Absolute Error')
axs[2].semilogy(t, np.abs((sol1 - sol1ex(t))/sol1ex(t)), 'C0.', label='$y_1$ rel err')
# NOTE(review): the y2 relative error below divides by sol1ex(t); a true
# relative error would divide by sol2ex(t).  Possibly deliberate because
# sol2ex vanishes at t=0 for oscillator 1 -- confirm with the author.
axs[5].semilogy(t, np.abs((sol2 - sol2ex(t))/sol1ex(t)), 'C1.', label='$y_2$ rel err')
axs[2].set_title('Relative Error')
axs[3].set_xlabel('t')
axs[4].set_xlabel('t')
axs[5].set_xlabel('t')
plt.tight_layout()
plt.show()
| [
"numpy.fromfile",
"numpy.zeros",
"numpy.cos",
"matplotlib.pyplot.tight_layout",
"numpy.sin",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.show"
] | [((329, 367), 'numpy.fromfile', 'np.fromfile', (["('../out/%s_snap_t' % name)"], {}), "('../out/%s_snap_t' % name)\n", (340, 367), True, 'import numpy as np\n'), ((390, 408), 'numpy.zeros', 'np.zeros', (['(nsnap,)'], {}), '((nsnap,))\n', (398, 408), True, 'import numpy as np\n'), ((553, 588), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {'figsize': '(10, 5)'}), '(2, 3, figsize=(10, 5))\n', (565, 588), True, 'import matplotlib.pyplot as plt\n'), ((1528, 1546), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1544, 1546), True, 'import matplotlib.pyplot as plt\n'), ((1547, 1557), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1555, 1557), True, 'import matplotlib.pyplot as plt\n'), ((165, 183), 'numpy.cos', 'np.cos', (['(t ** 2 / 2)'], {}), '(t ** 2 / 2)\n', (171, 183), True, 'import numpy as np\n'), ((199, 217), 'numpy.sin', 'np.sin', (['(t ** 2 / 2)'], {}), '(t ** 2 / 2)\n', (205, 217), True, 'import numpy as np\n'), ((459, 503), 'numpy.fromfile', 'np.fromfile', (["('../out/%s_snap_%d' % (name, i))"], {}), "('../out/%s_snap_%d' % (name, i))\n", (470, 503), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
"""Interpret an L-String as a set of 3D Turtle commands and record the turtle's path.
Multiple lines of input will be treated as a continuation of a single L-String.
Default commandset:
F,G - Step forward while drawing
f,g - Step forward without drawing
-,+ - Yaw around the normal axis
v,^ - Pitch around the transverse axis
<,> - Roll around the longitudinal axis
| - Flip orientation 180 degrees
d,D - Turn drawing on, off
[,] - Push, pop position and orientation onto a stack
"""
import argparse
import logging
import pathlib
import sys
# Make the repository root importable so the ``generative`` package
# resolves when this script is run directly from its subdirectory.
root = pathlib.Path(__file__).resolve().parent.parent
sys.path.insert(0, str(root))

from generative.lsystem.interpreter import LSystemInterpeter
from generative.wkio import serialize_geometries

# Mapping from CLI log-level names to ``logging`` module constants.
LOG_LEVELS = {
    "CRITICAL": logging.CRITICAL,
    "ERROR": logging.ERROR,
    "WARNING": logging.WARNING,
    "INFO": logging.INFO,
    "DEBUG": logging.DEBUG,
}
DEFAULT_LEVEL = "WARNING"
def parse_args():
    """Build and parse the command-line interface for the interpreter.

    The module docstring (``__doc__``) is reused verbatim as the help
    description, including the default commandset table.
    """
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter
    )
    parser.add_argument(
        "--input",
        "-i",
        type=argparse.FileType("r"),
        default=sys.stdin,
        help="A file containing the L-String to interpret. Defaults to stdin.",
    )
    parser.add_argument(
        "--output",
        "-o",
        # TODO: I seem to not be able to open stdout in binary mode.
        # See: https://github.com/python/cpython/pull/13165
        # Potential workaround: open in 'wb' mode, and default to sys.stdout.buffer.
        type=argparse.FileType("w"),
        default=sys.stdout,
        help="A file to output the expanded axiom to. Defaults to stdout.",
    )
    parser.add_argument(
        "--commandset",
        "-c",
        type=str,
        default="default",
        choices=LSystemInterpeter.commandsets,
        help="The commandset to use to interpret the given L-String. Defaults to 'default'.",
    )
    parser.add_argument(
        "--stepsize",
        "-s",
        type=float,
        default=1.0,
        help="The step size for the turtle's forward motion. Defaults to 1.0.",
    )
    parser.add_argument(
        "--angle",
        "-a",
        type=float,
        default=45.0,
        help="The angle in degrees used for the turtle's orientation modifications. Defaults to 45.",
    )
    parser.add_argument(
        "--output-format",
        "-O",
        type=str,
        default="wkt",
        choices=["wkt", "wkb", "flat"],
        help="The output format for the turtle path. Defaults to WKT.",
    )
    parser.add_argument(
        "-l",
        "--log-level",
        type=str,
        default=DEFAULT_LEVEL,
        choices=LOG_LEVELS.keys(),
        help=f"Set the logging output level. Defaults to {DEFAULT_LEVEL}.",
    )
    return parser.parse_args()
def main(args):
    """Interpret the input L-String and serialize the resulting turtle path."""
    turtle = LSystemInterpeter(args.commandset, args.stepsize, args.angle)
    path = turtle.interpret(turtle.tokenize(args.input))
    serialize_geometries(path, args.output, args.output_format)
if __name__ == "__main__":
    args = parse_args()
    # Configure logging before doing any work; the level comes from the CLI
    # and log output goes to stderr so it does not mix with --output.
    logging.basicConfig(
        format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
        level=LOG_LEVELS.get(args.log_level),
        stream=sys.stderr,
    )
    logger = logging.getLogger(name=__file__)
    main(args)
| [
"logging.getLogger",
"argparse.FileType",
"generative.lsystem.interpreter.LSystemInterpeter",
"argparse.ArgumentParser",
"pathlib.Path",
"generative.wkio.serialize_geometries"
] | [((1021, 1124), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__', 'formatter_class': 'argparse.RawDescriptionHelpFormatter'}), '(description=__doc__, formatter_class=argparse.\n RawDescriptionHelpFormatter)\n', (1044, 1124), False, 'import argparse\n'), ((2935, 2996), 'generative.lsystem.interpreter.LSystemInterpeter', 'LSystemInterpeter', (['args.commandset', 'args.stepsize', 'args.angle'], {}), '(args.commandset, args.stepsize, args.angle)\n', (2952, 2996), False, 'from generative.lsystem.interpreter import LSystemInterpeter\n'), ((3094, 3159), 'generative.wkio.serialize_geometries', 'serialize_geometries', (['geometries', 'args.output', 'args.output_format'], {}), '(geometries, args.output, args.output_format)\n', (3114, 3159), False, 'from generative.wkio import serialize_geometries\n'), ((3402, 3434), 'logging.getLogger', 'logging.getLogger', ([], {'name': '__file__'}), '(name=__file__)\n', (3419, 3434), False, 'import logging\n'), ((1206, 1228), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (1223, 1228), False, 'import argparse\n'), ((1629, 1651), 'argparse.FileType', 'argparse.FileType', (['"""w"""'], {}), "('w')\n", (1646, 1651), False, 'import argparse\n'), ((609, 631), 'pathlib.Path', 'pathlib.Path', (['__file__'], {}), '(__file__)\n', (621, 631), False, 'import pathlib\n')] |
"""
@author: yuboya
"""
### pins position to be sent to robot
## from TransformationCalculation:
import numpy as np
import math
def PointsToRobot(alpha, deltax, deltay, deltaz, xyzc):
    """Rigidly transform a sequence of 3-D points.

    Each point is multiplied by the transpose of a rotation about the
    z-axis by ``alpha`` radians, then translated by
    ``(deltax, deltay, deltaz)``.  (Presumably camera-to-robot frame,
    judging by the names -- confirm with the calibration setup.)

    Parameters
    ----------
    alpha : float
        Rotation angle about the z-axis, in radians.
    deltax, deltay, deltaz : float
        Translation offsets.
    xyzc : iterable of numpy arrays
        Input points; each entry must be reshapeable to 3 values.

    Returns
    -------
    list of numpy.ndarray
        One (1, 3) row vector per input point.
    """
    sina = math.sin(alpha)
    cosa = math.cos(alpha)
    # The rotation matrix and translation vector do not depend on the
    # point, so build them once instead of once per loop iteration.
    R = np.array([cosa, -sina, 0, sina, cosa, 0, 0, 0, 1]).reshape(3, 3)
    T = np.array([deltax, deltay, deltaz]).reshape(3, 1)
    Rt = np.transpose(R)
    pointrs = []
    for pointc in xyzc:
        pc = pointc.reshape(3, 1)
        pr = np.dot(Rt, pc) + T
        pointrs.append(pr.reshape(1, 3))
    return pointrs
| [
"math.cos",
"numpy.array",
"numpy.transpose",
"math.sin"
] | [((222, 237), 'math.sin', 'math.sin', (['alpha'], {}), '(alpha)\n', (230, 237), False, 'import math\n'), ((250, 265), 'math.cos', 'math.cos', (['alpha'], {}), '(alpha)\n', (258, 265), False, 'import math\n'), ((396, 446), 'numpy.array', 'np.array', (['[cosa, -sina, 0, sina, cosa, 0, 0, 0, 1]'], {}), '([cosa, -sina, 0, sina, cosa, 0, 0, 0, 1])\n', (404, 446), True, 'import numpy as np\n'), ((486, 520), 'numpy.array', 'np.array', (['[deltax, deltay, deltaz]'], {}), '([deltax, deltay, deltaz])\n', (494, 520), True, 'import numpy as np\n'), ((568, 583), 'numpy.transpose', 'np.transpose', (['R'], {}), '(R)\n', (580, 583), True, 'import numpy as np\n')] |
import requests
import json
from json import JSONDecodeError
# Root of the GitHub REST v3 API; endpoint paths are appended to this.
base_uri = "https://api.github.com/"
# GitHub license keyword identifiers.
# NOTE(review): the entry 'license bsd-3-clause' looks like a typo for
# 'bsd-3-clause' -- confirm before relying on this list.
licenses = ['afl-3.0', 'apache-2.0', 'artistic-2.0', 'bsl-1.0', 'bsd-2-clause', 'license bsd-3-clause', 'bsd-3-clause-clear', 'cc', 'cc0-1.0', 'cc-by-4.0', 'cc-by-sa-4.0', 'wtfpl', 'ecl-2.0', 'epl-1.0', 'epl-2.0', 'eupl-1.1', 'agpl-3.0', 'gpl', 'gpl-2.0', 'gpl-3.0', 'lgpl', 'lgpl-2.1', 'lgpl-3.0', 'isc', 'lppl-1.3c', 'ms-pl', 'mit', 'mpl-2.0', 'osl-3.0', 'postgresql', 'ofl-1.1', 'ncsa', 'unlicense', 'zlib']
def make_request(request):
    """Perform a GitHub REST API call described by ``request``.

    ``request`` is a dict with keys:
        api_key  -- GitHub token used for the Authorization header.
        method   -- HTTP verb (e.g. 'GET', 'POST').
        endpoint -- path relative to ``base_uri`` (a leading '/' is stripped).
        accept   -- optional Accept header override.
        data     -- optional JSON-serializable request body.

    Returns a dict ``{'error': ..., 'payload': ..., 'raw': ...}`` where
    ``error`` is None on success or a dict with at least a 'msg' key on
    failure, ``payload`` is the decoded JSON body (or the raw text when
    the body was not JSON), and ``raw`` is the ``requests`` response.
    """
    error = None
    if not request['api_key']:
        error = dict(msg='Github API Key was not provided! Please either use api_key or use an ENV variable named GITHUB_API_KEY')
        return dict(error=error, payload=None, raw=None)
    # Remove unnecessary slashes
    if request['endpoint'][0:1] == '/':
        request['endpoint'] = request['endpoint'][1:]
    headers = {
        'Authorization': f'token {request["api_key"]}',
        'Accept': 'application/vnd.github.v3+json'
    }
    if 'accept' in request:
        headers['Accept'] = request['accept']
    uri = '{}{}'.format(base_uri, request['endpoint'])
    if request.get('data'):
        response = requests.request(request['method'], uri, data=json.dumps(request['data']), headers=headers)
    else:
        response = requests.request(request['method'], uri, headers=headers)
    try:
        payload = json.loads(response.text)
    except JSONDecodeError:
        payload = response.text
    # BUG FIX: ``payload`` is a plain string when the body was not JSON, so
    # only inspect the 'message' field when a dict actually came back.
    if (response.reason == 'Unauthorized'
            and isinstance(payload, dict)
            and payload.get('message') == 'Bad credentials'):
        error = dict(msg='Failed to authorise due to invalid credentials.')
    elif not response.ok:
        error = dict(msg=f'Request failed with reason: {response.reason}', payload=payload, raw=response)
    return dict(error=error, payload=payload, raw=response)
def get_login(api_key):
    """Return the authenticated user's login name, or None on any failure."""
    request = dict(
        api_key=api_key,
        method='GET',
        endpoint='user'
    )
    response = make_request(request)
    if response['error']:
        return None
    # BUG FIX: make_request returns {'error', 'payload', 'raw'}; the login
    # name lives inside the decoded payload, not at the top level, so the
    # old ``response['login']`` always raised KeyError.
    return response['payload'].get('login')
def repo_exists(api_key, owner, name):
    """Return True when the repository ``owner/name`` is visible to the key.

    A successful GET on /repos/{owner}/{name} means the repository exists;
    any error (including 404 Not Found) is treated as "does not exist".
    """
    request = dict(
        api_key=api_key,
        method='GET',
        endpoint=f'repos/{owner}/{name}'
    )
    response = make_request(request)
    # BUG FIX: the old expression evaluated response['error']['message']
    # even when error was None (TypeError), and error dicts use the key
    # 'msg', not 'message' -- so the function could never return True.
    return response['error'] is None
"json.loads",
"json.dumps",
"requests.request"
] | [((1361, 1418), 'requests.request', 'requests.request', (["request['method']", 'uri'], {'headers': 'headers'}), "(request['method'], uri, headers=headers)\n", (1377, 1418), False, 'import requests\n'), ((1447, 1472), 'json.loads', 'json.loads', (['response.text'], {}), '(response.text)\n', (1457, 1472), False, 'import json\n'), ((1286, 1313), 'json.dumps', 'json.dumps', (["request['data']"], {}), "(request['data'])\n", (1296, 1313), False, 'import json\n')] |
import os
import tempfile
import transaction
from onegov.core import log
from onegov.core.utils import safe_move
class MailDataManager(object):
    """ Sends an e-mail through the given postman when the transaction is
    committed.

    Since we can't really know if a mail can be sent until it happens, we
    simply log an exception if the sending failed.
    """

    transaction_manager = transaction.manager

    def __init__(self, postman, envelope):
        # postman: knows the transport (host/port/options) and how to deliver.
        # envelope: the message to send on commit.
        self.postman = postman
        self.envelope = envelope

    @classmethod
    def send_email(cls, postman, envelope):
        # Join the current transaction; delivery happens at commit time.
        transaction.get().join(cls(postman, envelope))

    def sortKey(self):
        # Ordering key among data managers joined to the same transaction.
        return 'mails'

    def bind_connection(self, transaction, connection):
        # NOTE: ``transaction`` here is the transaction object passed in by
        # the transaction machinery (it shadows the module-level import).
        assert 'mail_connection' not in transaction.extension

        def after_commit_hook(*args):
            # Close the shared connection once the transaction has finished.
            connection.quit()

        transaction.addAfterCommitHook(after_commit_hook)
        transaction.extension['mail_connection'] = connection

    def open_connection(self):
        # Build and greet a new transport from the postman's configuration,
        # then apply any configured middlewares to it.
        connection = self.postman.transport(
            self.postman.host,
            self.postman.port,
            **self.postman.options
        )
        connection.ehlo()

        for item in self.postman.middlewares:
            item(connection)

        return connection

    def commit(self, transaction):
        # Lazily open one connection shared by all mail data managers of
        # this transaction, then deliver this manager's envelope.
        if 'mail_connection' not in transaction.extension:
            self.bind_connection(transaction, self.open_connection())

        try:
            self.postman.deliver(
                transaction.extension['mail_connection'],
                self.envelope
            )
        except Exception:
            # Delivery is best-effort: log and let the transaction proceed.
            log.exception("Failed to send e-mail")

    def abort(self, transaction):
        pass

    def tpc_vote(self, transaction):
        pass

    def tpc_abort(self, transaction):
        pass

    def tpc_begin(self, transaction):
        pass

    def tpc_finish(self, transaction):
        pass
class FileDataManager(object):
    """ Writes a file when the transaction is committed.

    The payload is staged in a temporary file during ``commit`` and only
    moved to its final path in ``tpc_finish``.
    """

    transaction_manager = transaction.manager

    def __init__(self, data, path):
        # data: bytes to write; path: final destination path.
        self.data = data
        self.path = path

    @classmethod
    def write_file(cls, data, path):
        # Join the current transaction; the file is written at commit time.
        transaction.get().join(cls(data, path))

    def sortKey(self):
        # Ordering key among data managers joined to the same transaction.
        return 'files'

    def commit(self, transaction):
        # Stage the payload in a temporary file; kept (delete=False) so it
        # can be moved into place during tpc_finish.
        with tempfile.NamedTemporaryFile(delete=False) as temp:
            self.tempfn = temp.name
            temp.write(self.data)

    def abort(self, transaction):
        pass

    def tpc_vote(self, transaction):
        # Refuse to finish when the staged file vanished or the target
        # already exists.
        if not os.path.exists(self.tempfn):
            raise ValueError('%s doesnt exist' % self.tempfn)
        if os.path.exists(self.path):
            raise ValueError('file already exists')

    def tpc_abort(self, transaction):
        # Best-effort cleanup of the staged temporary file.
        # NOTE(review): if tpc_abort can run before commit, self.tempfn is
        # unset and this raises AttributeError -- verify against the
        # transaction machinery's call ordering.
        try:
            os.remove(self.tempfn)
        except OSError:
            pass

    def tpc_begin(self, transaction):
        pass

    def tpc_finish(self, transaction):
        # Move the staged file into its final location.
        safe_move(self.tempfn, self.path)
| [
"os.path.exists",
"transaction.addAfterCommitHook",
"onegov.core.utils.safe_move",
"onegov.core.log.exception",
"tempfile.NamedTemporaryFile",
"transaction.get",
"os.remove"
] | [((889, 938), 'transaction.addAfterCommitHook', 'transaction.addAfterCommitHook', (['after_commit_hook'], {}), '(after_commit_hook)\n', (919, 938), False, 'import transaction\n'), ((2714, 2739), 'os.path.exists', 'os.path.exists', (['self.path'], {}), '(self.path)\n', (2728, 2739), False, 'import os\n'), ((3021, 3054), 'onegov.core.utils.safe_move', 'safe_move', (['self.tempfn', 'self.path'], {}), '(self.tempfn, self.path)\n', (3030, 3054), False, 'from onegov.core.utils import safe_move\n'), ((2390, 2431), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (2417, 2431), False, 'import tempfile\n'), ((2612, 2639), 'os.path.exists', 'os.path.exists', (['self.tempfn'], {}), '(self.tempfn)\n', (2626, 2639), False, 'import os\n'), ((2857, 2879), 'os.remove', 'os.remove', (['self.tempfn'], {}), '(self.tempfn)\n', (2866, 2879), False, 'import os\n'), ((598, 615), 'transaction.get', 'transaction.get', ([], {}), '()\n', (613, 615), False, 'import transaction\n'), ((1668, 1706), 'onegov.core.log.exception', 'log.exception', (['"""Failed to send e-mail"""'], {}), "('Failed to send e-mail')\n", (1681, 1706), False, 'from onegov.core import log\n'), ((2254, 2271), 'transaction.get', 'transaction.get', ([], {}), '()\n', (2269, 2271), False, 'import transaction\n')] |
from django.conf import settings
from django.db import models
from django.dispatch import receiver
from django.urls import Resolver404, resolve
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from cms import operations
from cms.models import CMSPlugin, Placeholder
from cms.models.fields import PlaceholderField
from cms.signals import pre_placeholder_operation
from cms.utils.plugins import get_bound_plugins
def _get_placeholder_slot(category):
return 'module-category-{}'.format(category.pk)
@receiver(pre_placeholder_operation)
def sync_module_plugin(sender, **kwargs):
    """
    Keep ``ModulePlugin.module_category`` in sync when a Module plugin is
    moved or pasted into a different category's placeholder from the
    modules list view.
    """
    operation_type = kwargs.pop('operation')
    affected_operations = (operations.MOVE_PLUGIN, operations.PASTE_PLUGIN)

    if operation_type not in affected_operations:
        return

    # Resolve the originating URL to check which admin view triggered the
    # operation.
    try:
        match = resolve(kwargs['origin'])
    except Resolver404:
        match = None

    is_in_modules = match and match.url_name == 'cms_modules_list'

    if not is_in_modules:
        return

    plugin = kwargs['plugin']
    placeholder = kwargs.get('target_placeholder')
    # The plugin ended up in a placeholder that differs from the one of
    # its currently recorded category.
    needs_sync = (
        plugin.plugin_type
        == 'Module'
        and placeholder.pk
        != plugin.module_category.modules_id
    )

    if needs_sync:
        # User has moved module to another category placeholder
        # or pasted a copied module plugin.
        new_category = Category.objects.get(modules=placeholder)
        # Re-point the plugin and its whole subtree at the new category in
        # a single bulk UPDATE.
        (ModulePlugin
         .objects
         .filter(path__startswith=plugin.path, depth__gte=plugin.depth)
         .update(module_category=new_category))
class Category(models.Model):
    """A named category owning one placeholder full of module plugins."""

    # Unique display name of the category.
    name = models.CharField(
        verbose_name=_('Name'),
        max_length=120,
        unique=True,
    )
    # Placeholder holding this category's module plugins; the slot name is
    # derived from the category's primary key.
    modules = PlaceholderField(slotname=_get_placeholder_slot)

    class Meta:
        verbose_name = _('Category')
        verbose_name_plural = _('Categories')

    def __str__(self):
        return self.name

    @cached_property
    def modules_placeholder(self):
        # Proxy-model view of the ``modules`` placeholder.
        return ModulesPlaceholder.objects.get(pk=self.modules_id)

    def get_non_empty_modules(self):
        # Root-level plugins (in the default language) that have at least
        # one child, resolved to their bound plugin instances.
        unbound_plugins = (
            self
            .modules
            .get_plugins(language=settings.LANGUAGE_CODE)
            .filter(parent__isnull=True, numchild__gte=1)
        )
        return get_bound_plugins(unbound_plugins)
class ModulesPlaceholder(Placeholder):
    """Proxy over ``Placeholder`` that knows it is attached to a Category."""

    class Meta:
        proxy = True

    def _get_attached_model(self):
        return Category

    def _get_attached_models(self):
        return self._get_attached_model()

    def _get_attached_objects(self):
        return self._get_attached_model().objects.filter(modules=self.pk)

    @cached_property
    def category(self):
        # The Category whose ``modules`` field points at this placeholder.
        return self._get_attached_model().objects.get(modules=self.pk)

    def get_label(self):
        # Label shown for this placeholder in the CMS UI.
        return self.category.name
class ModulePlugin(CMSPlugin):
    """A named module plugin that belongs to exactly one Category."""

    # Display name of the module.
    module_name = models.CharField(
        verbose_name=_('Name'),
        max_length=120,
    )
    # Category whose placeholder holds this plugin; kept in sync by the
    # ``sync_module_plugin`` signal handler on move/paste operations.
    module_category = models.ForeignKey(
        to=Category,
        verbose_name=_('Category'),
        on_delete=models.CASCADE,
    )

    def __str__(self):
        return self.module_name

    def update(self, refresh=False, **fields):
        # Bulk-update this row directly via the queryset (no save() call).
        ModulePlugin.objects.filter(pk=self.pk).update(**fields)

        if refresh:
            return self.reload()
        return

    def get_unbound_plugins(self):
        # The subtree rooted at this plugin, in tree (path) order.
        return CMSPlugin.get_tree(self).order_by('path')
| [
"cms.models.fields.PlaceholderField",
"django.utils.translation.gettext_lazy",
"cms.utils.plugins.get_bound_plugins",
"cms.models.CMSPlugin.get_tree",
"django.dispatch.receiver",
"django.urls.resolve"
] | [((564, 599), 'django.dispatch.receiver', 'receiver', (['pre_placeholder_operation'], {}), '(pre_placeholder_operation)\n', (572, 599), False, 'from django.dispatch import receiver\n'), ((1896, 1944), 'cms.models.fields.PlaceholderField', 'PlaceholderField', ([], {'slotname': '_get_placeholder_slot'}), '(slotname=_get_placeholder_slot)\n', (1912, 1944), False, 'from cms.models.fields import PlaceholderField\n'), ((978, 1003), 'django.urls.resolve', 'resolve', (["kwargs['origin']"], {}), "(kwargs['origin'])\n", (985, 1003), False, 'from django.urls import Resolver404, resolve\n'), ((1985, 1998), 'django.utils.translation.gettext_lazy', '_', (['"""Category"""'], {}), "('Category')\n", (1986, 1998), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2029, 2044), 'django.utils.translation.gettext_lazy', '_', (['"""Categories"""'], {}), "('Categories')\n", (2030, 2044), True, 'from django.utils.translation import gettext_lazy as _\n'), ((2462, 2496), 'cms.utils.plugins.get_bound_plugins', 'get_bound_plugins', (['unbound_plugins'], {}), '(unbound_plugins)\n', (2479, 2496), False, 'from cms.utils.plugins import get_bound_plugins\n'), ((1820, 1829), 'django.utils.translation.gettext_lazy', '_', (['"""Name"""'], {}), "('Name')\n", (1821, 1829), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3094, 3103), 'django.utils.translation.gettext_lazy', '_', (['"""Name"""'], {}), "('Name')\n", (3095, 3103), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3218, 3231), 'django.utils.translation.gettext_lazy', '_', (['"""Category"""'], {}), "('Category')\n", (3219, 3231), True, 'from django.utils.translation import gettext_lazy as _\n'), ((3561, 3585), 'cms.models.CMSPlugin.get_tree', 'CMSPlugin.get_tree', (['self'], {}), '(self)\n', (3579, 3585), False, 'from cms.models import CMSPlugin, Placeholder\n')] |
#!/usr/bin/env python
#coding=utf-8
import json
from lib.sqs import zhihufav_sqs
from lib.tasks import add_note
def get_sqs_queue():
    """Pull up to 10 messages from the zhihufav SQS queue and enqueue an
    add_note Celery task for each one.

    The receipt handle is forwarded so the task can delete the message
    once it has been processed.
    """
    for message in zhihufav_sqs.get_messages(10):
        payload = json.loads(message.get_body())
        add_note.delay(
            payload.get('api_url'),
            payload.get('parent_note'),
            message.receipt_handle,
        )
if __name__ == "__main__":
    # Make five passes over the queue, each fetching up to 10 messages.
    for _ in range(5):
        get_sqs_queue()
| [
"lib.tasks.add_note.delay",
"json.loads",
"lib.sqs.zhihufav_sqs.get_messages"
] | [((151, 180), 'lib.sqs.zhihufav_sqs.get_messages', 'zhihufav_sqs.get_messages', (['(10)'], {}), '(10)\n', (176, 180), False, 'from lib.sqs import zhihufav_sqs\n'), ((303, 323), 'json.loads', 'json.loads', (['sqs_body'], {}), '(sqs_body)\n', (313, 323), False, 'import json\n'), ((424, 476), 'lib.tasks.add_note.delay', 'add_note.delay', (['api_url', 'parent_note', 'receipt_handle'], {}), '(api_url, parent_note, receipt_handle)\n', (438, 476), False, 'from lib.tasks import add_note\n')] |
from subprocess import Popen, PIPE

# Run a shell command and capture its standard output.
cmd = "echo hello world"
proc = Popen(cmd, shell=True, stdout=PIPE)
# stderr is not piped, so the second element of the tuple is None.
ret, err = proc.communicate()
"subprocess.Popen"
] | [((65, 100), 'subprocess.Popen', 'Popen', (['cmd'], {'shell': '(True)', 'stdout': 'PIPE'}), '(cmd, shell=True, stdout=PIPE)\n', (70, 100), False, 'from subprocess import Popen, PIPE\n')] |
# Generated by Django 2.1.3 on 2018-11-18 02:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Conversation model.

    Auto-generated by Django 2.1.3 (see header comment); do not edit by
    hand — add a new migration for schema changes instead.
    """

    initial = True

    # First migration of the app: no dependencies.
    dependencies = [
    ]

    operations = [
        # Conversation stores a contact-form submission and its resolution.
        migrations.CreateModel(
            name='Conversation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Timestamps are maintained automatically on insert/update.
                ('created_date', models.DateTimeField(auto_now_add=True)),
                ('modified_date', models.DateTimeField(auto_now=True)),
                ('contact', models.CharField(help_text='Name of the contact', max_length=255, verbose_name='Full Name')),
                ('email', models.EmailField(help_text='Contact email.', max_length=255, verbose_name='Email')),
                ('message', models.TextField(help_text='Message provided by the contact.', verbose_name='Message')),
                ('resolution', models.TextField(blank=True, help_text='Resolution if any for the conversation.', null=True, verbose_name='Resolution')),
            ],
        ),
    ]
| [
"django.db.models.EmailField",
"django.db.models.TextField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((308, 401), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (324, 401), False, 'from django.db import migrations, models\n'), ((433, 472), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (453, 472), False, 'from django.db import migrations, models\n'), ((509, 544), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (529, 544), False, 'from django.db import migrations, models\n'), ((575, 670), 'django.db.models.CharField', 'models.CharField', ([], {'help_text': '"""Name of the contact"""', 'max_length': '(255)', 'verbose_name': '"""Full Name"""'}), "(help_text='Name of the contact', max_length=255,\n verbose_name='Full Name')\n", (591, 670), False, 'from django.db import migrations, models\n'), ((695, 783), 'django.db.models.EmailField', 'models.EmailField', ([], {'help_text': '"""Contact email."""', 'max_length': '(255)', 'verbose_name': '"""Email"""'}), "(help_text='Contact email.', max_length=255, verbose_name=\n 'Email')\n", (712, 783), False, 'from django.db import migrations, models\n'), ((809, 900), 'django.db.models.TextField', 'models.TextField', ([], {'help_text': '"""Message provided by the contact."""', 'verbose_name': '"""Message"""'}), "(help_text='Message provided by the contact.', verbose_name\n ='Message')\n", (825, 900), False, 'from django.db import migrations, models\n'), ((929, 1058), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'help_text': '"""Resolution if any for the conversation."""', 'null': '(True)', 'verbose_name': '"""Resolution"""'}), "(blank=True, help_text=\n 'Resolution if any for the conversation.', null=True, verbose_name=\n 'Resolution')\n", (945, 1058), False, 'from django.db 
import migrations, models\n')] |
import requests
from bs4 import BeautifulSoup
import json
def loadMasterStock():
    """Download Supreme's mobile stock feed, save it to masterstock.json,
    then interactively collect '"id":"name"' pairs into filteredStock.txt.

    The prompt loop runs until the user interrupts input (Ctrl-C / EOF);
    the collected pairs are then printed and written out.
    """
    url = "http://www.supremenewyork.com/mobile_stock.json"
    # Spoof the iOS app's user agent so the endpoint serves the feed.
    user = {"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 10_2_1 like Mac OS X) AppleWebKit/602.4.6 (KHTML, like Gecko) Version/10.0 Mobile/14D27 Safari/602.1"}
    # user = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36"}
    r = requests.get(url, headers=user)
    masterStock = json.loads(r.text)
    with open("masterstock.json", 'w') as outfile:
        json.dump(masterStock, outfile, indent=4, sort_keys=True)
    print("Saved to masterstock.json")

    # Hoisted out of the loop: the feed is not mutated after download.
    new_items = masterStock['products_and_categories']["new"]

    itemInfo = ""
    while True:
        try:
            item = input("Enter item name to get id or cntrl-c to quit: ")
        except (KeyboardInterrupt, EOFError):
            # fix: was a bare `except:`, which also hid real errors such as
            # typos inside the loop body. Only treat "user is done" here.
            print("Exiting...")
            if itemInfo != "":
                # Drop the trailing comma left by the append loop.
                itemInfo = itemInfo[:-1]
            print("\n" + itemInfo)
            with open("filteredStock.txt", 'w') as outfile:
                outfile.write(itemInfo)
            # fix: `return` instead of `exit()` so importing callers are not
            # killed; when run as a script the process ends the same way.
            return
        if item == "new":
            print("Getting all new items...")
            for product in new_items:
                itemInfo += '"' + str(product['id']) + '":"'
                itemInfo += str(product['name']) + '",'
        else:
            for product in new_items:
                if item.lower() in str(product['name']).lower():
                    itemInfo += '"' + str(product['id']) + '":"'
                    print("Added " + str(product['name']))
                    itemInfo += str(product['name']) + '",'
# Run the interactive stock-filter tool when executed directly.
if __name__ == '__main__':
    loadMasterStock()
| [
"json.loads",
"json.dump",
"requests.get"
] | [((455, 486), 'requests.get', 'requests.get', (['url'], {'headers': 'user'}), '(url, headers=user)\n', (467, 486), False, 'import requests\n'), ((502, 520), 'json.loads', 'json.loads', (['r.text'], {}), '(r.text)\n', (512, 520), False, 'import json\n'), ((571, 628), 'json.dump', 'json.dump', (['masterStock', 'outfile'], {'indent': '(4)', 'sort_keys': '(True)'}), '(masterStock, outfile, indent=4, sort_keys=True)\n', (580, 628), False, 'import json\n')] |
from API.models import GoesWellWith, Menu
def get_goeswellwith_items(menuitem1):
    """Return the names of menu items paired with ``menuitem1``.

    Looks up GoesWellWith rows whose first item is ``menuitem1`` and
    resolves each paired item's name from Menu. Returns ``['None']``
    when no pairings exist.
    """
    entries = GoesWellWith.objects.filter(menuitem1=menuitem1)
    # NOTE(review): this issues one query per entry (N+1); consider
    # select_related/values_list if pairing sets grow large.
    result = [Menu.objects.get(id=e.menuitem2_id).name for e in entries]
    # fix: the old `entries.count() <= 0` check issued an extra COUNT(*)
    # query before iterating; the truthiness test below avoids it.
    return result or ['None']
"API.models.Menu.objects.get",
"API.models.GoesWellWith.objects.filter"
] | [((97, 145), 'API.models.GoesWellWith.objects.filter', 'GoesWellWith.objects.filter', ([], {'menuitem1': 'menuitem1'}), '(menuitem1=menuitem1)\n', (124, 145), False, 'from API.models import GoesWellWith, Menu\n'), ((306, 341), 'API.models.Menu.objects.get', 'Menu.objects.get', ([], {'id': 'e.menuitem2_id'}), '(id=e.menuitem2_id)\n', (322, 341), False, 'from API.models import GoesWellWith, Menu\n')] |
from typing import Optional
from algorithms.basic_testing import BasicTesting
from simulator.controllers.main_controller import MainController
from simulator.controllers.map.map_controller import MapController
from simulator.controllers.gui.gui_controller import GuiController
from simulator.models.main_model import MainModel
from simulator.models.map_model import MapModel
from simulator.services.debug import DebugLevel
from simulator.services.services import Services
from simulator.services.event_manager.events.event import Event
from simulator.services.event_manager.events.reinit_event import ReinitEvent
from simulator.views.main_view import MainView
from simulator.views.map.map_view import MapView
from simulator.views.gui.gui_view import GuiView
from structures import Size
"""
Implementation is done after https://github.com/wesleywerner/mvc-game-design
"""
class Simulator:
    """
    The main simulator class.

    Wires the MVC components together (models, views, controllers) and
    runs the simulation with or without graphics depending on the
    service settings.
    """
    __services: Services
    __main: MainModel
    __map: MapModel
    __main_controller: MainController
    __map_controller: MapController
    __gui_controller: GuiController
    __main_view: MainView
    __map_view: MapView
    __gui_view: GuiView

    def __init__(self, services: Services) -> None:
        # init services
        self.__services = services
        self.__services.ev_manager.register_listener(self)

        self.__main = None
        self.__map = None
        self.__main_controller = None
        self.__map_controller = None
        self.__gui_controller = None
        self.__main_view = None
        self.__map_view = None
        # fix: __gui_view was the only declared attribute left
        # uninitialised here; accessing it before start() raised
        # AttributeError.
        self.__gui_view = None

    def start(self) -> Optional[BasicTesting]:
        """
        Starts the simulator
        :return The testing results if any
        """
        if self.__services.settings.simulator_graphics:
            return self.__start_with_graphics()
        else:
            return self.__start_without_graphics()

    def __try_setup_map_graphics(self) -> None:
        # (Re)build the map model/view/controller; only possible once an
        # algorithm instance exists.
        if self.__services.algorithm.instance is not None:
            # Tear down any previous map graphics before rebuilding.
            if self.__map_controller is not None:
                self.__map_controller.destroy()
            if self.__map_view is not None:
                self.__map_view.destroy()

            self.__map = MapModel(self.__services)
            self.__map_view = MapView(self.__services, self.__map, self.__main_view)
            self.__map_controller = MapController(self.__map_view, self.__services, self.__map)

    def __start_with_graphics(self) -> None:
        """
        Starts simulator with graphics
        """
        # init models, views, controllers
        self.__main = MainModel(self.__services)

        # init views
        self.__main_view = MainView(self.__services, self.__main, None)
        self.__gui_view = GuiView(self.__services, None, self.__main_view)

        # init controllers
        self.__main_controller = MainController(self.__services, self.__main)
        self.__gui_controller = GuiController(self.__gui_view, self.__services, self.__main)

        self.__try_setup_map_graphics()

        self.__main.run()

    def __start_without_graphics(self) -> Optional[BasicTesting]:
        """
        Starts simulator without graphics
        :return: The test results
        """
        self.__services.algorithm.instance.find_path()
        return self.__services.algorithm.instance.testing

    def notify(self, event: Event) -> None:
        # On re-initialisation, reset and detach the old map model, then
        # rebuild the map graphics for the new algorithm instance.
        if isinstance(event, ReinitEvent):
            if self.__map:
                self.__map.reset()
                self.__services.ev_manager.unregister_listener(self.__map)
                self.__services.ev_manager.unregister_tick_listener(self.__map)

            self.__try_setup_map_graphics()

    @property
    def services(self) -> Services:
        return self.__services
| [
"simulator.views.map.map_view.MapView",
"simulator.models.map_model.MapModel",
"simulator.controllers.gui.gui_controller.GuiController",
"simulator.views.gui.gui_view.GuiView",
"simulator.models.main_model.MainModel",
"simulator.controllers.map.map_controller.MapController",
"simulator.views.main_view.M... | [((2599, 2625), 'simulator.models.main_model.MainModel', 'MainModel', (['self.__services'], {}), '(self.__services)\n', (2608, 2625), False, 'from simulator.models.main_model import MainModel\n'), ((2675, 2719), 'simulator.views.main_view.MainView', 'MainView', (['self.__services', 'self.__main', 'None'], {}), '(self.__services, self.__main, None)\n', (2683, 2719), False, 'from simulator.views.main_view import MainView\n'), ((2746, 2794), 'simulator.views.gui.gui_view.GuiView', 'GuiView', (['self.__services', 'None', 'self.__main_view'], {}), '(self.__services, None, self.__main_view)\n', (2753, 2794), False, 'from simulator.views.gui.gui_view import GuiView\n'), ((2857, 2901), 'simulator.controllers.main_controller.MainController', 'MainController', (['self.__services', 'self.__main'], {}), '(self.__services, self.__main)\n', (2871, 2901), False, 'from simulator.controllers.main_controller import MainController\n'), ((2934, 2994), 'simulator.controllers.gui.gui_controller.GuiController', 'GuiController', (['self.__gui_view', 'self.__services', 'self.__main'], {}), '(self.__gui_view, self.__services, self.__main)\n', (2947, 2994), False, 'from simulator.controllers.gui.gui_controller import GuiController\n'), ((2219, 2244), 'simulator.models.map_model.MapModel', 'MapModel', (['self.__services'], {}), '(self.__services)\n', (2227, 2244), False, 'from simulator.models.map_model import MapModel\n'), ((2275, 2329), 'simulator.views.map.map_view.MapView', 'MapView', (['self.__services', 'self.__map', 'self.__main_view'], {}), '(self.__services, self.__map, self.__main_view)\n', (2282, 2329), False, 'from simulator.views.map.map_view import MapView\n'), ((2366, 2425), 'simulator.controllers.map.map_controller.MapController', 'MapController', (['self.__map_view', 'self.__services', 'self.__map'], {}), '(self.__map_view, self.__services, self.__map)\n', (2379, 2425), False, 'from simulator.controllers.map.map_controller import 
MapController\n')] |
import settings
import handlers.base_handler
import csv
class CartogramHandler(handlers.base_handler.BaseCartogramHandler):
    """Cartogram handler for Lebanon's eight governorates."""

    def get_name(self):
        """Human-readable name of the map."""
        return "Lebanon"

    def get_gen_file(self):
        """Path to the preprocessed map used for cartogram generation."""
        return "{}/lbn_processedmap.json".format(settings.CARTOGRAM_DATA_DIR)

    def validate_values(self, values):
        """Return True iff ``values`` holds exactly eight floats (one per
        governorate)."""
        if len(values) != 8:
            return False
        # fix: isinstance() instead of an exact type() comparison, so float
        # subclasses (e.g. numpy floats) are accepted as well.
        return all(isinstance(v, float) for v in values)

    def gen_area_data(self, values):
        """Render the area-data block consumed by the cartogram generator."""
        return """1 {} Akkar
2 {} Baalbak-Hermel
3 {} Beirut
4 {} Beqaa
5 {} Mount Lebanon
6 {} Nabatieh
7 {} North
8 {} South""".format(*values)

    def expect_geojson_output(self):
        return True

    def csv_to_area_string_and_colors(self, csvfile):
        """Parse an uploaded CSV, ordering rows by governorate name."""
        regions = ["Akkar", "Baalbak-Hermel", "Beirut", "Beqaa",
                   "Mount Lebanon", "Nabatieh", "North", "South"]
        # Region ids in the map file are 1-based and follow the order above.
        region_ids = {name: str(i + 1) for i, name in enumerate(regions)}
        return self.order_by_example(csv.reader(csvfile), "Governorate",
                                     0, 1, 2, 3, regions,
                                     [0.0] * 8, region_ids)
| [
"csv.reader"
] | [((841, 860), 'csv.reader', 'csv.reader', (['csvfile'], {}), '(csvfile)\n', (851, 860), False, 'import csv\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import click
@click.command()
@click.option('-n', '--name', default='World', help='Greeting partner')
def cmd(name):
    """
    Show greeting message.

    :type name: str
    """
    greeting = 'Hello, {name}!'.format(name=name)
    click.echo(greeting)
def main():
    """Script entry point: dispatch to the click command."""
    cmd()


if __name__ == '__main__':
    main()
| [
"click.option",
"click.echo",
"click.command"
] | [((63, 78), 'click.command', 'click.command', ([], {}), '()\n', (76, 78), False, 'import click\n'), ((80, 150), 'click.option', 'click.option', (['"""-n"""', '"""--name"""'], {'default': '"""World"""', 'help': '"""Greeting partner"""'}), "('-n', '--name', default='World', help='Greeting partner')\n", (92, 150), False, 'import click\n'), ((278, 293), 'click.echo', 'click.echo', (['msg'], {}), '(msg)\n', (288, 293), False, 'import click\n')] |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import mojo_lexer
import unittest
# Try to load the ply module, if not, then assume it is in the third_party
# directory.
try:
  # Disable lint check which fails to find the ply module.
  # pylint: disable=F0401
  from ply import lex
except ImportError:
  # fix: os and sys were used below without being imported, so a missing
  # ply turned into a NameError instead of taking the third_party fallback.
  import os
  import sys
  # This assumes this file is in src/mojo/public/tools/bindings/pylib/parse/.
  module_path, module_name = os.path.split(__file__)
  third_party = os.path.join(module_path, os.pardir, os.pardir, os.pardir,
                             os.pardir, os.pardir, os.pardir, 'third_party')
  sys.path.append(third_party)
  # pylint: disable=F0401
  from ply import lex
# This (monkey-patching LexToken to make comparison value-based) is evil, but
# we'll do it anyway. (I'm pretty sure ply's lexer never cares about comparing
# for object identity.)
def _LexTokenEq(self, other):
return self.type == other.type and self.value == other.value and \
self.lineno == other.lineno and self.lexpos == other.lexpos
# Install the value-based comparison on ply's LexToken class.
setattr(lex.LexToken, '__eq__', _LexTokenEq)
def _MakeLexToken(type, value, lineno=1, lexpos=0):
  """Makes a LexToken with the given parameters. (Note that lineno is 1-based,
  but lexpos is 0-based.)"""
  token = lex.LexToken()
  token.type = type
  token.value = value
  token.lineno = lineno
  token.lexpos = lexpos
  return token
def _MakeLexTokenForKeyword(keyword, **kwargs):
  """Makes a LexToken for the given keyword (upper-cased as the token type,
  lower-cased as the token value)."""
  token_type = keyword.upper()
  token_value = keyword.lower()
  return _MakeLexToken(token_type, token_value, **kwargs)
class MojoLexerTest(unittest.TestCase):
  """Tests mojo_lexer (in particular, Lexer).

  Uses assertEqual throughout: the assertEquals alias is deprecated and was
  removed in Python 3.12.
  """

  def __init__(self, *args, **kwargs):
    unittest.TestCase.__init__(self, *args, **kwargs)
    # Clone all lexer instances from this one, since making a lexer is slow.
    self._zygote_lexer = lex.lex(mojo_lexer.Lexer("my_file.mojom"))

  def testValidSingleKeywords(self):
    """Tests valid, single keywords."""
    self.assertEqual(self._SingleTokenForInput("handle"),
                     _MakeLexTokenForKeyword("handle"))
    self.assertEqual(self._SingleTokenForInput("data_pipe_consumer"),
                     _MakeLexTokenForKeyword("data_pipe_consumer"))
    self.assertEqual(self._SingleTokenForInput("data_pipe_producer"),
                     _MakeLexTokenForKeyword("data_pipe_producer"))
    self.assertEqual(self._SingleTokenForInput("message_pipe"),
                     _MakeLexTokenForKeyword("message_pipe"))
    self.assertEqual(self._SingleTokenForInput("import"),
                     _MakeLexTokenForKeyword("import"))
    self.assertEqual(self._SingleTokenForInput("module"),
                     _MakeLexTokenForKeyword("module"))
    self.assertEqual(self._SingleTokenForInput("struct"),
                     _MakeLexTokenForKeyword("struct"))
    self.assertEqual(self._SingleTokenForInput("interface"),
                     _MakeLexTokenForKeyword("interface"))
    self.assertEqual(self._SingleTokenForInput("enum"),
                     _MakeLexTokenForKeyword("enum"))

  def testValidSingleTokens(self):
    """Tests valid, single (non-keyword) tokens."""
    self.assertEqual(self._SingleTokenForInput("asdf"),
                     _MakeLexToken("NAME", "asdf"))
    self.assertEqual(self._SingleTokenForInput("@123"),
                     _MakeLexToken("ORDINAL", "@123"))
    self.assertEqual(self._SingleTokenForInput("456"),
                     _MakeLexToken("INT_CONST_DEC", "456"))
    self.assertEqual(self._SingleTokenForInput("0765"),
                     _MakeLexToken("INT_CONST_OCT", "0765"))
    self.assertEqual(self._SingleTokenForInput("0x01aB2eF3"),
                     _MakeLexToken("INT_CONST_HEX", "0x01aB2eF3"))
    self.assertEqual(self._SingleTokenForInput("123.456"),
                     _MakeLexToken("FLOAT_CONST", "123.456"))
    self.assertEqual(self._SingleTokenForInput("'x'"),
                     _MakeLexToken("CHAR_CONST", "'x'"))
    self.assertEqual(self._SingleTokenForInput("\"hello\""),
                     _MakeLexToken("STRING_LITERAL", "\"hello\""))
    self.assertEqual(self._SingleTokenForInput("+"),
                     _MakeLexToken("PLUS", "+"))
    self.assertEqual(self._SingleTokenForInput("-"),
                     _MakeLexToken("MINUS", "-"))
    self.assertEqual(self._SingleTokenForInput("*"),
                     _MakeLexToken("TIMES", "*"))
    self.assertEqual(self._SingleTokenForInput("/"),
                     _MakeLexToken("DIVIDE", "/"))
    self.assertEqual(self._SingleTokenForInput("%"),
                     _MakeLexToken("MOD", "%"))
    self.assertEqual(self._SingleTokenForInput("|"),
                     _MakeLexToken("OR", "|"))
    self.assertEqual(self._SingleTokenForInput("~"),
                     _MakeLexToken("NOT", "~"))
    self.assertEqual(self._SingleTokenForInput("^"),
                     _MakeLexToken("XOR", "^"))
    self.assertEqual(self._SingleTokenForInput("<<"),
                     _MakeLexToken("LSHIFT", "<<"))
    self.assertEqual(self._SingleTokenForInput(">>"),
                     _MakeLexToken("RSHIFT", ">>"))
    self.assertEqual(self._SingleTokenForInput("="),
                     _MakeLexToken("EQUALS", "="))
    self.assertEqual(self._SingleTokenForInput("=>"),
                     _MakeLexToken("RESPONSE", "=>"))
    self.assertEqual(self._SingleTokenForInput("("),
                     _MakeLexToken("LPAREN", "("))
    self.assertEqual(self._SingleTokenForInput(")"),
                     _MakeLexToken("RPAREN", ")"))
    self.assertEqual(self._SingleTokenForInput("["),
                     _MakeLexToken("LBRACKET", "["))
    self.assertEqual(self._SingleTokenForInput("]"),
                     _MakeLexToken("RBRACKET", "]"))
    self.assertEqual(self._SingleTokenForInput("{"),
                     _MakeLexToken("LBRACE", "{"))
    self.assertEqual(self._SingleTokenForInput("}"),
                     _MakeLexToken("RBRACE", "}"))
    self.assertEqual(self._SingleTokenForInput("<"),
                     _MakeLexToken("LANGLE", "<"))
    self.assertEqual(self._SingleTokenForInput(">"),
                     _MakeLexToken("RANGLE", ">"))
    self.assertEqual(self._SingleTokenForInput(";"),
                     _MakeLexToken("SEMI", ";"))
    self.assertEqual(self._SingleTokenForInput(","),
                     _MakeLexToken("COMMA", ","))
    self.assertEqual(self._SingleTokenForInput("."),
                     _MakeLexToken("DOT", "."))

  def _TokensForInput(self, source):
    """Gets a list of tokens for the given input string."""
    # (Parameter renamed from `input`, which shadowed the builtin.)
    lexer = self._zygote_lexer.clone()
    lexer.input(source)
    rv = []
    while True:
      tok = lexer.token()
      if not tok:
        return rv
      rv.append(tok)

  def _SingleTokenForInput(self, source):
    """Gets the single token for the given input string. (Raises an exception if
    the input string does not result in exactly one token.)"""
    toks = self._TokensForInput(source)
    assert len(toks) == 1
    return toks[0]
# Allow running this test module directly.
if __name__ == "__main__":
  unittest.main()
| [
"unittest.main",
"ply.lex.LexToken",
"mojo_lexer.Lexer",
"unittest.TestCase.__init__"
] | [((1345, 1359), 'ply.lex.LexToken', 'lex.LexToken', ([], {}), '()\n', (1357, 1359), False, 'from ply import lex\n'), ((7166, 7181), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7179, 7181), False, 'import unittest\n'), ((1744, 1793), 'unittest.TestCase.__init__', 'unittest.TestCase.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (1770, 1793), False, 'import unittest\n'), ((1904, 1937), 'mojo_lexer.Lexer', 'mojo_lexer.Lexer', (['"""my_file.mojom"""'], {}), "('my_file.mojom')\n", (1920, 1937), False, 'import mojo_lexer\n')] |
#!/usr/bin/python
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
DOCUMENTATION = """
---
module: eos_routemap
short_description: Manage EOS routemap resources
description:
- This module will manage routemap entries on EOS nodes
version_added: 1.2.0
category: Route Policy
author: Arista EOS+
requirements:
- Arista EOS 4.13.7M or later with command API enabled
- Python Client for eAPI 0.4.0 or later
notes:
- All configuration is idempotent unless otherwise specified
- Supports eos metaparameters for using the eAPI transport
- Supports stateful resource configuration.
options:
name:
description:
- The name of the routemap to manage.
required: true
default: null
choices: []
aliases: []
version_added: 1.2.0
action:
description:
- The action associated with the routemap name.
required: true
default: 'permit'
choices: ['permit','deny']
aliases: []
version_added: 1.2.0
seqno:
description:
- The sequence number of the rule that this entry corresponds to.
required: true
default: null
choices: []
aliases: []
version_added: 1.2.0
description:
description:
- The description for this routemap entry.
required: false
default: null
choices: []
aliases: []
version_added: 1.2.0
match:
description:
- The list of match statements that define the routemap entry. The
match statements should be a comma separated list of match statements
without the word match at the beginning of the string. See the example
below for more information.
required: false
default: null
choices: []
aliases: []
version_added: 1.2.0
set:
description:
- The list of set statements that define the routemap entry. The
set statements should be a comma separated list of set statements
without the word set at the beginning of the string. See the example
below for more information.
required: false
default: null
choices: []
aliases: []
version_added: 1.2.0
continue:
description:
- The statement defines the next routemap clause to evaluate.
required: false
default: null
choices: []
aliases: []
version_added: 1.2.0
"""
EXAMPLES = """
- eos_routemap: name=rm1 action=permit seqno=10
description='this is a great routemap'
match='as 50,interface Ethernet2'
set='tag 100,weight 1000'
continue=20
"""
#<<EOS_COMMON_MODULE_START>>
import syslog
import collections
from ansible.module_utils.basic import *
try:
import pyeapi
PYEAPI_AVAILABLE = True
except ImportError:
PYEAPI_AVAILABLE = False
DEFAULT_SYSLOG_PRIORITY = syslog.LOG_NOTICE
DEFAULT_CONNECTION = 'localhost'
TRANSPORTS = ['socket', 'http', 'https', 'http_local']
class EosConnection(object):
    """Resolves eAPI connection parameters and builds a verified pyeapi node.

    Parameters may come from an eapi.conf connection profile (looked up by
    name) and are overridden by any explicitly supplied attribute.
    """

    # Attributes that override values from the named connection profile.
    __attributes__ = ['username', 'password', 'host', 'transport', 'port']

    def __init__(self, **kwargs):
        self.connection = kwargs['connection']
        self.transport = kwargs.get('transport')
        self.username = kwargs.get('username')
        self.password = kwargs.get('password')
        self.host = kwargs.get('host')
        self.port = kwargs.get('port')
        self.config = kwargs.get('config')

    def connect(self):
        """Build a pyeapi Node and verify it answers 'show version'.

        Raises:
            ValueError: if the named connection is unknown, no transport is
                defined, or the node cannot be reached.
        """
        if self.config is not None:
            pyeapi.load_config(self.config)

        config = dict()

        if self.connection is not None:
            config = pyeapi.config_for(self.connection)
            if not config:
                msg = 'Connection name "{}" not found'.format(self.connection)
                # fix: the message was built but never raised, so an unknown
                # connection name silently fell through with an empty config.
                raise ValueError(msg)

        for key in self.__attributes__:
            if getattr(self, key) is not None:
                config[key] = getattr(self, key)

        if 'transport' not in config:
            raise ValueError('Connection must define a transport')

        connection = pyeapi.client.make_connection(**config)
        node = pyeapi.client.Node(connection, **config)

        try:
            node.enable('show version')
        except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError):
            raise ValueError('unable to connect to {}'.format(node))
        return node
class EosAnsibleModule(AnsibleModule):
meta_args = {
'config': dict(),
'username': dict(),
'password': dict(),
'host': dict(),
'connection': dict(default=DEFAULT_CONNECTION),
'transport': dict(choices=TRANSPORTS),
'port': dict(),
'debug': dict(type='bool', default='false'),
'logging': dict(type='bool', default='true')
}
stateful_args = {
'state': dict(default='present', choices=['present', 'absent']),
}
def __init__(self, stateful=True, autorefresh=False, *args, **kwargs):
kwargs['argument_spec'].update(self.meta_args)
self._stateful = stateful
if stateful:
kwargs['argument_spec'].update(self.stateful_args)
## Ok, so in Ansible 2.0,
## AnsibleModule.__init__() sets self.params and then
## calls self.log()
## (through self._log_invocation())
##
## However, self.log() (overridden in EosAnsibleModule)
## references self._logging
## and self._logging (defined in EosAnsibleModule)
## references self.params.
##
## So ... I'm defining self._logging without "or self.params['logging']"
## *before* AnsibleModule.__init__() to avoid a "ref before def".
##
## I verified that this works with Ansible 1.9.4 and 2.0.0.2.
## The only caveat is that the first log message in
## AnsibleModule.__init__() won't be subject to the value of
## self.params['logging'].
self._logging = kwargs.get('logging')
super(EosAnsibleModule, self).__init__(*args, **kwargs)
self.result = dict(changed=False, changes=dict())
self._debug = kwargs.get('debug') or self.boolean(self.params['debug'])
self._logging = kwargs.get('logging') or self.params['logging']
self.log('DEBUG flag is %s' % self._debug)
self.debug('pyeapi_version', self.check_pyeapi())
self.debug('stateful', self._stateful)
self.debug('params', self.params)
self._attributes = self.map_argument_spec()
self.validate()
self._autorefresh = autorefresh
self._node = EosConnection(**self.params)
self._node.connect()
self._node = self.connect()
self._instance = None
self.desired_state = self.params['state'] if self._stateful else None
self.exit_after_flush = kwargs.get('exit_after_flush')
@property
def instance(self):
if self._instance:
return self._instance
func = self.func('instance')
if not func:
self.fail('Module does not support "instance"')
try:
self._instance = func(self)
except Exception as exc:
self.fail('instance[error]: %s' % exc.message)
self.log("called instance: %s" % self._instance)
return self._instance
@property
def attributes(self):
return self._attributes
@property
def node(self):
return self._node
def check_pyeapi(self):
if not PYEAPI_AVAILABLE:
self.fail('Unable to import pyeapi, is it installed?')
return pyeapi.__version__
def map_argument_spec(self):
"""map_argument_spec maps only the module argument spec to attrs
This method will map the argumentspec minus the meta_args to attrs
and return the attrs. This returns a dict object that includes only
the original argspec plus the stateful_args (if self._stateful=True)
Returns:
dict: Returns a dict object that includes the original
argument_spec plus stateful_args with values minus meta_args
"""
keys = set(self.params).difference(self.meta_args)
attrs = dict()
attrs = dict([(k, self.params[k]) for k in self.params if k in keys])
if 'CHECKMODE' in attrs:
del attrs['CHECKMODE']
return attrs
def validate(self):
for key, value in self.attributes.iteritems():
func = self.func('validate_%s' % key)
if func:
self.attributes[key] = func(value)
def create(self):
if not self.check_mode:
func = self.func('create')
if not func:
self.fail('Module must define "create" function')
return self.invoke(func, self)
def remove(self):
if not self.check_mode:
func = self.func('remove')
if not func:
self.fail('Module most define "remove" function')
return self.invoke(func, self)
def flush(self, exit_after_flush=False):
self.exit_after_flush = exit_after_flush
if self.desired_state == 'present' or not self._stateful:
if self.instance.get('state') == 'absent':
changed = self.create()
self.result['changed'] = changed or True
self.refresh()
# After a create command, flush the running-config
# so we get the latest for any other attributes
self._node._running_config = None
changeset = self.attributes.viewitems() - self.instance.viewitems()
if self._debug:
self.debug('desired_state', self.attributes)
self.debug('current_state', self.instance)
changes = self.update(changeset)
if changes:
self.result['changes'] = changes
self.result['changed'] = True
self._attributes.update(changes)
flush = self.func('flush')
if flush:
self.invoke(flush, self)
elif self.desired_state == 'absent' and self._stateful:
if self.instance.get('state') == 'present':
changed = self.remove()
self.result['changed'] = changed or True
elif self._stateful:
if self.desired_state != self.instance.get('state'):
func = self.func(self.desired_state)
changed = self.invoke(func, self)
self.result['changed'] = changed or True
self.refresh()
# By calling self.instance here we trigger another show running-config
# all which causes delay. Only if debug is enabled do we call this
# since it will display the latest state of the object.
if self._debug:
self.result['instance'] = self.instance
if self.exit_after_flush:
self.exit()
def update(self, changeset):
changes = dict()
for key, value in changeset:
if value is not None:
changes[key] = value
func = self.func('set_%s' % key)
if func and not self.check_mode:
try:
self.invoke(func, self)
except Exception as exc:
self.fail(exc.message)
return changes
def connect(self):
if self.params['config']:
pyeapi.load_config(self.params['config'])
config = dict()
if self.params['connection']:
config = pyeapi.config_for(self.params['connection'])
if not config:
msg = 'Connection name "%s" not found' % self.params['connection']
self.fail(msg)
if self.params['username']:
config['username'] = self.params['username']
if self.params['password']:
config['password'] = self.params['password']
if self.params['transport']:
config['transport'] = self.params['transport']
if self.params['port']:
config['port'] = self.params['port']
if self.params['host']:
config['host'] = self.params['host']
if 'transport' not in config:
self.fail('Connection must define a transport')
connection = pyeapi.client.make_connection(**config)
self.log('Creating connection with autorefresh=%s' % self._autorefresh)
node = pyeapi.client.Node(connection, autorefresh=self._autorefresh,
**config)
try:
resp = node.enable('show version')
self.debug('eos_version', resp[0]['result']['version'])
self.debug('eos_model', resp[0]['result']['modelName'])
except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError):
self.fail('unable to connect to %s' % node)
else:
self.log('Connected to node %s' % node)
self.debug('node', str(node))
return node
def config(self, commands):
self.result['changed'] = True
if not self.check_mode:
self.node.config(commands)
def api(self, module):
return self.node.api(module)
def func(self, name):
return globals().get(name)
def invoke(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exc:
self.fail(exc.message)
def invoke_function(self, name, *args, **kwargs):
func = self.func(name)
if func:
return self.invoke(func, *args, **kwargs)
def fail(self, msg):
self.invoke_function('on_fail', self)
self.log('ERROR: %s' % msg, syslog.LOG_ERR)
self.fail_json(msg=msg)
def exit(self):
self.invoke_function('on_exit', self)
self.log('Module completed successfully')
self.exit_json(**self.result)
def refresh(self):
self._instance = None
def debug(self, key, value):
if self._debug:
if 'debug' not in self.result:
self.result['debug'] = dict()
self.result['debug'][key] = value
def log(self, message, log_args=None, priority=None):
if self._logging:
syslog.openlog('ansible-eos')
priority = priority or DEFAULT_SYSLOG_PRIORITY
syslog.syslog(priority, str(message))
@classmethod
def add_state(cls, name):
cls.stateful_args['state']['choices'].append(name)
#<<EOS_COMMON_MODULE_END>>
def instance(module):
""" Returns an instance of Routemaps based on name, action and sequence
number.
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
_instance = dict(name=name, action=action, seqno=seqno, state='absent')
try:
result = module.api('routemaps').get(name)[action][seqno]
except:
result = None
if result:
_instance['state'] = 'present'
_instance['seqno'] = str(seqno)
_instance['set'] = ','.join(result['set'])
desc = result['description']
_instance['description'] = desc if desc else ''
_instance['match'] = ','.join(result['match'])
cont = result['continue']
_instance['continue'] = str(cont) if cont else ''
return _instance
def create(module):
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
module.log('Invoked create for eos_routemap[%s %s %s]'
% (name, action, seqno))
module.api('routemaps').create(name, action, seqno)
def remove(module):
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
module.log('Invoked remove for eos_routemap[%s %s %s]'
% (name, action, seqno))
module.api('routemaps').delete(name, action, seqno)
def set_description(module):
""" Configures the description for the routemap
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
value = module.attributes['description']
module.log('Invoked set_description with %s for eos_routemap[%s %s %s]'
% (value, name, action, seqno))
if value == '':
module.node.api('routemaps').set_description(name, action, seqno,
disable=True)
else:
module.node.api('routemaps').set_description(name, action, seqno, value)
def set_continue(module):
""" Configures the continue value for the routemap
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
try:
value = int(module.attributes['continue'])
except:
value = None
module.log('Invoked set_continue for eos_routemap[%s %s %s]'
% (name, action, seqno))
if value is None:
module.node.api('routemaps').set_continue(name, action, seqno,
disable=True)
else:
module.node.api('routemaps').set_continue(name, action, seqno, value)
def set_match(module):
""" Configures the match statements for the routemap
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
statements = module.attributes['match'].split(',')
module.log('Invoked set_match for eos_routemap[%s %s %s]'
% (name, action, seqno))
module.node.api('routemaps').set_match_statements(name, action, seqno,
statements)
def set_set(module):
""" Configures the set statements for the routemap
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
statements = module.attributes['set'].split(',')
module.log('Invoked set_set for eos_routemap[%s %s %s]'
% (name, action, seqno))
module.node.api('routemaps').set_set_statements(name, action, seqno,
statements)
def main():
""" The main module routine called when the module is run by Ansible
"""
argument_spec = dict(
name=dict(required=True),
action=dict(default='permit', choices=['permit', 'deny']),
seqno=dict(required=True),
description=dict(),
match=dict(),
set=dict()
)
argument_spec['continue'] = dict()
module = EosAnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
module.flush(True)
main() | [
"pyeapi.config_for",
"syslog.openlog",
"pyeapi.client.make_connection",
"pyeapi.client.Node",
"pyeapi.load_config"
] | [((5396, 5435), 'pyeapi.client.make_connection', 'pyeapi.client.make_connection', ([], {}), '(**config)\n', (5425, 5435), False, 'import pyeapi\n'), ((5451, 5491), 'pyeapi.client.Node', 'pyeapi.client.Node', (['connection'], {}), '(connection, **config)\n', (5469, 5491), False, 'import pyeapi\n'), ((13693, 13732), 'pyeapi.client.make_connection', 'pyeapi.client.make_connection', ([], {}), '(**config)\n', (13722, 13732), False, 'import pyeapi\n'), ((13828, 13899), 'pyeapi.client.Node', 'pyeapi.client.Node', (['connection'], {'autorefresh': 'self._autorefresh'}), '(connection, autorefresh=self._autorefresh, **config)\n', (13846, 13899), False, 'import pyeapi\n'), ((4871, 4902), 'pyeapi.load_config', 'pyeapi.load_config', (['self.config'], {}), '(self.config)\n', (4889, 4902), False, 'import pyeapi\n'), ((4990, 5024), 'pyeapi.config_for', 'pyeapi.config_for', (['self.connection'], {}), '(self.connection)\n', (5007, 5024), False, 'import pyeapi\n'), ((12810, 12851), 'pyeapi.load_config', 'pyeapi.load_config', (["self.params['config']"], {}), "(self.params['config'])\n", (12828, 12851), False, 'import pyeapi\n'), ((12937, 12981), 'pyeapi.config_for', 'pyeapi.config_for', (["self.params['connection']"], {}), "(self.params['connection'])\n", (12954, 12981), False, 'import pyeapi\n'), ((15643, 15672), 'syslog.openlog', 'syslog.openlog', (['"""ansible-eos"""'], {}), "('ansible-eos')\n", (15657, 15672), False, 'import syslog\n')] |
"""Setup configuration."""
import setuptools
from furystoolbox import __version__
with open("README.md", "r") as fh:
LONG = fh.read()
REQUIRES = ['click>=7.0',
'requests>=2.21.0',
'PyGithub>=1.43.4']
setuptools.setup(
name="furystoolbox",
version=__version__,
author="<NAME>",
author_email="<EMAIL>",
description="A collection of tools.",
long_description=LONG,
long_description_content_type="text/markdown",
url="https://github.com/ludeeus/furystoolbox",
install_requires=REQUIRES,
packages=setuptools.find_packages(),
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
entry_points={
'console_scripts': [
'fury = furystoolbox.cli.cli:CLI'
]
}
)
| [
"setuptools.find_packages"
] | [((563, 589), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (587, 589), False, 'import setuptools\n')] |
"""
This is the script containing the calibration module, basically calculating homography matrix.
This code and data is released under the Creative Commons Attribution-NonCommercial 4.0 International license (CC BY-NC.) In a nutshell:
# The license is only for non-commercial use (commercial licenses can be obtained from Stanford).
# The material is provided as-is, with no warranties whatsoever.
# If you publish any code, data, or scientific work based on this, please cite our work.
Technical Paper:
<NAME>, <NAME>, <NAME>, <NAME>. Neural Holography with Camera-in-the-loop Training. ACM TOG (SIGGRAPH Asia), 2020.
"""
import cv2
import matplotlib.pyplot as plt
import numpy as np
def circle_detect(captured_img, num_circles, spacing, pad_pixels=(0., 0.), show_preview=True):
"""
Detects the circle of a circle board pattern
:param captured_img: captured image
:param num_circles: a tuple of integers, (num_circle_x, num_circle_y)
:param spacing: a tuple of integers, in pixels, (space between circles in x, space btw circs in y direction)
:param show_preview: boolean, default True
:param pad_pixels: coordinate of the left top corner of warped image.
Assuming pad this amount of pixels on the other side.
:return: a tuple, (found_dots, H)
found_dots: boolean, indicating success of calibration
H: a 3x3 homography matrix (numpy)
"""
# Binarization
# org_copy = org.copy() # Otherwise, we write on the original image!
img = (captured_img.copy() * 255).astype(np.uint8)
if len(img.shape) > 2:
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.medianBlur(img, 15)
img_gray = img.copy()
img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 121, 0)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (15, 15))
img = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
img = 255 - img
# Blob detection
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.filterByColor = True
params.minThreshold = 128
# Filter by Area.
params.filterByArea = True
params.minArea = 50
# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.785
# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.87
# Filter by Inertia
params.filterByInertia = False
params.minInertiaRatio = 0.01
detector = cv2.SimpleBlobDetector_create(params)
# Detecting keypoints
# this is redundant for what comes next, but gives us access to the detected dots for debug
keypoints = detector.detect(img)
found_dots, centers = cv2.findCirclesGrid(img, num_circles,
blobDetector=detector, flags=cv2.CALIB_CB_SYMMETRIC_GRID)
# Drawing the keypoints
cv2.drawChessboardCorners(captured_img, num_circles, centers, found_dots)
img_gray = cv2.drawKeypoints(img_gray, keypoints, np.array([]), (0, 255, 0),
cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# Find transformation
H = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]], dtype=np.float32)
if found_dots:
# Generate reference points to compute the homography
ref_pts = np.zeros((num_circles[0] * num_circles[1], 1, 2), np.float32)
pos = 0
for i in range(0, num_circles[1]):
for j in range(0, num_circles[0]):
ref_pts[pos, 0, :] = spacing * np.array([j, i]) + np.array(pad_pixels)
pos += 1
H, mask = cv2.findHomography(centers, ref_pts, cv2.RANSAC, 1)
if show_preview:
dsize = [int((num_circs - 1) * space + 2 * pad_pixs)
for num_circs, space, pad_pixs in zip(num_circles, spacing, pad_pixels)]
captured_img_warp = cv2.warpPerspective(captured_img, H, tuple(dsize))
if show_preview:
fig = plt.figure()
ax = fig.add_subplot(223)
ax.imshow(img_gray, cmap='gray')
ax2 = fig.add_subplot(221)
ax2.imshow(img, cmap='gray')
ax3 = fig.add_subplot(222)
ax3.imshow(captured_img, cmap='gray')
if found_dots:
ax4 = fig.add_subplot(224)
ax4.imshow(captured_img_warp, cmap='gray')
plt.show()
return found_dots, H
class Calibration:
def __init__(self, num_circles=(21, 12), spacing_size=(80, 80), pad_pixels=(0, 0)):
self.num_circles = num_circles
self.spacing_size = spacing_size
self.pad_pixels = pad_pixels
self.h_transform = np.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
def calibrate(self, img, show_preview=True):
found_corners, self.h_transform = circle_detect(img, self.num_circles,
self.spacing_size, self.pad_pixels, show_preview)
return found_corners
def get_transform(self):
return self.h_transform
def __call__(self, input_img, img_size=None):
"""
This forward pass returns the warped image.
:param input_img: A numpy grayscale image shape of [H, W].
:param img_size: output size, default None.
:return: output_img: warped image with pre-calculated homography and destination size.
"""
if img_size is None:
img_size = [int((num_circs - 1) * space + 2 * pad_pixs)
for num_circs, space, pad_pixs in zip(self.num_circles, self.spacing_size, self.pad_pixels)]
output_img = cv2.warpPerspective(input_img, self.h_transform, tuple(img_size))
return output_img
| [
"cv2.findCirclesGrid",
"cv2.SimpleBlobDetector_create",
"cv2.findHomography",
"cv2.medianBlur",
"cv2.morphologyEx",
"cv2.adaptiveThreshold",
"cv2.SimpleBlobDetector_Params",
"numpy.array",
"numpy.zeros",
"cv2.cvtColor",
"matplotlib.pyplot.figure",
"cv2.drawChessboardCorners",
"cv2.getStructu... | [((1681, 1704), 'cv2.medianBlur', 'cv2.medianBlur', (['img', '(15)'], {}), '(img, 15)\n', (1695, 1704), False, 'import cv2\n'), ((1742, 1837), 'cv2.adaptiveThreshold', 'cv2.adaptiveThreshold', (['img', '(255)', 'cv2.ADAPTIVE_THRESH_GAUSSIAN_C', 'cv2.THRESH_BINARY', '(121)', '(0)'], {}), '(img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.\n THRESH_BINARY, 121, 0)\n', (1763, 1837), False, 'import cv2\n'), ((1846, 1900), 'cv2.getStructuringElement', 'cv2.getStructuringElement', (['cv2.MORPH_ELLIPSE', '(15, 15)'], {}), '(cv2.MORPH_ELLIPSE, (15, 15))\n', (1871, 1900), False, 'import cv2\n'), ((1911, 1956), 'cv2.morphologyEx', 'cv2.morphologyEx', (['img', 'cv2.MORPH_OPEN', 'kernel'], {}), '(img, cv2.MORPH_OPEN, kernel)\n', (1927, 1956), False, 'import cv2\n'), ((2012, 2043), 'cv2.SimpleBlobDetector_Params', 'cv2.SimpleBlobDetector_Params', ([], {}), '()\n', (2041, 2043), False, 'import cv2\n'), ((2514, 2551), 'cv2.SimpleBlobDetector_create', 'cv2.SimpleBlobDetector_create', (['params'], {}), '(params)\n', (2543, 2551), False, 'import cv2\n'), ((2738, 2838), 'cv2.findCirclesGrid', 'cv2.findCirclesGrid', (['img', 'num_circles'], {'blobDetector': 'detector', 'flags': 'cv2.CALIB_CB_SYMMETRIC_GRID'}), '(img, num_circles, blobDetector=detector, flags=cv2.\n CALIB_CB_SYMMETRIC_GRID)\n', (2757, 2838), False, 'import cv2\n'), ((2913, 2986), 'cv2.drawChessboardCorners', 'cv2.drawChessboardCorners', (['captured_img', 'num_circles', 'centers', 'found_dots'], {}), '(captured_img, num_circles, centers, found_dots)\n', (2938, 2986), False, 'import cv2\n'), ((3180, 3259), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {'dtype': 'np.float32'}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]], dtype=np.float32)\n', (3188, 3259), True, 'import numpy as np\n'), ((1632, 1669), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_BGR2GRAY'], {}), '(img, cv2.COLOR_BGR2GRAY)\n', (1644, 1669), False, 'import cv2\n'), ((3041, 3053), 
'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3049, 3053), True, 'import numpy as np\n'), ((3386, 3447), 'numpy.zeros', 'np.zeros', (['(num_circles[0] * num_circles[1], 1, 2)', 'np.float32'], {}), '((num_circles[0] * num_circles[1], 1, 2), np.float32)\n', (3394, 3447), True, 'import numpy as np\n'), ((3685, 3736), 'cv2.findHomography', 'cv2.findHomography', (['centers', 'ref_pts', 'cv2.RANSAC', '(1)'], {}), '(centers, ref_pts, cv2.RANSAC, 1)\n', (3703, 3736), False, 'import cv2\n'), ((4040, 4052), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4050, 4052), True, 'import matplotlib.pyplot as plt\n'), ((4411, 4421), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4419, 4421), True, 'import matplotlib.pyplot as plt\n'), ((4701, 4762), 'numpy.array', 'np.array', (['[[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]'], {}), '([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])\n', (4709, 4762), True, 'import numpy as np\n'), ((3620, 3640), 'numpy.array', 'np.array', (['pad_pixels'], {}), '(pad_pixels)\n', (3628, 3640), True, 'import numpy as np\n'), ((3601, 3617), 'numpy.array', 'np.array', (['[j, i]'], {}), '([j, i])\n', (3609, 3617), True, 'import numpy as np\n')] |
'''
# Amazon Kinesis Construct Library
<!--BEGIN STABILITY BANNER-->---


---
<!--END STABILITY BANNER-->
[Amazon Kinesis](https://docs.aws.amazon.com/streams/latest/dev/introduction.html) provides collection and processing of large
[streams](https://aws.amazon.com/streaming-data/) of data records in real time. Kinesis data streams can be used for rapid and continuous data
intake and aggregation.
## Table Of Contents
* [Streams](#streams)
* [Encryption](#encryption)
* [Import](#import)
* [Permission Grants](#permission-grants)
* [Read Permissions](#read-permissions)
* [Write Permissions](#write-permissions)
* [Custom Permissions](#custom-permissions)
* [Metrics](#metrics)
## Streams
Amazon Kinesis Data Streams ingests a large amount of data in real time, durably stores the data, and makes the data available for consumption.
Using the CDK, a new Kinesis stream can be created as part of the stack using the construct's constructor. You may specify the `streamName` to give
your own identifier to the stream. If not, CloudFormation will generate a name.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
Stream(self, "MyFirstStream",
stream_name="my-awesome-stream"
)
```
You can also specify properties such as `shardCount` to indicate how many shards the stream should use and a `retentionPeriod`
to specify how long the data in the shards should remain accessible.
Read more at [Creating and Managing Streams](https://docs.aws.amazon.com/streams/latest/dev/working-with-streams.html)
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
Stream(self, "MyFirstStream",
stream_name="my-awesome-stream",
shard_count=3,
retention_period=Duration.hours(48)
)
```
### Encryption
[Stream encryption](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesis-stream-streamencryption.html) enables
server-side encryption using an AWS KMS key for a specified stream.
Encryption is enabled by default on your stream with the master key owned by Kinesis Data Streams in regions where it is supported.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
Stream(self, "MyEncryptedStream")
```
You can enable encryption on your stream with a user-managed key by specifying the `encryption` property.
A KMS key will be created for you and associated with the stream.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
Stream(self, "MyEncryptedStream",
encryption=StreamEncryption.KMS
)
```
You can also supply your own external KMS key to use for stream encryption by specifying the `encryptionKey` property.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
import aws_cdk.aws_kms as kms
key = kms.Key(self, "MyKey")
Stream(self, "MyEncryptedStream",
encryption=StreamEncryption.KMS,
encryption_key=key
)
```
### Import
Any Kinesis stream that has been created outside the stack can be imported into your CDK app.
Streams can be imported by their ARN via the `Stream.fromStreamArn()` API
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
stack = Stack(app, "MyStack")
imported_stream = Stream.from_stream_arn(stack, "ImportedStream", "arn:aws:kinesis:us-east-2:123456789012:stream/f3j09j2230j")
```
Encrypted Streams can also be imported by their attributes via the `Stream.fromStreamAttributes()` API
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
from aws_cdk.aws_kms import Key
stack = Stack(app, "MyStack")
imported_stream = Stream.from_stream_attributes(stack, "ImportedEncryptedStream",
stream_arn="arn:aws:kinesis:us-east-2:123456789012:stream/f3j09j2230j",
encryption_key=kms.Key.from_key_arn("arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012")
)
```
### Permission Grants
IAM roles, users or groups which need to be able to work with Amazon Kinesis streams at runtime should be granted IAM permissions.
Any object that implements the `IGrantable` interface (has an associated principal) can be granted permissions by calling:
* `grantRead(principal)` - grants the principal read access
* `grantWrite(principal)` - grants the principal write permissions to a Stream
* `grantReadWrite(principal)` - grants principal read and write permissions
#### Read Permissions
Grant `read` access to a stream by calling the `grantRead()` API.
If the stream has an encryption key, read permissions will also be granted to the key.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
lambda_role = iam.Role(self, "Role",
assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
description="Example role..."
)
stream = Stream(self, "MyEncryptedStream",
encryption=StreamEncryption.KMS
)
# give lambda permissions to read stream
stream.grant_read(lambda_role)
```
The following read permissions are provided to a service principal by the `grantRead()` API:
* `kinesis:DescribeStreamSummary`
* `kinesis:GetRecords`
* `kinesis:GetShardIterator`
* `kinesis:ListShards`
* `kinesis:SubscribeToShard`
#### Write Permissions
Grant `write` permissions to a stream by calling the `grantWrite()` API.
If the stream has an encryption key, write permissions will also be granted to the key.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
lambda_role = iam.Role(self, "Role",
assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"),
description="Example role..."
)
stream = Stream(self, "MyEncryptedStream",
encryption=StreamEncryption.KMS
)
# give lambda permissions to write to stream
stream.grant_write(lambda_role)
```
The following write permissions are provided to a service principal by the `grantWrite()` API:
* `kinesis:ListShards`
* `kinesis:PutRecord`
* `kinesis:PutRecords`
#### Custom Permissions
You can add any set of permissions to a stream by calling the `grant()` API.
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
user = iam.User(stack, "MyUser")
stream = Stream(stack, "MyStream")
# give my user permissions to list shards
stream.grant(user, "kinesis:ListShards")
```
### Metrics
You can use common metrics from your stream to create alarms and/or dashboards. The `stream.metric('MetricName')` method creates a metric with the stream namespace and dimension. You can also use pre-define methods like `stream.metricGetRecordsSuccess()`. To find out more about Kinesis metrics check [Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch](https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html).
```python
# Example automatically generated without compilation. See https://github.com/aws/jsii/issues/826
stream = Stream(stack, "MyStream")
# Using base metric method passing the metric name
stream.metric("GetRecords.Success")
# using pre-defined metric method
stream.metric_get_records_success()
# using pre-defined and overriding the statistic
stream.metric_get_records_success(statistic="Maximum")
```
'''
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from ._jsii import *
import aws_cdk.aws_cloudwatch
import aws_cdk.aws_iam
import aws_cdk.aws_kms
import aws_cdk.core
import constructs
# NOTE(review): this class looks like an auto-generated jsii binding; each
# Python member is wired to the JavaScript implementation through the string
# passed to ``@jsii.member``/``jsii.get``/``jsii.set`` — do not rename members
# without updating those strings in the generator, not here.
@jsii.implements(aws_cdk.core.IInspectable)
class CfnStream(
    aws_cdk.core.CfnResource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@aws-cdk/aws-kinesis.CfnStream",
):
    '''A CloudFormation ``AWS::Kinesis::Stream``.
    :cloudformationResource: AWS::Kinesis::Stream
    :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html
    '''
    def __init__(
        self,
        scope: aws_cdk.core.Construct,
        id: builtins.str,
        *,
        shard_count: jsii.Number,
        name: typing.Optional[builtins.str] = None,
        retention_period_hours: typing.Optional[jsii.Number] = None,
        stream_encryption: typing.Optional[typing.Union["CfnStream.StreamEncryptionProperty", aws_cdk.core.IResolvable]] = None,
        tags: typing.Optional[typing.Sequence[aws_cdk.core.CfnTag]] = None,
    ) -> None:
        '''Create a new ``AWS::Kinesis::Stream``.
        :param scope: - scope in which this resource is defined.
        :param id: - scoped id of the resource.
        :param shard_count: ``AWS::Kinesis::Stream.ShardCount``.
        :param name: ``AWS::Kinesis::Stream.Name``.
        :param retention_period_hours: ``AWS::Kinesis::Stream.RetentionPeriodHours``.
        :param stream_encryption: ``AWS::Kinesis::Stream.StreamEncryption``.
        :param tags: ``AWS::Kinesis::Stream.Tags``.
        '''
        # Bundle the keyword arguments into the generated props struct, then
        # hand construction off to the jsii kernel.
        props = CfnStreamProps(
            shard_count=shard_count,
            name=name,
            retention_period_hours=retention_period_hours,
            stream_encryption=stream_encryption,
            tags=tags,
        )
        jsii.create(CfnStream, self, [scope, id, props])
    @jsii.member(jsii_name="inspect")
    def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
        '''Examines the CloudFormation resource and discloses attributes.
        :param inspector: - tree inspector to collect and process attributes.
        '''
        return typing.cast(None, jsii.invoke(self, "inspect", [inspector]))
    @jsii.member(jsii_name="renderProperties")
    def _render_properties(
        self,
        props: typing.Mapping[builtins.str, typing.Any],
    ) -> typing.Mapping[builtins.str, typing.Any]:
        '''Render the resource properties for CloudFormation synthesis.
        :param props: -
        '''
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "renderProperties", [props]))
    @jsii.python.classproperty # type: ignore[misc]
    @jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
    def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
        '''The CloudFormation resource type name for this resource class.'''
        return typing.cast(builtins.str, jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrArn")
    def attr_arn(self) -> builtins.str:
        '''
        :cloudformationAttribute: Arn
        '''
        return typing.cast(builtins.str, jsii.get(self, "attrArn"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="cfnProperties")
    def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
        # Raw property bag consumed by CfnResource during synthesis.
        return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.get(self, "cfnProperties"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="tags")
    def tags(self) -> aws_cdk.core.TagManager:
        '''``AWS::Kinesis::Stream.Tags``.
        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-tags
        '''
        return typing.cast(aws_cdk.core.TagManager, jsii.get(self, "tags"))
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="shardCount")
    def shard_count(self) -> jsii.Number:
        '''``AWS::Kinesis::Stream.ShardCount``.
        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-shardcount
        '''
        return typing.cast(jsii.Number, jsii.get(self, "shardCount"))
    @shard_count.setter
    def shard_count(self, value: jsii.Number) -> None:
        # Forward the new value to the underlying jsii object.
        jsii.set(self, "shardCount", value)
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="name")
    def name(self) -> typing.Optional[builtins.str]:
        '''``AWS::Kinesis::Stream.Name``.
        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-name
        '''
        return typing.cast(typing.Optional[builtins.str], jsii.get(self, "name"))
    @name.setter
    def name(self, value: typing.Optional[builtins.str]) -> None:
        # Forward the new value to the underlying jsii object.
        jsii.set(self, "name", value)
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="retentionPeriodHours")
    def retention_period_hours(self) -> typing.Optional[jsii.Number]:
        '''``AWS::Kinesis::Stream.RetentionPeriodHours``.
        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-retentionperiodhours
        '''
        return typing.cast(typing.Optional[jsii.Number], jsii.get(self, "retentionPeriodHours"))
    @retention_period_hours.setter
    def retention_period_hours(self, value: typing.Optional[jsii.Number]) -> None:
        # Forward the new value to the underlying jsii object.
        jsii.set(self, "retentionPeriodHours", value)
    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="streamEncryption")
    def stream_encryption(
        self,
    ) -> typing.Optional[typing.Union["CfnStream.StreamEncryptionProperty", aws_cdk.core.IResolvable]]:
        '''``AWS::Kinesis::Stream.StreamEncryption``.
        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-streamencryption
        '''
        return typing.cast(typing.Optional[typing.Union["CfnStream.StreamEncryptionProperty", aws_cdk.core.IResolvable]], jsii.get(self, "streamEncryption"))
    @stream_encryption.setter
    def stream_encryption(
        self,
        value: typing.Optional[typing.Union["CfnStream.StreamEncryptionProperty", aws_cdk.core.IResolvable]],
    ) -> None:
        # Forward the new value to the underlying jsii object.
        jsii.set(self, "streamEncryption", value)
    @jsii.data_type(
        jsii_type="@aws-cdk/aws-kinesis.CfnStream.StreamEncryptionProperty",
        jsii_struct_bases=[],
        name_mapping={"encryption_type": "encryptionType", "key_id": "keyId"},
    )
    class StreamEncryptionProperty:
        def __init__(
            self,
            *,
            encryption_type: builtins.str,
            key_id: builtins.str,
        ) -> None:
            '''
            :param encryption_type: ``CfnStream.StreamEncryptionProperty.EncryptionType``.
            :param key_id: ``CfnStream.StreamEncryptionProperty.KeyId``.
            :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesis-stream-streamencryption.html
            '''
            # Values are stored in a plain dict; accessors below read it back
            # with required-key assertions.
            self._values: typing.Dict[str, typing.Any] = {
                "encryption_type": encryption_type,
                "key_id": key_id,
            }
        @builtins.property
        def encryption_type(self) -> builtins.str:
            '''``CfnStream.StreamEncryptionProperty.EncryptionType``.
            :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesis-stream-streamencryption.html#cfn-kinesis-stream-streamencryption-encryptiontype
            '''
            result = self._values.get("encryption_type")
            assert result is not None, "Required property 'encryption_type' is missing"
            return typing.cast(builtins.str, result)
        @builtins.property
        def key_id(self) -> builtins.str:
            '''``CfnStream.StreamEncryptionProperty.KeyId``.
            :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesis-stream-streamencryption.html#cfn-kinesis-stream-streamencryption-keyid
            '''
            result = self._values.get("key_id")
            assert result is not None, "Required property 'key_id' is missing"
            return typing.cast(builtins.str, result)
        def __eq__(self, rhs: typing.Any) -> builtins.bool:
            # Structural equality: same class and same stored values.
            return isinstance(rhs, self.__class__) and rhs._values == self._values
        def __ne__(self, rhs: typing.Any) -> builtins.bool:
            return not (rhs == self)
        def __repr__(self) -> str:
            return "StreamEncryptionProperty(%s)" % ", ".join(
                k + "=" + repr(v) for k, v in self._values.items()
            )
@jsii.implements(aws_cdk.core.IInspectable)
class CfnStreamConsumer(
    aws_cdk.core.CfnResource,
    metaclass=jsii.JSIIMeta,
    jsii_type="@aws-cdk/aws-kinesis.CfnStreamConsumer",
):
    '''L1 construct for the CloudFormation ``AWS::Kinesis::StreamConsumer`` resource.

    :cloudformationResource: AWS::Kinesis::StreamConsumer
    :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-streamconsumer.html
    '''

    def __init__(
        self,
        scope: aws_cdk.core.Construct,
        id: builtins.str,
        *,
        consumer_name: builtins.str,
        stream_arn: builtins.str,
    ) -> None:
        '''Create a new ``AWS::Kinesis::StreamConsumer``.

        :param scope: - scope in which this resource is defined.
        :param id: - scoped id of the resource.
        :param consumer_name: ``AWS::Kinesis::StreamConsumer.ConsumerName``.
        :param stream_arn: ``AWS::Kinesis::StreamConsumer.StreamARN``.
        '''
        # Collect the keyword arguments into the generated props struct, then
        # let the jsii kernel instantiate the underlying JS object.
        props = CfnStreamConsumerProps(
            consumer_name=consumer_name,
            stream_arn=stream_arn,
        )
        jsii.create(CfnStreamConsumer, self, [scope, id, props])

    @jsii.member(jsii_name="inspect")
    def inspect(self, inspector: aws_cdk.core.TreeInspector) -> None:
        '''Examines the CloudFormation resource and discloses attributes.

        :param inspector: - tree inspector to collect and process attributes.
        '''
        outcome = jsii.invoke(self, "inspect", [inspector])
        return typing.cast(None, outcome)

    @jsii.member(jsii_name="renderProperties")
    def _render_properties(
        self,
        props: typing.Mapping[builtins.str, typing.Any],
    ) -> typing.Mapping[builtins.str, typing.Any]:
        '''
        :param props: -
        '''
        rendered = jsii.invoke(self, "renderProperties", [props])
        return typing.cast(typing.Mapping[builtins.str, typing.Any], rendered)

    @jsii.python.classproperty # type: ignore[misc]
    @jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
    def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
        '''The CloudFormation resource type name for this resource class.'''
        raw = jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
        return typing.cast(builtins.str, raw)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrConsumerArn")
    def attr_consumer_arn(self) -> builtins.str:
        '''
        :cloudformationAttribute: ConsumerARN
        '''
        return typing.cast(builtins.str, jsii.get(self, "attrConsumerArn"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrConsumerCreationTimestamp")
    def attr_consumer_creation_timestamp(self) -> builtins.str:
        '''
        :cloudformationAttribute: ConsumerCreationTimestamp
        '''
        return typing.cast(builtins.str, jsii.get(self, "attrConsumerCreationTimestamp"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrConsumerName")
    def attr_consumer_name(self) -> builtins.str:
        '''
        :cloudformationAttribute: ConsumerName
        '''
        return typing.cast(builtins.str, jsii.get(self, "attrConsumerName"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrConsumerStatus")
    def attr_consumer_status(self) -> builtins.str:
        '''
        :cloudformationAttribute: ConsumerStatus
        '''
        return typing.cast(builtins.str, jsii.get(self, "attrConsumerStatus"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="attrStreamArn")
    def attr_stream_arn(self) -> builtins.str:
        '''
        :cloudformationAttribute: StreamARN
        '''
        return typing.cast(builtins.str, jsii.get(self, "attrStreamArn"))

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="cfnProperties")
    def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
        raw = jsii.get(self, "cfnProperties")
        return typing.cast(typing.Mapping[builtins.str, typing.Any], raw)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="consumerName")
    def consumer_name(self) -> builtins.str:
        '''``AWS::Kinesis::StreamConsumer.ConsumerName``.

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-streamconsumer.html#cfn-kinesis-streamconsumer-consumername
        '''
        return typing.cast(builtins.str, jsii.get(self, "consumerName"))

    @consumer_name.setter
    def consumer_name(self, value: builtins.str) -> None:
        jsii.set(self, "consumerName", value)

    @builtins.property # type: ignore[misc]
    @jsii.member(jsii_name="streamArn")
    def stream_arn(self) -> builtins.str:
        '''``AWS::Kinesis::StreamConsumer.StreamARN``.

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-streamconsumer.html#cfn-kinesis-streamconsumer-streamarn
        '''
        return typing.cast(builtins.str, jsii.get(self, "streamArn"))

    @stream_arn.setter
    def stream_arn(self, value: builtins.str) -> None:
        jsii.set(self, "streamArn", value)
@jsii.data_type(
    jsii_type="@aws-cdk/aws-kinesis.CfnStreamConsumerProps",
    jsii_struct_bases=[],
    name_mapping={"consumer_name": "consumerName", "stream_arn": "streamArn"},
)
class CfnStreamConsumerProps:
    def __init__(
        self,
        *,
        consumer_name: builtins.str,
        stream_arn: builtins.str,
    ) -> None:
        '''Properties for defining a ``AWS::Kinesis::StreamConsumer``.

        :param consumer_name: ``AWS::Kinesis::StreamConsumer.ConsumerName``.
        :param stream_arn: ``AWS::Kinesis::StreamConsumer.StreamARN``.

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-streamconsumer.html
        '''
        # Both fields are required, so the backing dict is built in one step.
        self._values: typing.Dict[str, typing.Any] = {
            "consumer_name": consumer_name,
            "stream_arn": stream_arn,
        }

    @builtins.property
    def consumer_name(self) -> builtins.str:
        '''``AWS::Kinesis::StreamConsumer.ConsumerName``.

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-streamconsumer.html#cfn-kinesis-streamconsumer-consumername
        '''
        value = self._values.get("consumer_name")
        assert value is not None, "Required property 'consumer_name' is missing"
        return typing.cast(builtins.str, value)

    @builtins.property
    def stream_arn(self) -> builtins.str:
        '''``AWS::Kinesis::StreamConsumer.StreamARN``.

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-streamconsumer.html#cfn-kinesis-streamconsumer-streamarn
        '''
        value = self._values.get("stream_arn")
        assert value is not None, "Required property 'stream_arn' is missing"
        return typing.cast(builtins.str, value)

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        '''Structs compare equal only when types and stored values both match.'''
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        '''Logical inverse of ``__eq__``.'''
        is_equal = rhs == self
        return not is_equal

    def __repr__(self) -> str:
        '''Debug representation listing every stored property as ``key=value``.'''
        pairs = [k + "=" + repr(v) for k, v in self._values.items()]
        return "CfnStreamConsumerProps(%s)" % ", ".join(pairs)
@jsii.data_type(
    jsii_type="@aws-cdk/aws-kinesis.CfnStreamProps",
    jsii_struct_bases=[],
    name_mapping={
        "shard_count": "shardCount",
        "name": "name",
        "retention_period_hours": "retentionPeriodHours",
        "stream_encryption": "streamEncryption",
        "tags": "tags",
    },
)
class CfnStreamProps:
    def __init__(
        self,
        *,
        shard_count: jsii.Number,
        name: typing.Optional[builtins.str] = None,
        retention_period_hours: typing.Optional[jsii.Number] = None,
        stream_encryption: typing.Optional[typing.Union[CfnStream.StreamEncryptionProperty, aws_cdk.core.IResolvable]] = None,
        tags: typing.Optional[typing.Sequence[aws_cdk.core.CfnTag]] = None,
    ) -> None:
        '''Properties for defining a ``AWS::Kinesis::Stream``.

        :param shard_count: ``AWS::Kinesis::Stream.ShardCount``.
        :param name: ``AWS::Kinesis::Stream.Name``.
        :param retention_period_hours: ``AWS::Kinesis::Stream.RetentionPeriodHours``.
        :param stream_encryption: ``AWS::Kinesis::Stream.StreamEncryption``.
        :param tags: ``AWS::Kinesis::Stream.Tags``.

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html
        '''
        # shard_count is the only required field; optional fields are stored
        # only when supplied, preserving the documented key order.
        self._values: typing.Dict[str, typing.Any] = {
            "shard_count": shard_count,
        }
        optional_entries = (
            ("name", name),
            ("retention_period_hours", retention_period_hours),
            ("stream_encryption", stream_encryption),
            ("tags", tags),
        )
        for key, value in optional_entries:
            if value is not None:
                self._values[key] = value

    @builtins.property
    def shard_count(self) -> jsii.Number:
        '''``AWS::Kinesis::Stream.ShardCount``.

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-shardcount
        '''
        value = self._values.get("shard_count")
        assert value is not None, "Required property 'shard_count' is missing"
        return typing.cast(jsii.Number, value)

    @builtins.property
    def name(self) -> typing.Optional[builtins.str]:
        '''``AWS::Kinesis::Stream.Name``.

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-name
        '''
        value = self._values.get("name")
        return typing.cast(typing.Optional[builtins.str], value)

    @builtins.property
    def retention_period_hours(self) -> typing.Optional[jsii.Number]:
        '''``AWS::Kinesis::Stream.RetentionPeriodHours``.

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-retentionperiodhours
        '''
        value = self._values.get("retention_period_hours")
        return typing.cast(typing.Optional[jsii.Number], value)

    @builtins.property
    def stream_encryption(
        self,
    ) -> typing.Optional[typing.Union[CfnStream.StreamEncryptionProperty, aws_cdk.core.IResolvable]]:
        '''``AWS::Kinesis::Stream.StreamEncryption``.

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-streamencryption
        '''
        value = self._values.get("stream_encryption")
        return typing.cast(typing.Optional[typing.Union[CfnStream.StreamEncryptionProperty, aws_cdk.core.IResolvable]], value)

    @builtins.property
    def tags(self) -> typing.Optional[typing.List[aws_cdk.core.CfnTag]]:
        '''``AWS::Kinesis::Stream.Tags``.

        :link: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesis-stream.html#cfn-kinesis-stream-tags
        '''
        value = self._values.get("tags")
        return typing.cast(typing.Optional[typing.List[aws_cdk.core.CfnTag]], value)

    def __eq__(self, rhs: typing.Any) -> builtins.bool:
        '''Structs compare equal only when types and stored values both match.'''
        if not isinstance(rhs, self.__class__):
            return False
        return self._values == rhs._values

    def __ne__(self, rhs: typing.Any) -> builtins.bool:
        '''Logical inverse of ``__eq__``.'''
        is_equal = rhs == self
        return not is_equal

    def __repr__(self) -> str:
        '''Debug representation listing every stored property as ``key=value``.'''
        pairs = [k + "=" + repr(v) for k, v in self._values.items()]
        return "CfnStreamProps(%s)" % ", ".join(pairs)
@jsii.interface(jsii_type="@aws-cdk/aws-kinesis.IStream")
class IStream(aws_cdk.core.IResource, typing_extensions.Protocol):
'''A Kinesis Stream.'''
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="streamArn")
def stream_arn(self) -> builtins.str:
'''The ARN of the stream.
:attribute: true
'''
...
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="streamName")
def stream_name(self) -> builtins.str:
'''The name of the stream.
:attribute: true
'''
...
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="encryptionKey")
def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:
'''Optional KMS encryption key associated with this stream.'''
...
@jsii.member(jsii_name="grant")
def grant(
self,
grantee: aws_cdk.aws_iam.IGrantable,
*actions: builtins.str,
) -> aws_cdk.aws_iam.Grant:
'''Grant the indicated permissions on this stream to the provided IAM principal.
:param grantee: -
:param actions: -
'''
...
@jsii.member(jsii_name="grantRead")
def grant_read(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
'''Grant read permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to ues the key to decrypt the
contents of the stream will also be granted.
:param grantee: -
'''
...
@jsii.member(jsii_name="grantReadWrite")
def grant_read_write(
self,
grantee: aws_cdk.aws_iam.IGrantable,
) -> aws_cdk.aws_iam.Grant:
'''Grants read/write permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to use the key for
encrypt/decrypt will also be granted.
:param grantee: -
'''
...
@jsii.member(jsii_name="grantWrite")
def grant_write(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
'''Grant write permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to ues the key to encrypt the
contents of the stream will also be granted.
:param grantee: -
'''
...
@jsii.member(jsii_name="metric")
def metric(
self,
metric_name: builtins.str,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''Return stream metric based from its metric name.
:param metric_name: name of the stream metric.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricGetRecords")
def metric_get_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records retrieved from the shard, measured over the specified time period.
Minimum, Maximum, and
Average statistics represent the records in a single GetRecords operation for the stream in the specified time
period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricGetRecordsBytes")
def metric_get_records_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes retrieved from the Kinesis stream, measured over the specified time period.
Minimum, Maximum,
and Average statistics represent the bytes in a single GetRecords operation for the stream in the specified time
period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricGetRecordsIteratorAgeMilliseconds")
def metric_get_records_iterator_age_milliseconds(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The age of the last record in all GetRecords calls made against a Kinesis stream, measured over the specified time period.
Age is the difference between the current time and when the last record of the GetRecords call was written
to the stream. The Minimum and Maximum statistics can be used to track the progress of Kinesis consumer
applications. A value of zero indicates that the records being read are completely caught up with the stream.
The metric defaults to maximum over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricGetRecordsLatency")
def metric_get_records_latency(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The time taken per GetRecords operation, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricGetRecordsSuccess")
def metric_get_records_success(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of successful GetRecords operations per stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricIncomingBytes")
def metric_incoming_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes successfully put to the Kinesis stream over the specified time period.
This metric includes
bytes from PutRecord and PutRecords operations. Minimum, Maximum, and Average statistics represent the bytes in a
single put operation for the stream in the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricIncomingRecords")
def metric_incoming_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records successfully put to the Kinesis stream over the specified time period.
This metric includes
record counts from PutRecord and PutRecords operations. Minimum, Maximum, and Average statistics represent the
records in a single put operation for the stream in the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
@jsii.member(jsii_name="metricPutRecordBytes")
def metric_put_record_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes put to the Kinesis stream using the PutRecord operation over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
...
    @jsii.member(jsii_name="metricPutRecordLatency")
    def metric_put_record_latency(
        self,
        *,
        account: typing.Optional[builtins.str] = None,
        color: typing.Optional[builtins.str] = None,
        dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
        dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
        label: typing.Optional[builtins.str] = None,
        period: typing.Optional[aws_cdk.core.Duration] = None,
        region: typing.Optional[builtins.str] = None,
        statistic: typing.Optional[builtins.str] = None,
        unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
    ) -> aws_cdk.aws_cloudwatch.Metric:
        '''The time taken per PutRecord operation, measured over the specified time period.
        The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
        :param account: Account which this metric comes from. Default: - Deployment account.
        :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
        :param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
        :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
        '''
        ...
    @jsii.member(jsii_name="metricPutRecordsBytes")
    def metric_put_records_bytes(
        self,
        *,
        account: typing.Optional[builtins.str] = None,
        color: typing.Optional[builtins.str] = None,
        dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
        dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
        label: typing.Optional[builtins.str] = None,
        period: typing.Optional[aws_cdk.core.Duration] = None,
        region: typing.Optional[builtins.str] = None,
        statistic: typing.Optional[builtins.str] = None,
        unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
    ) -> aws_cdk.aws_cloudwatch.Metric:
        '''The number of bytes put to the Kinesis stream using the PutRecords operation over the specified time period.
        The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
        :param account: Account which this metric comes from. Default: - Deployment account.
        :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
        :param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
        :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
        '''
        ...
    @jsii.member(jsii_name="metricPutRecordsFailedRecords")
    def metric_put_records_failed_records(
        self,
        *,
        account: typing.Optional[builtins.str] = None,
        color: typing.Optional[builtins.str] = None,
        dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
        dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
        label: typing.Optional[builtins.str] = None,
        period: typing.Optional[aws_cdk.core.Duration] = None,
        region: typing.Optional[builtins.str] = None,
        statistic: typing.Optional[builtins.str] = None,
        unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
    ) -> aws_cdk.aws_cloudwatch.Metric:
        '''The number of records rejected due to internal failures in a PutRecords operation per Kinesis data stream, measured over the specified time period.
        Occasional internal failures are to be expected and should be retried.
        The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
        :param account: Account which this metric comes from. Default: - Deployment account.
        :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
        :param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
        :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
        '''
        ...
    @jsii.member(jsii_name="metricPutRecordsLatency")
    def metric_put_records_latency(
        self,
        *,
        account: typing.Optional[builtins.str] = None,
        color: typing.Optional[builtins.str] = None,
        dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
        dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
        label: typing.Optional[builtins.str] = None,
        period: typing.Optional[aws_cdk.core.Duration] = None,
        region: typing.Optional[builtins.str] = None,
        statistic: typing.Optional[builtins.str] = None,
        unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
    ) -> aws_cdk.aws_cloudwatch.Metric:
        '''The time taken per PutRecords operation, measured over the specified time period.
        The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
        :param account: Account which this metric comes from. Default: - Deployment account.
        :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
        :param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
        :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
        '''
        ...
    @jsii.member(jsii_name="metricPutRecordsSuccess")
    def metric_put_records_success(
        self,
        *,
        account: typing.Optional[builtins.str] = None,
        color: typing.Optional[builtins.str] = None,
        dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
        dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
        label: typing.Optional[builtins.str] = None,
        period: typing.Optional[aws_cdk.core.Duration] = None,
        region: typing.Optional[builtins.str] = None,
        statistic: typing.Optional[builtins.str] = None,
        unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
    ) -> aws_cdk.aws_cloudwatch.Metric:
        '''The number of PutRecords operations where at least one record succeeded, per Kinesis stream, measured over the specified time period.
        The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
        :param account: Account which this metric comes from. Default: - Deployment account.
        :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
        :param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
        :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
        '''
        ...
    @jsii.member(jsii_name="metricPutRecordsSuccessfulRecords")
    def metric_put_records_successful_records(
        self,
        *,
        account: typing.Optional[builtins.str] = None,
        color: typing.Optional[builtins.str] = None,
        dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
        dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
        label: typing.Optional[builtins.str] = None,
        period: typing.Optional[aws_cdk.core.Duration] = None,
        region: typing.Optional[builtins.str] = None,
        statistic: typing.Optional[builtins.str] = None,
        unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
    ) -> aws_cdk.aws_cloudwatch.Metric:
        '''The number of successful records in a PutRecords operation per Kinesis data stream, measured over the specified time period.
        The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
        :param account: Account which this metric comes from. Default: - Deployment account.
        :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
        :param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
        :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
        '''
        ...
    @jsii.member(jsii_name="metricPutRecordsThrottledRecords")
    def metric_put_records_throttled_records(
        self,
        *,
        account: typing.Optional[builtins.str] = None,
        color: typing.Optional[builtins.str] = None,
        dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
        dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
        label: typing.Optional[builtins.str] = None,
        period: typing.Optional[aws_cdk.core.Duration] = None,
        region: typing.Optional[builtins.str] = None,
        statistic: typing.Optional[builtins.str] = None,
        unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
    ) -> aws_cdk.aws_cloudwatch.Metric:
        '''The number of records rejected due to throttling in a PutRecords operation per Kinesis data stream, measured over the specified time period.
        The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
        :param account: Account which this metric comes from. Default: - Deployment account.
        :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
        :param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
        :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
        '''
        ...
    @jsii.member(jsii_name="metricPutRecordsTotalRecords")
    def metric_put_records_total_records(
        self,
        *,
        account: typing.Optional[builtins.str] = None,
        color: typing.Optional[builtins.str] = None,
        dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
        dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
        label: typing.Optional[builtins.str] = None,
        period: typing.Optional[aws_cdk.core.Duration] = None,
        region: typing.Optional[builtins.str] = None,
        statistic: typing.Optional[builtins.str] = None,
        unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
    ) -> aws_cdk.aws_cloudwatch.Metric:
        '''The total number of records sent in a PutRecords operation per Kinesis data stream, measured over the specified time period.
        The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
        :param account: Account which this metric comes from. Default: - Deployment account.
        :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
        :param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
        :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
        '''
        ...
    @jsii.member(jsii_name="metricPutRecordSuccess")
    def metric_put_record_success(
        self,
        *,
        account: typing.Optional[builtins.str] = None,
        color: typing.Optional[builtins.str] = None,
        dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
        dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
        label: typing.Optional[builtins.str] = None,
        period: typing.Optional[aws_cdk.core.Duration] = None,
        region: typing.Optional[builtins.str] = None,
        statistic: typing.Optional[builtins.str] = None,
        unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
    ) -> aws_cdk.aws_cloudwatch.Metric:
        '''The number of successful PutRecord operations per Kinesis stream, measured over the specified time period.
        Average
        reflects the percentage of successful writes to a stream.
        The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
        :param account: Account which this metric comes from. Default: - Deployment account.
        :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
        :param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
        :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
        '''
        ...
    @jsii.member(jsii_name="metricReadProvisionedThroughputExceeded")
    def metric_read_provisioned_throughput_exceeded(
        self,
        *,
        account: typing.Optional[builtins.str] = None,
        color: typing.Optional[builtins.str] = None,
        dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
        dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
        label: typing.Optional[builtins.str] = None,
        period: typing.Optional[aws_cdk.core.Duration] = None,
        region: typing.Optional[builtins.str] = None,
        statistic: typing.Optional[builtins.str] = None,
        unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
    ) -> aws_cdk.aws_cloudwatch.Metric:
        '''The number of GetRecords calls throttled for the stream over the specified time period.
        The most commonly used
        statistic for this metric is Average.
        When the Minimum statistic has a value of 1, all records were throttled for the stream during the specified time
        period.
        When the Maximum statistic has a value of 0 (zero), no records were throttled for the stream during the specified
        time period.
        The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
        :param account: Account which this metric comes from. Default: - Deployment account.
        :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
        :param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
        :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
        '''
        ...
    @jsii.member(jsii_name="metricWriteProvisionedThroughputExceeded")
    def metric_write_provisioned_throughput_exceeded(
        self,
        *,
        account: typing.Optional[builtins.str] = None,
        color: typing.Optional[builtins.str] = None,
        dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
        dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
        label: typing.Optional[builtins.str] = None,
        period: typing.Optional[aws_cdk.core.Duration] = None,
        region: typing.Optional[builtins.str] = None,
        statistic: typing.Optional[builtins.str] = None,
        unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
    ) -> aws_cdk.aws_cloudwatch.Metric:
        '''The number of records rejected due to throttling for the stream over the specified time period.
        This metric
        includes throttling from PutRecord and PutRecords operations.
        When the Minimum statistic has a non-zero value, records were being throttled for the stream during the specified
        time period.
        When the Maximum statistic has a value of 0 (zero), no records were being throttled for the stream during the
        specified time period.
        The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
        :param account: Account which this metric comes from. Default: - Deployment account.
        :param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
        :param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
        :param dimensions_map: Dimensions of the metric. Default: - No dimensions.
        :param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
        :param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
        :param region: Region which this metric comes from. Default: - Deployment region.
        :param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount" | "n" - "pNN.NN" Default: Average
        :param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all metric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
        '''
        ...
class _IStreamProxy(
jsii.proxy_for(aws_cdk.core.IResource) # type: ignore[misc]
):
'''A Kinesis Stream.'''
__jsii_type__: typing.ClassVar[str] = "@aws-cdk/aws-kinesis.IStream"
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="streamArn")
def stream_arn(self) -> builtins.str:
'''The ARN of the stream.
:attribute: true
'''
return typing.cast(builtins.str, jsii.get(self, "streamArn"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="streamName")
def stream_name(self) -> builtins.str:
'''The name of the stream.
:attribute: true
'''
return typing.cast(builtins.str, jsii.get(self, "streamName"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="encryptionKey")
def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:
'''Optional KMS encryption key associated with this stream.'''
return typing.cast(typing.Optional[aws_cdk.aws_kms.IKey], jsii.get(self, "encryptionKey"))
@jsii.member(jsii_name="grant")
def grant(
self,
grantee: aws_cdk.aws_iam.IGrantable,
*actions: builtins.str,
) -> aws_cdk.aws_iam.Grant:
'''Grant the indicated permissions on this stream to the provided IAM principal.
:param grantee: -
:param actions: -
'''
return typing.cast(aws_cdk.aws_iam.Grant, jsii.invoke(self, "grant", [grantee, *actions]))
@jsii.member(jsii_name="grantRead")
def grant_read(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
'''Grant read permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to ues the key to decrypt the
contents of the stream will also be granted.
:param grantee: -
'''
return typing.cast(aws_cdk.aws_iam.Grant, jsii.invoke(self, "grantRead", [grantee]))
@jsii.member(jsii_name="grantReadWrite")
def grant_read_write(
self,
grantee: aws_cdk.aws_iam.IGrantable,
) -> aws_cdk.aws_iam.Grant:
'''Grants read/write permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to use the key for
encrypt/decrypt will also be granted.
:param grantee: -
'''
return typing.cast(aws_cdk.aws_iam.Grant, jsii.invoke(self, "grantReadWrite", [grantee]))
@jsii.member(jsii_name="grantWrite")
def grant_write(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
'''Grant write permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to ues the key to encrypt the
contents of the stream will also be granted.
:param grantee: -
'''
return typing.cast(aws_cdk.aws_iam.Grant, jsii.invoke(self, "grantWrite", [grantee]))
@jsii.member(jsii_name="metric")
def metric(
self,
metric_name: builtins.str,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''Return stream metric based from its metric name.
:param metric_name: name of the stream metric.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metric", [metric_name, props]))
@jsii.member(jsii_name="metricGetRecords")
def metric_get_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records retrieved from the shard, measured over the specified time period.
Minimum, Maximum, and
Average statistics represent the records in a single GetRecords operation for the stream in the specified time
period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecords", [props]))
@jsii.member(jsii_name="metricGetRecordsBytes")
def metric_get_records_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes retrieved from the Kinesis stream, measured over the specified time period.
Minimum, Maximum,
and Average statistics represent the bytes in a single GetRecords operation for the stream in the specified time
period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsBytes", [props]))
@jsii.member(jsii_name="metricGetRecordsIteratorAgeMilliseconds")
def metric_get_records_iterator_age_milliseconds(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The age of the last record in all GetRecords calls made against a Kinesis stream, measured over the specified time period.
Age is the difference between the current time and when the last record of the GetRecords call was written
to the stream. The Minimum and Maximum statistics can be used to track the progress of Kinesis consumer
applications. A value of zero indicates that the records being read are completely caught up with the stream.
The metric defaults to maximum over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsIteratorAgeMilliseconds", [props]))
@jsii.member(jsii_name="metricGetRecordsLatency")
def metric_get_records_latency(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The time taken per GetRecords operation, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsLatency", [props]))
@jsii.member(jsii_name="metricGetRecordsSuccess")
def metric_get_records_success(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of successful GetRecords operations per stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsSuccess", [props]))
@jsii.member(jsii_name="metricIncomingBytes")
def metric_incoming_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes successfully put to the Kinesis stream over the specified time period.
This metric includes
bytes from PutRecord and PutRecords operations. Minimum, Maximum, and Average statistics represent the bytes in a
single put operation for the stream in the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricIncomingBytes", [props]))
@jsii.member(jsii_name="metricIncomingRecords")
def metric_incoming_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records successfully put to the Kinesis stream over the specified time period.
This metric includes
record counts from PutRecord and PutRecords operations. Minimum, Maximum, and Average statistics represent the
records in a single put operation for the stream in the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricIncomingRecords", [props]))
@jsii.member(jsii_name="metricPutRecordBytes")
def metric_put_record_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes put to the Kinesis stream using the PutRecord operation over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordBytes", [props]))
@jsii.member(jsii_name="metricPutRecordLatency")
def metric_put_record_latency(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The time taken per PutRecord operation, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordLatency", [props]))
@jsii.member(jsii_name="metricPutRecordsBytes")
def metric_put_records_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes put to the Kinesis stream using the PutRecords operation over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsBytes", [props]))
@jsii.member(jsii_name="metricPutRecordsFailedRecords")
def metric_put_records_failed_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records rejected due to internal failures in a PutRecords operation per Kinesis data stream, measured over the specified time period.
Occasional internal failures are to be expected and should be retried.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsFailedRecords", [props]))
@jsii.member(jsii_name="metricPutRecordsLatency")
def metric_put_records_latency(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The time taken per PutRecords operation, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsLatency", [props]))
@jsii.member(jsii_name="metricPutRecordsSuccess")
def metric_put_records_success(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of PutRecords operations where at least one record succeeded, per Kinesis stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsSuccess", [props]))
@jsii.member(jsii_name="metricPutRecordsSuccessfulRecords")
def metric_put_records_successful_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of successful records in a PutRecords operation per Kinesis data stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsSuccessfulRecords", [props]))
@jsii.member(jsii_name="metricPutRecordsThrottledRecords")
def metric_put_records_throttled_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records rejected due to throttling in a PutRecords operation per Kinesis data stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsThrottledRecords", [props]))
@jsii.member(jsii_name="metricPutRecordsTotalRecords")
def metric_put_records_total_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The total number of records sent in a PutRecords operation per Kinesis data stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsTotalRecords", [props]))
@jsii.member(jsii_name="metricPutRecordSuccess")
def metric_put_record_success(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of successful PutRecord operations per Kinesis stream, measured over the specified time period.
Average
reflects the percentage of successful writes to a stream.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordSuccess", [props]))
@jsii.member(jsii_name="metricReadProvisionedThroughputExceeded")
def metric_read_provisioned_throughput_exceeded(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of GetRecords calls throttled for the stream over the specified time period.
The most commonly used
statistic for this metric is Average.
When the Minimum statistic has a value of 1, all records were throttled for the stream during the specified time
period.
When the Maximum statistic has a value of 0 (zero), no records were throttled for the stream during the specified
time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricReadProvisionedThroughputExceeded", [props]))
@jsii.member(jsii_name="metricWriteProvisionedThroughputExceeded")
def metric_write_provisioned_throughput_exceeded(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records rejected due to throttling for the stream over the specified time period.
This metric
includes throttling from PutRecord and PutRecords operations.
When the Minimum statistic has a non-zero value, records were being throttled for the stream during the specified
time period.
When the Maximum statistic has a value of 0 (zero), no records were being throttled for the stream during the
specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricWriteProvisionedThroughputExceeded", [props]))
# Attach a "__jsii_proxy_class__(): typing.Type" accessor to the interface so the
# jsii runtime can resolve the concrete proxy class to instantiate for IStream.
typing.cast(typing.Any, IStream).__jsii_proxy_class__ = lambda: _IStreamProxy
@jsii.implements(IStream)
class Stream(
aws_cdk.core.Resource,
metaclass=jsii.JSIIMeta,
jsii_type="@aws-cdk/aws-kinesis.Stream",
):
'''A Kinesis stream.
Can be encrypted with a KMS key.
'''
def __init__(
self,
scope: constructs.Construct,
id: builtins.str,
*,
encryption: typing.Optional["StreamEncryption"] = None,
encryption_key: typing.Optional[aws_cdk.aws_kms.IKey] = None,
retention_period: typing.Optional[aws_cdk.core.Duration] = None,
shard_count: typing.Optional[jsii.Number] = None,
stream_name: typing.Optional[builtins.str] = None,
) -> None:
'''
:param scope: -
:param id: -
:param encryption: The kind of server-side encryption to apply to this stream. If you choose KMS, you can specify a KMS key via ``encryptionKey``. If encryption key is not specified, a key will automatically be created. Default: - StreamEncryption.KMS if encrypted Streams are supported in the region or StreamEncryption.UNENCRYPTED otherwise. StreamEncryption.KMS if an encryption key is supplied through the encryptionKey property
:param encryption_key: External KMS key to use for stream encryption. The 'encryption' property must be set to "Kms". Default: - Kinesis Data Streams master key ('/alias/aws/kinesis'). If encryption is set to StreamEncryption.KMS and this property is undefined, a new KMS key will be created and associated with this stream.
:param retention_period: The number of hours for the data records that are stored in shards to remain accessible. Default: Duration.hours(24)
:param shard_count: The number of shards for the stream. Default: 1
:param stream_name: Enforces a particular physical stream name. Default:
'''
props = StreamProps(
encryption=encryption,
encryption_key=encryption_key,
retention_period=retention_period,
shard_count=shard_count,
stream_name=stream_name,
)
jsii.create(Stream, self, [scope, id, props])
@jsii.member(jsii_name="fromStreamArn") # type: ignore[misc]
@builtins.classmethod
def from_stream_arn(
cls,
scope: constructs.Construct,
id: builtins.str,
stream_arn: builtins.str,
) -> IStream:
'''Import an existing Kinesis Stream provided an ARN.
:param scope: The parent creating construct (usually ``this``).
:param id: The construct's name.
:param stream_arn: Stream ARN (i.e. arn:aws:kinesis:::stream/Foo).
'''
return typing.cast(IStream, jsii.sinvoke(cls, "fromStreamArn", [scope, id, stream_arn]))
@jsii.member(jsii_name="fromStreamAttributes") # type: ignore[misc]
@builtins.classmethod
def from_stream_attributes(
cls,
scope: constructs.Construct,
id: builtins.str,
*,
stream_arn: builtins.str,
encryption_key: typing.Optional[aws_cdk.aws_kms.IKey] = None,
) -> IStream:
'''Creates a Stream construct that represents an external stream.
:param scope: The parent creating construct (usually ``this``).
:param id: The construct's name.
:param stream_arn: The ARN of the stream.
:param encryption_key: The KMS key securing the contents of the stream if encryption is enabled. Default: - No encryption
'''
attrs = StreamAttributes(stream_arn=stream_arn, encryption_key=encryption_key)
return typing.cast(IStream, jsii.sinvoke(cls, "fromStreamAttributes", [scope, id, attrs]))
@jsii.member(jsii_name="grant")
def grant(
self,
grantee: aws_cdk.aws_iam.IGrantable,
*actions: builtins.str,
) -> aws_cdk.aws_iam.Grant:
'''Grant the indicated permissions on this stream to the given IAM principal (Role/Group/User).
:param grantee: -
:param actions: -
'''
return typing.cast(aws_cdk.aws_iam.Grant, jsii.invoke(self, "grant", [grantee, *actions]))
@jsii.member(jsii_name="grantRead")
def grant_read(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
'''Grant read permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to ues the key to decrypt the
contents of the stream will also be granted.
:param grantee: -
'''
return typing.cast(aws_cdk.aws_iam.Grant, jsii.invoke(self, "grantRead", [grantee]))
@jsii.member(jsii_name="grantReadWrite")
def grant_read_write(
self,
grantee: aws_cdk.aws_iam.IGrantable,
) -> aws_cdk.aws_iam.Grant:
'''Grants read/write permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to use the key for
encrypt/decrypt will also be granted.
:param grantee: -
'''
return typing.cast(aws_cdk.aws_iam.Grant, jsii.invoke(self, "grantReadWrite", [grantee]))
@jsii.member(jsii_name="grantWrite")
def grant_write(self, grantee: aws_cdk.aws_iam.IGrantable) -> aws_cdk.aws_iam.Grant:
'''Grant write permissions for this stream and its contents to an IAM principal (Role/Group/User).
If an encryption key is used, permission to ues the key to encrypt the
contents of the stream will also be granted.
:param grantee: -
'''
return typing.cast(aws_cdk.aws_iam.Grant, jsii.invoke(self, "grantWrite", [grantee]))
@jsii.member(jsii_name="metric")
def metric(
self,
metric_name: builtins.str,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''Return stream metric based from its metric name.
:param metric_name: name of the stream metric.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metric", [metric_name, props]))
@jsii.member(jsii_name="metricGetRecords")
def metric_get_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records retrieved from the shard, measured over the specified time period.
Minimum, Maximum, and
Average statistics represent the records in a single GetRecords operation for the stream in the specified time
period.
average
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecords", [props]))
@jsii.member(jsii_name="metricGetRecordsBytes")
def metric_get_records_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes retrieved from the Kinesis stream, measured over the specified time period.
Minimum, Maximum,
and Average statistics represent the bytes in a single GetRecords operation for the stream in the specified time
period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsBytes", [props]))
@jsii.member(jsii_name="metricGetRecordsIteratorAgeMilliseconds")
def metric_get_records_iterator_age_milliseconds(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The age of the last record in all GetRecords calls made against a Kinesis stream, measured over the specified time period.
Age is the difference between the current time and when the last record of the GetRecords call was written
to the stream. The Minimum and Maximum statistics can be used to track the progress of Kinesis consumer
applications. A value of zero indicates that the records being read are completely caught up with the stream.
The metric defaults to maximum over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsIteratorAgeMilliseconds", [props]))
@jsii.member(jsii_name="metricGetRecordsLatency")
def metric_get_records_latency(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of successful GetRecords operations per stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsLatency", [props]))
@jsii.member(jsii_name="metricGetRecordsSuccess")
def metric_get_records_success(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of successful GetRecords operations per stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricGetRecordsSuccess", [props]))
@jsii.member(jsii_name="metricIncomingBytes")
def metric_incoming_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes successfully put to the Kinesis stream over the specified time period.
This metric includes
bytes from PutRecord and PutRecords operations. Minimum, Maximum, and Average statistics represent the bytes in a
single put operation for the stream in the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricIncomingBytes", [props]))
@jsii.member(jsii_name="metricIncomingRecords")
def metric_incoming_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records successfully put to the Kinesis stream over the specified time period.
This metric includes
record counts from PutRecord and PutRecords operations. Minimum, Maximum, and Average statistics represent the
records in a single put operation for the stream in the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricIncomingRecords", [props]))
@jsii.member(jsii_name="metricPutRecordBytes")
def metric_put_record_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes put to the Kinesis stream using the PutRecord operation over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordBytes", [props]))
@jsii.member(jsii_name="metricPutRecordLatency")
def metric_put_record_latency(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The time taken per PutRecord operation, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordLatency", [props]))
@jsii.member(jsii_name="metricPutRecordsBytes")
def metric_put_records_bytes(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of bytes put to the Kinesis stream using the PutRecords operation over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsBytes", [props]))
@jsii.member(jsii_name="metricPutRecordsFailedRecords")
def metric_put_records_failed_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records rejected due to internal failures in a PutRecords operation per Kinesis data stream, measured over the specified time period.
Occasional internal failures are to be expected and should be retried.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsFailedRecords", [props]))
@jsii.member(jsii_name="metricPutRecordsLatency")
def metric_put_records_latency(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The time taken per PutRecords operation, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsLatency", [props]))
@jsii.member(jsii_name="metricPutRecordsSuccess")
def metric_put_records_success(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of PutRecords operations where at least one record succeeded, per Kinesis stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsSuccess", [props]))
@jsii.member(jsii_name="metricPutRecordsSuccessfulRecords")
def metric_put_records_successful_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of successful records in a PutRecords operation per Kinesis data stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsSuccessfulRecords", [props]))
@jsii.member(jsii_name="metricPutRecordsThrottledRecords")
def metric_put_records_throttled_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records rejected due to throttling in a PutRecords operation per Kinesis data stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsThrottledRecords", [props]))
@jsii.member(jsii_name="metricPutRecordsTotalRecords")
def metric_put_records_total_records(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The total number of records sent in a PutRecords operation per Kinesis data stream, measured over the specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordsTotalRecords", [props]))
@jsii.member(jsii_name="metricPutRecordSuccess")
def metric_put_record_success(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of successful PutRecord operations per Kinesis stream, measured over the specified time period.
Average
reflects the percentage of successful writes to a stream.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricPutRecordSuccess", [props]))
@jsii.member(jsii_name="metricReadProvisionedThroughputExceeded")
def metric_read_provisioned_throughput_exceeded(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of GetRecords calls throttled for the stream over the specified time period.
The most commonly used
statistic for this metric is Average.
When the Minimum statistic has a value of 1, all records were throttled for the stream during the specified time
period.
When the Maximum statistic has a value of 0 (zero), no records were throttled for the stream during the specified
time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricReadProvisionedThroughputExceeded", [props]))
@jsii.member(jsii_name="metricWriteProvisionedThroughputExceeded")
def metric_write_provisioned_throughput_exceeded(
self,
*,
account: typing.Optional[builtins.str] = None,
color: typing.Optional[builtins.str] = None,
dimensions: typing.Optional[typing.Mapping[builtins.str, typing.Any]] = None,
dimensions_map: typing.Optional[typing.Mapping[builtins.str, builtins.str]] = None,
label: typing.Optional[builtins.str] = None,
period: typing.Optional[aws_cdk.core.Duration] = None,
region: typing.Optional[builtins.str] = None,
statistic: typing.Optional[builtins.str] = None,
unit: typing.Optional[aws_cdk.aws_cloudwatch.Unit] = None,
) -> aws_cdk.aws_cloudwatch.Metric:
'''The number of records rejected due to throttling for the stream over the specified time period.
This metric
includes throttling from PutRecord and PutRecords operations.
When the Minimum statistic has a non-zero value, records were being throttled for the stream during the specified
time period.
When the Maximum statistic has a value of 0 (zero), no records were being throttled for the stream during the
specified time period.
The metric defaults to average over 5 minutes, it can be changed by passing ``statistic`` and ``period`` properties.
:param account: Account which this metric comes from. Default: - Deployment account.
:param color: The hex color code, prefixed with '#' (e.g. '#00ff00'), to use when this metric is rendered on a graph. The ``Color`` class has a set of standard colors that can be used here. Default: - Automatic color
:param dimensions: (deprecated) Dimensions of the metric. Default: - No dimensions.
:param dimensions_map: Dimensions of the metric. Default: - No dimensions.
:param label: Label for this metric when added to a Graph in a Dashboard. Default: - No label
:param period: The period over which the specified statistic is applied. Default: Duration.minutes(5)
:param region: Region which this metric comes from. Default: - Deployment region.
:param statistic: What function to use for aggregating. Can be one of the following: - "Minimum" | "min" - "Maximum" | "max" - "Average" | "avg" - "Sum" | "sum" - "SampleCount | "n" - "pNN.NN" Default: Average
:param unit: Unit used to filter the metric stream. Only refer to datums emitted to the metric stream with the given unit and ignore all others. Only useful when datums are being emitted to the same metric stream under different units. The default is to use all matric datums in the stream, regardless of unit, which is recommended in nearly all cases. CloudWatch does not honor this property for graphs. Default: - All metric datums in the given metric stream
'''
props = aws_cdk.aws_cloudwatch.MetricOptions(
account=account,
color=color,
dimensions=dimensions,
dimensions_map=dimensions_map,
label=label,
period=period,
region=region,
statistic=statistic,
unit=unit,
)
return typing.cast(aws_cdk.aws_cloudwatch.Metric, jsii.invoke(self, "metricWriteProvisionedThroughputExceeded", [props]))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="streamArn")
def stream_arn(self) -> builtins.str:
'''The ARN of the stream.'''
return typing.cast(builtins.str, jsii.get(self, "streamArn"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="streamName")
def stream_name(self) -> builtins.str:
'''The name of the stream.'''
return typing.cast(builtins.str, jsii.get(self, "streamName"))
@builtins.property # type: ignore[misc]
@jsii.member(jsii_name="encryptionKey")
def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:
'''Optional KMS encryption key associated with this stream.'''
return typing.cast(typing.Optional[aws_cdk.aws_kms.IKey], jsii.get(self, "encryptionKey"))
@jsii.data_type(
jsii_type="@aws-cdk/aws-kinesis.StreamAttributes",
jsii_struct_bases=[],
name_mapping={"stream_arn": "streamArn", "encryption_key": "encryptionKey"},
)
class StreamAttributes:
def __init__(
self,
*,
stream_arn: builtins.str,
encryption_key: typing.Optional[aws_cdk.aws_kms.IKey] = None,
) -> None:
'''A reference to a stream.
The easiest way to instantiate is to call
``stream.export()``. Then, the consumer can use ``Stream.import(this, ref)`` and
get a ``Stream``.
:param stream_arn: The ARN of the stream.
:param encryption_key: The KMS key securing the contents of the stream if encryption is enabled. Default: - No encryption
'''
self._values: typing.Dict[str, typing.Any] = {
"stream_arn": stream_arn,
}
if encryption_key is not None:
self._values["encryption_key"] = encryption_key
@builtins.property
def stream_arn(self) -> builtins.str:
'''The ARN of the stream.'''
result = self._values.get("stream_arn")
assert result is not None, "Required property 'stream_arn' is missing"
return typing.cast(builtins.str, result)
@builtins.property
def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:
'''The KMS key securing the contents of the stream if encryption is enabled.
:default: - No encryption
'''
result = self._values.get("encryption_key")
return typing.cast(typing.Optional[aws_cdk.aws_kms.IKey], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "StreamAttributes(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.enum(jsii_type="@aws-cdk/aws-kinesis.StreamEncryption")
class StreamEncryption(enum.Enum):
'''What kind of server-side encryption to apply to this stream.'''
UNENCRYPTED = "UNENCRYPTED"
'''Records in the stream are not encrypted.'''
KMS = "KMS"
'''Server-side encryption with a KMS key managed by the user.
If ``encryptionKey`` is specified, this key will be used, otherwise, one will be defined.
'''
MANAGED = "MANAGED"
'''Server-side encryption with a master key managed by Amazon Kinesis.'''
@jsii.data_type(
jsii_type="@aws-cdk/aws-kinesis.StreamProps",
jsii_struct_bases=[],
name_mapping={
"encryption": "encryption",
"encryption_key": "encryptionKey",
"retention_period": "retentionPeriod",
"shard_count": "shardCount",
"stream_name": "streamName",
},
)
class StreamProps:
def __init__(
self,
*,
encryption: typing.Optional[StreamEncryption] = None,
encryption_key: typing.Optional[aws_cdk.aws_kms.IKey] = None,
retention_period: typing.Optional[aws_cdk.core.Duration] = None,
shard_count: typing.Optional[jsii.Number] = None,
stream_name: typing.Optional[builtins.str] = None,
) -> None:
'''Properties for a Kinesis Stream.
:param encryption: The kind of server-side encryption to apply to this stream. If you choose KMS, you can specify a KMS key via ``encryptionKey``. If encryption key is not specified, a key will automatically be created. Default: - StreamEncryption.KMS if encrypted Streams are supported in the region or StreamEncryption.UNENCRYPTED otherwise. StreamEncryption.KMS if an encryption key is supplied through the encryptionKey property
:param encryption_key: External KMS key to use for stream encryption. The 'encryption' property must be set to "Kms". Default: - Kinesis Data Streams master key ('/alias/aws/kinesis'). If encryption is set to StreamEncryption.KMS and this property is undefined, a new KMS key will be created and associated with this stream.
:param retention_period: The number of hours for the data records that are stored in shards to remain accessible. Default: Duration.hours(24)
:param shard_count: The number of shards for the stream. Default: 1
:param stream_name: Enforces a particular physical stream name. Default:
'''
self._values: typing.Dict[str, typing.Any] = {}
if encryption is not None:
self._values["encryption"] = encryption
if encryption_key is not None:
self._values["encryption_key"] = encryption_key
if retention_period is not None:
self._values["retention_period"] = retention_period
if shard_count is not None:
self._values["shard_count"] = shard_count
if stream_name is not None:
self._values["stream_name"] = stream_name
@builtins.property
def encryption(self) -> typing.Optional[StreamEncryption]:
'''The kind of server-side encryption to apply to this stream.
If you choose KMS, you can specify a KMS key via ``encryptionKey``. If
encryption key is not specified, a key will automatically be created.
:default:
- StreamEncryption.KMS if encrypted Streams are supported in the region
or StreamEncryption.UNENCRYPTED otherwise.
StreamEncryption.KMS if an encryption key is supplied through the encryptionKey property
'''
result = self._values.get("encryption")
return typing.cast(typing.Optional[StreamEncryption], result)
@builtins.property
def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:
'''External KMS key to use for stream encryption.
The 'encryption' property must be set to "Kms".
:default:
- Kinesis Data Streams master key ('/alias/aws/kinesis').
If encryption is set to StreamEncryption.KMS and this property is undefined, a new KMS key
will be created and associated with this stream.
'''
result = self._values.get("encryption_key")
return typing.cast(typing.Optional[aws_cdk.aws_kms.IKey], result)
@builtins.property
def retention_period(self) -> typing.Optional[aws_cdk.core.Duration]:
'''The number of hours for the data records that are stored in shards to remain accessible.
:default: Duration.hours(24)
'''
result = self._values.get("retention_period")
return typing.cast(typing.Optional[aws_cdk.core.Duration], result)
@builtins.property
def shard_count(self) -> typing.Optional[jsii.Number]:
'''The number of shards for the stream.
:default: 1
'''
result = self._values.get("shard_count")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def stream_name(self) -> typing.Optional[builtins.str]:
'''Enforces a particular physical stream name.
:default:
'''
result = self._values.get("stream_name")
return typing.cast(typing.Optional[builtins.str], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "StreamProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
__all__ = [
"CfnStream",
"CfnStreamConsumer",
"CfnStreamConsumerProps",
"CfnStreamProps",
"IStream",
"Stream",
"StreamAttributes",
"StreamEncryption",
"StreamProps",
]
publication.publish()
| [
"publication.publish",
"jsii.create",
"jsii.invoke",
"jsii.interface",
"jsii.implements",
"jsii.sget",
"jsii.proxy_for",
"jsii.set",
"jsii.data_type",
"jsii.sinvoke",
"jsii.enum",
"jsii.get",
"jsii.member",
"typing.cast"
] | [((7926, 7968), 'jsii.implements', 'jsii.implements', (['aws_cdk.core.IInspectable'], {}), '(aws_cdk.core.IInspectable)\n', (7941, 7968), False, 'import jsii\n'), ((16464, 16506), 'jsii.implements', 'jsii.implements', (['aws_cdk.core.IInspectable'], {}), '(aws_cdk.core.IInspectable)\n', (16479, 16506), False, 'import jsii\n'), ((21499, 21675), 'jsii.data_type', 'jsii.data_type', ([], {'jsii_type': '"""@aws-cdk/aws-kinesis.CfnStreamConsumerProps"""', 'jsii_struct_bases': '[]', 'name_mapping': "{'consumer_name': 'consumerName', 'stream_arn': 'streamArn'}"}), "(jsii_type='@aws-cdk/aws-kinesis.CfnStreamConsumerProps',\n jsii_struct_bases=[], name_mapping={'consumer_name': 'consumerName',\n 'stream_arn': 'streamArn'})\n", (21513, 21675), False, 'import jsii\n'), ((23676, 23940), 'jsii.data_type', 'jsii.data_type', ([], {'jsii_type': '"""@aws-cdk/aws-kinesis.CfnStreamProps"""', 'jsii_struct_bases': '[]', 'name_mapping': "{'shard_count': 'shardCount', 'name': 'name', 'retention_period_hours':\n 'retentionPeriodHours', 'stream_encryption': 'streamEncryption', 'tags':\n 'tags'}"}), "(jsii_type='@aws-cdk/aws-kinesis.CfnStreamProps',\n jsii_struct_bases=[], name_mapping={'shard_count': 'shardCount', 'name':\n 'name', 'retention_period_hours': 'retentionPeriodHours',\n 'stream_encryption': 'streamEncryption', 'tags': 'tags'})\n", (23690, 23940), False, 'import jsii\n'), ((28040, 28096), 'jsii.interface', 'jsii.interface', ([], {'jsii_type': '"""@aws-cdk/aws-kinesis.IStream"""'}), "(jsii_type='@aws-cdk/aws-kinesis.IStream')\n", (28054, 28096), False, 'import jsii\n'), ((82269, 82307), 'jsii.proxy_for', 'jsii.proxy_for', (['aws_cdk.core.IResource'], {}), '(aws_cdk.core.IResource)\n', (82283, 82307), False, 'import jsii\n'), ((145895, 145919), 'jsii.implements', 'jsii.implements', (['IStream'], {}), '(IStream)\n', (145910, 145919), False, 'import jsii\n'), ((212796, 212968), 'jsii.data_type', 'jsii.data_type', ([], {'jsii_type': '"""@aws-cdk/aws-kinesis.StreamAttributes"""', 
'jsii_struct_bases': '[]', 'name_mapping': "{'stream_arn': 'streamArn', 'encryption_key': 'encryptionKey'}"}), "(jsii_type='@aws-cdk/aws-kinesis.StreamAttributes',\n jsii_struct_bases=[], name_mapping={'stream_arn': 'streamArn',\n 'encryption_key': 'encryptionKey'})\n", (212810, 212968), False, 'import jsii\n'), ((214776, 214836), 'jsii.enum', 'jsii.enum', ([], {'jsii_type': '"""@aws-cdk/aws-kinesis.StreamEncryption"""'}), "(jsii_type='@aws-cdk/aws-kinesis.StreamEncryption')\n", (214785, 214836), False, 'import jsii\n'), ((215317, 215590), 'jsii.data_type', 'jsii.data_type', ([], {'jsii_type': '"""@aws-cdk/aws-kinesis.StreamProps"""', 'jsii_struct_bases': '[]', 'name_mapping': "{'encryption': 'encryption', 'encryption_key': 'encryptionKey',\n 'retention_period': 'retentionPeriod', 'shard_count': 'shardCount',\n 'stream_name': 'streamName'}"}), "(jsii_type='@aws-cdk/aws-kinesis.StreamProps',\n jsii_struct_bases=[], name_mapping={'encryption': 'encryption',\n 'encryption_key': 'encryptionKey', 'retention_period':\n 'retentionPeriod', 'shard_count': 'shardCount', 'stream_name':\n 'streamName'})\n", (215331, 215590), False, 'import jsii\n'), ((220510, 220531), 'publication.publish', 'publication.publish', ([], {}), '()\n', (220529, 220531), False, 'import publication\n'), ((9600, 9632), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""inspect"""'}), "(jsii_name='inspect')\n", (9611, 9632), False, 'import jsii\n'), ((9950, 9991), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""renderProperties"""'}), "(jsii_name='renderProperties')\n", (9961, 9991), False, 'import jsii\n'), ((10365, 10412), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""CFN_RESOURCE_TYPE_NAME"""'}), "(jsii_name='CFN_RESOURCE_TYPE_NAME')\n", (10376, 10412), False, 'import jsii\n'), ((10676, 10708), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""attrArn"""'}), "(jsii_name='attrArn')\n", (10687, 10708), False, 'import jsii\n'), ((10929, 10967), 'jsii.member', 'jsii.member', ([], 
{'jsii_name': '"""cfnProperties"""'}), "(jsii_name='cfnProperties')\n", (10940, 10967), False, 'import jsii\n'), ((11195, 11224), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""tags"""'}), "(jsii_name='tags')\n", (11206, 11224), False, 'import jsii\n'), ((11587, 11622), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""shardCount"""'}), "(jsii_name='shardCount')\n", (11598, 11622), False, 'import jsii\n'), ((12110, 12139), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""name"""'}), "(jsii_name='name')\n", (12121, 12139), False, 'import jsii\n'), ((12636, 12681), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""retentionPeriodHours"""'}), "(jsii_name='retentionPeriodHours')\n", (12647, 12681), False, 'import jsii\n'), ((13293, 13334), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""streamEncryption"""'}), "(jsii_name='streamEncryption')\n", (13304, 13334), False, 'import jsii\n'), ((14104, 14293), 'jsii.data_type', 'jsii.data_type', ([], {'jsii_type': '"""@aws-cdk/aws-kinesis.CfnStream.StreamEncryptionProperty"""', 'jsii_struct_bases': '[]', 'name_mapping': "{'encryption_type': 'encryptionType', 'key_id': 'keyId'}"}), "(jsii_type=\n '@aws-cdk/aws-kinesis.CfnStream.StreamEncryptionProperty',\n jsii_struct_bases=[], name_mapping={'encryption_type': 'encryptionType',\n 'key_id': 'keyId'})\n", (14118, 14293), False, 'import jsii\n'), ((17601, 17633), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""inspect"""'}), "(jsii_name='inspect')\n", (17612, 17633), False, 'import jsii\n'), ((17951, 17992), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""renderProperties"""'}), "(jsii_name='renderProperties')\n", (17962, 17992), False, 'import jsii\n'), ((18366, 18413), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""CFN_RESOURCE_TYPE_NAME"""'}), "(jsii_name='CFN_RESOURCE_TYPE_NAME')\n", (18377, 18413), False, 'import jsii\n'), ((18677, 18717), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""attrConsumerArn"""'}), 
"(jsii_name='attrConsumerArn')\n", (18688, 18717), False, 'import jsii\n'), ((18963, 19017), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""attrConsumerCreationTimestamp"""'}), "(jsii_name='attrConsumerCreationTimestamp')\n", (18974, 19017), False, 'import jsii\n'), ((19306, 19347), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""attrConsumerName"""'}), "(jsii_name='attrConsumerName')\n", (19317, 19347), False, 'import jsii\n'), ((19596, 19639), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""attrConsumerStatus"""'}), "(jsii_name='attrConsumerStatus')\n", (19607, 19639), False, 'import jsii\n'), ((19894, 19932), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""attrStreamArn"""'}), "(jsii_name='attrStreamArn')\n", (19905, 19932), False, 'import jsii\n'), ((20172, 20210), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""cfnProperties"""'}), "(jsii_name='cfnProperties')\n", (20183, 20210), False, 'import jsii\n'), ((20438, 20475), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""consumerName"""'}), "(jsii_name='consumerName')\n", (20449, 20475), False, 'import jsii\n'), ((21004, 21038), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""streamArn"""'}), "(jsii_name='streamArn')\n", (21015, 21038), False, 'import jsii\n'), ((28242, 28276), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""streamArn"""'}), "(jsii_name='streamArn')\n", (28253, 28276), False, 'import jsii\n'), ((28453, 28488), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""streamName"""'}), "(jsii_name='streamName')\n", (28464, 28488), False, 'import jsii\n'), ((28667, 28705), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""encryptionKey"""'}), "(jsii_name='encryptionKey')\n", (28678, 28705), False, 'import jsii\n'), ((28866, 28896), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""grant"""'}), "(jsii_name='grant')\n", (28877, 28896), False, 'import jsii\n'), ((29207, 29241), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""grantRead"""'}), 
"(jsii_name='grantRead')\n", (29218, 29241), False, 'import jsii\n'), ((29626, 29665), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""grantReadWrite"""'}), "(jsii_name='grantReadWrite')\n", (29637, 29665), False, 'import jsii\n'), ((30068, 30103), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""grantWrite"""'}), "(jsii_name='grantWrite')\n", (30079, 30103), False, 'import jsii\n'), ((30490, 30521), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metric"""'}), "(jsii_name='metric')\n", (30501, 30521), False, 'import jsii\n'), ((32846, 32887), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricGetRecords"""'}), "(jsii_name='metricGetRecords')\n", (32857, 32887), False, 'import jsii\n'), ((35466, 35512), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricGetRecordsBytes"""'}), "(jsii_name='metricGetRecordsBytes')\n", (35477, 35512), False, 'import jsii\n'), ((38102, 38166), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricGetRecordsIteratorAgeMilliseconds"""'}), "(jsii_name='metricGetRecordsIteratorAgeMilliseconds')\n", (38113, 38166), False, 'import jsii\n'), ((40985, 41033), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricGetRecordsLatency"""'}), "(jsii_name='metricGetRecordsLatency')\n", (40996, 41033), False, 'import jsii\n'), ((43447, 43495), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricGetRecordsSuccess"""'}), "(jsii_name='metricGetRecordsSuccess')\n", (43458, 43495), False, 'import jsii\n'), ((45927, 45971), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricIncomingBytes"""'}), "(jsii_name='metricIncomingBytes')\n", (45938, 45971), False, 'import jsii\n'), ((48615, 48661), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricIncomingRecords"""'}), "(jsii_name='metricIncomingRecords')\n", (48626, 48661), False, 'import jsii\n'), ((51319, 51364), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordBytes"""'}), "(jsii_name='metricPutRecordBytes')\n", (51330, 51364), 
False, 'import jsii\n'), ((53801, 53848), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordLatency"""'}), "(jsii_name='metricPutRecordLatency')\n", (53812, 53848), False, 'import jsii\n'), ((56260, 56306), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsBytes"""'}), "(jsii_name='metricPutRecordsBytes')\n", (56271, 56306), False, 'import jsii\n'), ((58745, 58799), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsFailedRecords"""'}), "(jsii_name='metricPutRecordsFailedRecords')\n", (58756, 58799), False, 'import jsii\n'), ((61366, 61414), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsLatency"""'}), "(jsii_name='metricPutRecordsLatency')\n", (61377, 61414), False, 'import jsii\n'), ((63828, 63876), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsSuccess"""'}), "(jsii_name='metricPutRecordsSuccess')\n", (63839, 63876), False, 'import jsii\n'), ((66342, 66400), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsSuccessfulRecords"""'}), "(jsii_name='metricPutRecordsSuccessfulRecords')\n", (66353, 66400), False, 'import jsii\n'), ((68868, 68925), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsThrottledRecords"""'}), "(jsii_name='metricPutRecordsThrottledRecords')\n", (68879, 68925), False, 'import jsii\n'), ((71408, 71461), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsTotalRecords"""'}), "(jsii_name='metricPutRecordsTotalRecords')\n", (71419, 71461), False, 'import jsii\n'), ((73924, 73971), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordSuccess"""'}), "(jsii_name='metricPutRecordSuccess')\n", (73935, 73971), False, 'import jsii\n'), ((76492, 76556), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricReadProvisionedThroughputExceeded"""'}), "(jsii_name='metricReadProvisionedThroughputExceeded')\n", (76503, 76556), False, 'import jsii\n'), ((79352, 79417), 'jsii.member', 
'jsii.member', ([], {'jsii_name': '"""metricWriteProvisionedThroughputExceeded"""'}), "(jsii_name='metricWriteProvisionedThroughputExceeded')\n", (79363, 79417), False, 'import jsii\n'), ((82484, 82518), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""streamArn"""'}), "(jsii_name='streamArn')\n", (82495, 82518), False, 'import jsii\n'), ((82753, 82788), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""streamName"""'}), "(jsii_name='streamName')\n", (82764, 82788), False, 'import jsii\n'), ((83026, 83064), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""encryptionKey"""'}), "(jsii_name='encryptionKey')\n", (83037, 83064), False, 'import jsii\n'), ((83312, 83342), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""grant"""'}), "(jsii_name='grant')\n", (83323, 83342), False, 'import jsii\n'), ((83740, 83774), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""grantRead"""'}), "(jsii_name='grantRead')\n", (83751, 83774), False, 'import jsii\n'), ((84240, 84279), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""grantReadWrite"""'}), "(jsii_name='grantReadWrite')\n", (84251, 84279), False, 'import jsii\n'), ((84768, 84803), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""grantWrite"""'}), "(jsii_name='grantWrite')\n", (84779, 84803), False, 'import jsii\n'), ((85272, 85303), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metric"""'}), "(jsii_name='metric')\n", (85283, 85303), False, 'import jsii\n'), ((88057, 88098), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricGetRecords"""'}), "(jsii_name='metricGetRecords')\n", (88068, 88098), False, 'import jsii\n'), ((91103, 91149), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricGetRecordsBytes"""'}), "(jsii_name='metricGetRecordsBytes')\n", (91114, 91149), False, 'import jsii\n'), ((94170, 94234), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricGetRecordsIteratorAgeMilliseconds"""'}), "(jsii_name='metricGetRecordsIteratorAgeMilliseconds')\n", (94181, 94234), 
False, 'import jsii\n'), ((97502, 97550), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricGetRecordsLatency"""'}), "(jsii_name='metricGetRecordsLatency')\n", (97513, 97550), False, 'import jsii\n'), ((100397, 100445), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricGetRecordsSuccess"""'}), "(jsii_name='metricGetRecordsSuccess')\n", (100408, 100445), False, 'import jsii\n'), ((103310, 103354), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricIncomingBytes"""'}), "(jsii_name='metricIncomingBytes')\n", (103321, 103354), False, 'import jsii\n'), ((106427, 106473), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricIncomingRecords"""'}), "(jsii_name='metricIncomingRecords')\n", (106438, 106473), False, 'import jsii\n'), ((109562, 109607), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordBytes"""'}), "(jsii_name='metricPutRecordBytes')\n", (109573, 109607), False, 'import jsii\n'), ((112474, 112521), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordLatency"""'}), "(jsii_name='metricPutRecordLatency')\n", (112485, 112521), False, 'import jsii\n'), ((115365, 115411), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsBytes"""'}), "(jsii_name='metricPutRecordsBytes')\n", (115376, 115411), False, 'import jsii\n'), ((118281, 118335), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsFailedRecords"""'}), "(jsii_name='metricPutRecordsFailedRecords')\n", (118292, 118335), False, 'import jsii\n'), ((121341, 121389), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsLatency"""'}), "(jsii_name='metricPutRecordsLatency')\n", (121352, 121389), False, 'import jsii\n'), ((124236, 124284), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsSuccess"""'}), "(jsii_name='metricPutRecordsSuccess')\n", (124247, 124284), False, 'import jsii\n'), ((127183, 127241), 'jsii.member', 'jsii.member', ([], {'jsii_name': 
'"""metricPutRecordsSuccessfulRecords"""'}), "(jsii_name='metricPutRecordsSuccessfulRecords')\n", (127194, 127241), False, 'import jsii\n'), ((130152, 130209), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsThrottledRecords"""'}), "(jsii_name='metricPutRecordsThrottledRecords')\n", (130163, 130209), False, 'import jsii\n'), ((133134, 133187), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsTotalRecords"""'}), "(jsii_name='metricPutRecordsTotalRecords')\n", (133145, 133187), False, 'import jsii\n'), ((136088, 136135), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordSuccess"""'}), "(jsii_name='metricPutRecordSuccess')\n", (136099, 136135), False, 'import jsii\n'), ((139088, 139152), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricReadProvisionedThroughputExceeded"""'}), "(jsii_name='metricReadProvisionedThroughputExceeded')\n", (139099, 139152), False, 'import jsii\n'), ((142397, 142462), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricWriteProvisionedThroughputExceeded"""'}), "(jsii_name='metricWriteProvisionedThroughputExceeded')\n", (142408, 142462), False, 'import jsii\n'), ((145813, 145845), 'typing.cast', 'typing.cast', (['typing.Any', 'IStream'], {}), '(typing.Any, IStream)\n', (145824, 145845), False, 'import typing\n'), ((148005, 148043), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""fromStreamArn"""'}), "(jsii_name='fromStreamArn')\n", (148016, 148043), False, 'import jsii\n'), ((148610, 148655), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""fromStreamAttributes"""'}), "(jsii_name='fromStreamAttributes')\n", (148621, 148655), False, 'import jsii\n'), ((149517, 149547), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""grant"""'}), "(jsii_name='grant')\n", (149528, 149547), False, 'import jsii\n'), ((149960, 149994), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""grantRead"""'}), "(jsii_name='grantRead')\n", (149971, 149994), False, 'import jsii\n'), 
((150460, 150499), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""grantReadWrite"""'}), "(jsii_name='grantReadWrite')\n", (150471, 150499), False, 'import jsii\n'), ((150988, 151023), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""grantWrite"""'}), "(jsii_name='grantWrite')\n", (150999, 151023), False, 'import jsii\n'), ((151492, 151523), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metric"""'}), "(jsii_name='metric')\n", (151503, 151523), False, 'import jsii\n'), ((154277, 154318), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricGetRecords"""'}), "(jsii_name='metricGetRecords')\n", (154288, 154318), False, 'import jsii\n'), ((157339, 157385), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricGetRecordsBytes"""'}), "(jsii_name='metricGetRecordsBytes')\n", (157350, 157385), False, 'import jsii\n'), ((160406, 160470), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricGetRecordsIteratorAgeMilliseconds"""'}), "(jsii_name='metricGetRecordsIteratorAgeMilliseconds')\n", (160417, 160470), False, 'import jsii\n'), ((163738, 163786), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricGetRecordsLatency"""'}), "(jsii_name='metricGetRecordsLatency')\n", (163749, 163786), False, 'import jsii\n'), ((166651, 166699), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricGetRecordsSuccess"""'}), "(jsii_name='metricGetRecordsSuccess')\n", (166662, 166699), False, 'import jsii\n'), ((169564, 169608), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricIncomingBytes"""'}), "(jsii_name='metricIncomingBytes')\n", (169575, 169608), False, 'import jsii\n'), ((172681, 172727), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricIncomingRecords"""'}), "(jsii_name='metricIncomingRecords')\n", (172692, 172727), False, 'import jsii\n'), ((175816, 175861), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordBytes"""'}), "(jsii_name='metricPutRecordBytes')\n", (175827, 175861), False, 'import jsii\n'), 
((178728, 178775), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordLatency"""'}), "(jsii_name='metricPutRecordLatency')\n", (178739, 178775), False, 'import jsii\n'), ((181619, 181665), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsBytes"""'}), "(jsii_name='metricPutRecordsBytes')\n", (181630, 181665), False, 'import jsii\n'), ((184535, 184589), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsFailedRecords"""'}), "(jsii_name='metricPutRecordsFailedRecords')\n", (184546, 184589), False, 'import jsii\n'), ((187595, 187643), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsLatency"""'}), "(jsii_name='metricPutRecordsLatency')\n", (187606, 187643), False, 'import jsii\n'), ((190490, 190538), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsSuccess"""'}), "(jsii_name='metricPutRecordsSuccess')\n", (190501, 190538), False, 'import jsii\n'), ((193437, 193495), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsSuccessfulRecords"""'}), "(jsii_name='metricPutRecordsSuccessfulRecords')\n", (193448, 193495), False, 'import jsii\n'), ((196406, 196463), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsThrottledRecords"""'}), "(jsii_name='metricPutRecordsThrottledRecords')\n", (196417, 196463), False, 'import jsii\n'), ((199388, 199441), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordsTotalRecords"""'}), "(jsii_name='metricPutRecordsTotalRecords')\n", (199399, 199441), False, 'import jsii\n'), ((202342, 202389), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricPutRecordSuccess"""'}), "(jsii_name='metricPutRecordSuccess')\n", (202353, 202389), False, 'import jsii\n'), ((205342, 205406), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricReadProvisionedThroughputExceeded"""'}), "(jsii_name='metricReadProvisionedThroughputExceeded')\n", (205353, 205406), False, 'import jsii\n'), ((208651, 208716), 
'jsii.member', 'jsii.member', ([], {'jsii_name': '"""metricWriteProvisionedThroughputExceeded"""'}), "(jsii_name='metricWriteProvisionedThroughputExceeded')\n", (208662, 208716), False, 'import jsii\n'), ((212041, 212075), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""streamArn"""'}), "(jsii_name='streamArn')\n", (212052, 212075), False, 'import jsii\n'), ((212275, 212310), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""streamName"""'}), "(jsii_name='streamName')\n", (212286, 212310), False, 'import jsii\n'), ((212513, 212551), 'jsii.member', 'jsii.member', ([], {'jsii_name': '"""encryptionKey"""'}), "(jsii_name='encryptionKey')\n", (212524, 212551), False, 'import jsii\n'), ((9545, 9593), 'jsii.create', 'jsii.create', (['CfnStream', 'self', '[scope, id, props]'], {}), '(CfnStream, self, [scope, id, props])\n', (9556, 9593), False, 'import jsii\n'), ((12024, 12059), 'jsii.set', 'jsii.set', (['self', '"""shardCount"""', 'value'], {}), "(self, 'shardCount', value)\n", (12032, 12059), False, 'import jsii\n'), ((12556, 12585), 'jsii.set', 'jsii.set', (['self', '"""name"""', 'value'], {}), "(self, 'name', value)\n", (12564, 12585), False, 'import jsii\n'), ((13197, 13242), 'jsii.set', 'jsii.set', (['self', '"""retentionPeriodHours"""', 'value'], {}), "(self, 'retentionPeriodHours', value)\n", (13205, 13242), False, 'import jsii\n'), ((14056, 14097), 'jsii.set', 'jsii.set', (['self', '"""streamEncryption"""', 'value'], {}), "(self, 'streamEncryption', value)\n", (14064, 14097), False, 'import jsii\n'), ((17538, 17594), 'jsii.create', 'jsii.create', (['CfnStreamConsumer', 'self', '[scope, id, props]'], {}), '(CfnStreamConsumer, self, [scope, id, props])\n', (17549, 17594), False, 'import jsii\n'), ((20916, 20953), 'jsii.set', 'jsii.set', (['self', '"""consumerName"""', 'value'], {}), "(self, 'consumerName', value)\n", (20924, 20953), False, 'import jsii\n'), ((21461, 21495), 'jsii.set', 'jsii.set', (['self', '"""streamArn"""', 'value'], {}), "(self, 
'streamArn', value)\n", (21469, 21495), False, 'import jsii\n'), ((22786, 22819), 'typing.cast', 'typing.cast', (['builtins.str', 'result'], {}), '(builtins.str, result)\n', (22797, 22819), False, 'import typing\n'), ((23251, 23284), 'typing.cast', 'typing.cast', (['builtins.str', 'result'], {}), '(builtins.str, result)\n', (23262, 23284), False, 'import typing\n'), ((25833, 25865), 'typing.cast', 'typing.cast', (['jsii.Number', 'result'], {}), '(jsii.Number, result)\n', (25844, 25865), False, 'import typing\n'), ((26189, 26239), 'typing.cast', 'typing.cast', (['typing.Optional[builtins.str]', 'result'], {}), '(typing.Optional[builtins.str], result)\n', (26200, 26239), False, 'import typing\n'), ((26630, 26679), 'typing.cast', 'typing.cast', (['typing.Optional[jsii.Number]', 'result'], {}), '(typing.Optional[jsii.Number], result)\n', (26641, 26679), False, 'import typing\n'), ((27130, 27246), 'typing.cast', 'typing.cast', (['typing.Optional[typing.Union[CfnStream.StreamEncryptionProperty, aws_cdk.\n core.IResolvable]]', 'result'], {}), '(typing.Optional[typing.Union[CfnStream.StreamEncryptionProperty,\n aws_cdk.core.IResolvable]], result)\n', (27141, 27246), False, 'import typing\n'), ((27586, 27656), 'typing.cast', 'typing.cast', (['typing.Optional[typing.List[aws_cdk.core.CfnTag]]', 'result'], {}), '(typing.Optional[typing.List[aws_cdk.core.CfnTag]], result)\n', (27597, 27656), False, 'import typing\n'), ((147953, 147998), 'jsii.create', 'jsii.create', (['Stream', 'self', '[scope, id, props]'], {}), '(Stream, self, [scope, id, props])\n', (147964, 147998), False, 'import jsii\n'), ((214004, 214037), 'typing.cast', 'typing.cast', (['builtins.str', 'result'], {}), '(builtins.str, result)\n', (214015, 214037), False, 'import typing\n'), ((214332, 214390), 'typing.cast', 'typing.cast', (['typing.Optional[aws_cdk.aws_kms.IKey]', 'result'], {}), '(typing.Optional[aws_cdk.aws_kms.IKey], result)\n', (214343, 214390), False, 'import typing\n'), ((218341, 218395), 
'typing.cast', 'typing.cast', (['typing.Optional[StreamEncryption]', 'result'], {}), '(typing.Optional[StreamEncryption], result)\n', (218352, 218395), False, 'import typing\n'), ((218927, 218985), 'typing.cast', 'typing.cast', (['typing.Optional[aws_cdk.aws_kms.IKey]', 'result'], {}), '(typing.Optional[aws_cdk.aws_kms.IKey], result)\n', (218938, 218985), False, 'import typing\n'), ((219303, 219362), 'typing.cast', 'typing.cast', (['typing.Optional[aws_cdk.core.Duration]', 'result'], {}), '(typing.Optional[aws_cdk.core.Duration], result)\n', (219314, 219362), False, 'import typing\n'), ((219591, 219640), 'typing.cast', 'typing.cast', (['typing.Optional[jsii.Number]', 'result'], {}), '(typing.Optional[jsii.Number], result)\n', (219602, 219640), False, 'import typing\n'), ((219875, 219925), 'typing.cast', 'typing.cast', (['typing.Optional[builtins.str]', 'result'], {}), '(typing.Optional[builtins.str], result)\n', (219886, 219925), False, 'import typing\n'), ((9901, 9942), 'jsii.invoke', 'jsii.invoke', (['self', '"""inspect"""', '[inspector]'], {}), "(self, 'inspect', [inspector])\n", (9912, 9942), False, 'import jsii\n'), ((10259, 10305), 'jsii.invoke', 'jsii.invoke', (['self', '"""renderProperties"""', '[props]'], {}), "(self, 'renderProperties', [props])\n", (10270, 10305), False, 'import jsii\n'), ((10584, 10624), 'jsii.sget', 'jsii.sget', (['cls', '"""CFN_RESOURCE_TYPE_NAME"""'], {}), "(cls, 'CFN_RESOURCE_TYPE_NAME')\n", (10593, 10624), False, 'import jsii\n'), ((10852, 10877), 'jsii.get', 'jsii.get', (['self', '"""attrArn"""'], {}), "(self, 'attrArn')\n", (10860, 10877), False, 'import jsii\n'), ((11112, 11143), 'jsii.get', 'jsii.get', (['self', '"""cfnProperties"""'], {}), "(self, 'cfnProperties')\n", (11120, 11143), False, 'import jsii\n'), ((11513, 11535), 'jsii.get', 'jsii.get', (['self', '"""tags"""'], {}), "(self, 'tags')\n", (11521, 11535), False, 'import jsii\n'), ((11906, 11934), 'jsii.get', 'jsii.get', (['self', '"""shardCount"""'], {}), "(self, 
'shardCount')\n", (11914, 11934), False, 'import jsii\n'), ((12440, 12462), 'jsii.get', 'jsii.get', (['self', '"""name"""'], {}), "(self, 'name')\n", (12448, 12462), False, 'import jsii\n'), ((13030, 13068), 'jsii.get', 'jsii.get', (['self', '"""retentionPeriodHours"""'], {}), "(self, 'retentionPeriodHours')\n", (13038, 13068), False, 'import jsii\n'), ((13815, 13849), 'jsii.get', 'jsii.get', (['self', '"""streamEncryption"""'], {}), "(self, 'streamEncryption')\n", (13823, 13849), False, 'import jsii\n'), ((15502, 15535), 'typing.cast', 'typing.cast', (['builtins.str', 'result'], {}), '(builtins.str, result)\n', (15513, 15535), False, 'import typing\n'), ((16005, 16038), 'typing.cast', 'typing.cast', (['builtins.str', 'result'], {}), '(builtins.str, result)\n', (16016, 16038), False, 'import typing\n'), ((17902, 17943), 'jsii.invoke', 'jsii.invoke', (['self', '"""inspect"""', '[inspector]'], {}), "(self, 'inspect', [inspector])\n", (17913, 17943), False, 'import jsii\n'), ((18260, 18306), 'jsii.invoke', 'jsii.invoke', (['self', '"""renderProperties"""', '[props]'], {}), "(self, 'renderProperties', [props])\n", (18271, 18306), False, 'import jsii\n'), ((18585, 18625), 'jsii.sget', 'jsii.sget', (['cls', '"""CFN_RESOURCE_TYPE_NAME"""'], {}), "(cls, 'CFN_RESOURCE_TYPE_NAME')\n", (18594, 18625), False, 'import jsii\n'), ((18878, 18911), 'jsii.get', 'jsii.get', (['self', '"""attrConsumerArn"""'], {}), "(self, 'attrConsumerArn')\n", (18886, 18911), False, 'import jsii\n'), ((19207, 19254), 'jsii.get', 'jsii.get', (['self', '"""attrConsumerCreationTimestamp"""'], {}), "(self, 'attrConsumerCreationTimestamp')\n", (19215, 19254), False, 'import jsii\n'), ((19510, 19544), 'jsii.get', 'jsii.get', (['self', '"""attrConsumerName"""'], {}), "(self, 'attrConsumerName')\n", (19518, 19544), False, 'import jsii\n'), ((19806, 19842), 'jsii.get', 'jsii.get', (['self', '"""attrConsumerStatus"""'], {}), "(self, 'attrConsumerStatus')\n", (19814, 19842), False, 'import jsii\n'), ((20089, 
20120), 'jsii.get', 'jsii.get', (['self', '"""attrStreamArn"""'], {}), "(self, 'attrStreamArn')\n", (20097, 20120), False, 'import jsii\n'), ((20355, 20386), 'jsii.get', 'jsii.get', (['self', '"""cfnProperties"""'], {}), "(self, 'cfnProperties')\n", (20363, 20386), False, 'import jsii\n'), ((20791, 20821), 'jsii.get', 'jsii.get', (['self', '"""consumerName"""'], {}), "(self, 'consumerName')\n", (20799, 20821), False, 'import jsii\n'), ((21345, 21372), 'jsii.get', 'jsii.get', (['self', '"""streamArn"""'], {}), "(self, 'streamArn')\n", (21353, 21372), False, 'import jsii\n'), ((82674, 82701), 'jsii.get', 'jsii.get', (['self', '"""streamArn"""'], {}), "(self, 'streamArn')\n", (82682, 82701), False, 'import jsii\n'), ((82946, 82974), 'jsii.get', 'jsii.get', (['self', '"""streamName"""'], {}), "(self, 'streamName')\n", (82954, 82974), False, 'import jsii\n'), ((83273, 83304), 'jsii.get', 'jsii.get', (['self', '"""encryptionKey"""'], {}), "(self, 'encryptionKey')\n", (83281, 83304), False, 'import jsii\n'), ((83685, 83732), 'jsii.invoke', 'jsii.invoke', (['self', '"""grant"""', '[grantee, *actions]'], {}), "(self, 'grant', [grantee, *actions])\n", (83696, 83732), False, 'import jsii\n'), ((84191, 84232), 'jsii.invoke', 'jsii.invoke', (['self', '"""grantRead"""', '[grantee]'], {}), "(self, 'grantRead', [grantee])\n", (84202, 84232), False, 'import jsii\n'), ((84714, 84760), 'jsii.invoke', 'jsii.invoke', (['self', '"""grantReadWrite"""', '[grantee]'], {}), "(self, 'grantReadWrite', [grantee])\n", (84725, 84760), False, 'import jsii\n'), ((85222, 85264), 'jsii.invoke', 'jsii.invoke', (['self', '"""grantWrite"""', '[grantee]'], {}), "(self, 'grantWrite', [grantee])\n", (85233, 85264), False, 'import jsii\n'), ((88000, 88049), 'jsii.invoke', 'jsii.invoke', (['self', '"""metric"""', '[metric_name, props]'], {}), "(self, 'metric', [metric_name, props])\n", (88011, 88049), False, 'import jsii\n'), ((91049, 91095), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricGetRecords"""', 
'[props]'], {}), "(self, 'metricGetRecords', [props])\n", (91060, 91095), False, 'import jsii\n'), ((94111, 94162), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricGetRecordsBytes"""', '[props]'], {}), "(self, 'metricGetRecordsBytes', [props])\n", (94122, 94162), False, 'import jsii\n'), ((97425, 97494), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricGetRecordsIteratorAgeMilliseconds"""', '[props]'], {}), "(self, 'metricGetRecordsIteratorAgeMilliseconds', [props])\n", (97436, 97494), False, 'import jsii\n'), ((100336, 100389), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricGetRecordsLatency"""', '[props]'], {}), "(self, 'metricGetRecordsLatency', [props])\n", (100347, 100389), False, 'import jsii\n'), ((103249, 103302), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricGetRecordsSuccess"""', '[props]'], {}), "(self, 'metricGetRecordsSuccess', [props])\n", (103260, 103302), False, 'import jsii\n'), ((106370, 106419), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricIncomingBytes"""', '[props]'], {}), "(self, 'metricIncomingBytes', [props])\n", (106381, 106419), False, 'import jsii\n'), ((109503, 109554), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricIncomingRecords"""', '[props]'], {}), "(self, 'metricIncomingRecords', [props])\n", (109514, 109554), False, 'import jsii\n'), ((112416, 112466), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordBytes"""', '[props]'], {}), "(self, 'metricPutRecordBytes', [props])\n", (112427, 112466), False, 'import jsii\n'), ((115305, 115357), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordLatency"""', '[props]'], {}), "(self, 'metricPutRecordLatency', [props])\n", (115316, 115357), False, 'import jsii\n'), ((118222, 118273), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordsBytes"""', '[props]'], {}), "(self, 'metricPutRecordsBytes', [props])\n", (118233, 118273), False, 'import jsii\n'), ((121274, 121333), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordsFailedRecords"""', 
'[props]'], {}), "(self, 'metricPutRecordsFailedRecords', [props])\n", (121285, 121333), False, 'import jsii\n'), ((124175, 124228), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordsLatency"""', '[props]'], {}), "(self, 'metricPutRecordsLatency', [props])\n", (124186, 124228), False, 'import jsii\n'), ((127122, 127175), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordsSuccess"""', '[props]'], {}), "(self, 'metricPutRecordsSuccess', [props])\n", (127133, 127175), False, 'import jsii\n'), ((130081, 130144), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordsSuccessfulRecords"""', '[props]'], {}), "(self, 'metricPutRecordsSuccessfulRecords', [props])\n", (130092, 130144), False, 'import jsii\n'), ((133064, 133126), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordsThrottledRecords"""', '[props]'], {}), "(self, 'metricPutRecordsThrottledRecords', [props])\n", (133075, 133126), False, 'import jsii\n'), ((136022, 136080), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordsTotalRecords"""', '[props]'], {}), "(self, 'metricPutRecordsTotalRecords', [props])\n", (136033, 136080), False, 'import jsii\n'), ((139028, 139080), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordSuccess"""', '[props]'], {}), "(self, 'metricPutRecordSuccess', [props])\n", (139039, 139080), False, 'import jsii\n'), ((142320, 142389), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricReadProvisionedThroughputExceeded"""', '[props]'], {}), "(self, 'metricReadProvisionedThroughputExceeded', [props])\n", (142331, 142389), False, 'import jsii\n'), ((145665, 145735), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricWriteProvisionedThroughputExceeded"""', '[props]'], {}), "(self, 'metricWriteProvisionedThroughputExceeded', [props])\n", (145676, 145735), False, 'import jsii\n'), ((148543, 148602), 'jsii.sinvoke', 'jsii.sinvoke', (['cls', '"""fromStreamArn"""', '[scope, id, stream_arn]'], {}), "(cls, 'fromStreamArn', [scope, id, stream_arn])\n", 
(148555, 148602), False, 'import jsii\n'), ((149448, 149509), 'jsii.sinvoke', 'jsii.sinvoke', (['cls', '"""fromStreamAttributes"""', '[scope, id, attrs]'], {}), "(cls, 'fromStreamAttributes', [scope, id, attrs])\n", (149460, 149509), False, 'import jsii\n'), ((149905, 149952), 'jsii.invoke', 'jsii.invoke', (['self', '"""grant"""', '[grantee, *actions]'], {}), "(self, 'grant', [grantee, *actions])\n", (149916, 149952), False, 'import jsii\n'), ((150411, 150452), 'jsii.invoke', 'jsii.invoke', (['self', '"""grantRead"""', '[grantee]'], {}), "(self, 'grantRead', [grantee])\n", (150422, 150452), False, 'import jsii\n'), ((150934, 150980), 'jsii.invoke', 'jsii.invoke', (['self', '"""grantReadWrite"""', '[grantee]'], {}), "(self, 'grantReadWrite', [grantee])\n", (150945, 150980), False, 'import jsii\n'), ((151442, 151484), 'jsii.invoke', 'jsii.invoke', (['self', '"""grantWrite"""', '[grantee]'], {}), "(self, 'grantWrite', [grantee])\n", (151453, 151484), False, 'import jsii\n'), ((154220, 154269), 'jsii.invoke', 'jsii.invoke', (['self', '"""metric"""', '[metric_name, props]'], {}), "(self, 'metric', [metric_name, props])\n", (154231, 154269), False, 'import jsii\n'), ((157285, 157331), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricGetRecords"""', '[props]'], {}), "(self, 'metricGetRecords', [props])\n", (157296, 157331), False, 'import jsii\n'), ((160347, 160398), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricGetRecordsBytes"""', '[props]'], {}), "(self, 'metricGetRecordsBytes', [props])\n", (160358, 160398), False, 'import jsii\n'), ((163661, 163730), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricGetRecordsIteratorAgeMilliseconds"""', '[props]'], {}), "(self, 'metricGetRecordsIteratorAgeMilliseconds', [props])\n", (163672, 163730), False, 'import jsii\n'), ((166590, 166643), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricGetRecordsLatency"""', '[props]'], {}), "(self, 'metricGetRecordsLatency', [props])\n", (166601, 166643), False, 'import jsii\n'), 
((169503, 169556), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricGetRecordsSuccess"""', '[props]'], {}), "(self, 'metricGetRecordsSuccess', [props])\n", (169514, 169556), False, 'import jsii\n'), ((172624, 172673), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricIncomingBytes"""', '[props]'], {}), "(self, 'metricIncomingBytes', [props])\n", (172635, 172673), False, 'import jsii\n'), ((175757, 175808), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricIncomingRecords"""', '[props]'], {}), "(self, 'metricIncomingRecords', [props])\n", (175768, 175808), False, 'import jsii\n'), ((178670, 178720), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordBytes"""', '[props]'], {}), "(self, 'metricPutRecordBytes', [props])\n", (178681, 178720), False, 'import jsii\n'), ((181559, 181611), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordLatency"""', '[props]'], {}), "(self, 'metricPutRecordLatency', [props])\n", (181570, 181611), False, 'import jsii\n'), ((184476, 184527), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordsBytes"""', '[props]'], {}), "(self, 'metricPutRecordsBytes', [props])\n", (184487, 184527), False, 'import jsii\n'), ((187528, 187587), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordsFailedRecords"""', '[props]'], {}), "(self, 'metricPutRecordsFailedRecords', [props])\n", (187539, 187587), False, 'import jsii\n'), ((190429, 190482), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordsLatency"""', '[props]'], {}), "(self, 'metricPutRecordsLatency', [props])\n", (190440, 190482), False, 'import jsii\n'), ((193376, 193429), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordsSuccess"""', '[props]'], {}), "(self, 'metricPutRecordsSuccess', [props])\n", (193387, 193429), False, 'import jsii\n'), ((196335, 196398), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordsSuccessfulRecords"""', '[props]'], {}), "(self, 'metricPutRecordsSuccessfulRecords', [props])\n", (196346, 196398), False, 'import 
jsii\n'), ((199318, 199380), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordsThrottledRecords"""', '[props]'], {}), "(self, 'metricPutRecordsThrottledRecords', [props])\n", (199329, 199380), False, 'import jsii\n'), ((202276, 202334), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordsTotalRecords"""', '[props]'], {}), "(self, 'metricPutRecordsTotalRecords', [props])\n", (202287, 202334), False, 'import jsii\n'), ((205282, 205334), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricPutRecordSuccess"""', '[props]'], {}), "(self, 'metricPutRecordSuccess', [props])\n", (205293, 205334), False, 'import jsii\n'), ((208574, 208643), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricReadProvisionedThroughputExceeded"""', '[props]'], {}), "(self, 'metricReadProvisionedThroughputExceeded', [props])\n", (208585, 208643), False, 'import jsii\n'), ((211919, 211989), 'jsii.invoke', 'jsii.invoke', (['self', '"""metricWriteProvisionedThroughputExceeded"""', '[props]'], {}), "(self, 'metricWriteProvisionedThroughputExceeded', [props])\n", (211930, 211989), False, 'import jsii\n'), ((212196, 212223), 'jsii.get', 'jsii.get', (['self', '"""streamArn"""'], {}), "(self, 'streamArn')\n", (212204, 212223), False, 'import jsii\n'), ((212433, 212461), 'jsii.get', 'jsii.get', (['self', '"""streamName"""'], {}), "(self, 'streamName')\n", (212441, 212461), False, 'import jsii\n'), ((212760, 212791), 'jsii.get', 'jsii.get', (['self', '"""encryptionKey"""'], {}), "(self, 'encryptionKey')\n", (212768, 212791), False, 'import jsii\n')] |
import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence
class EncoderCNN(nn.Module):
    """Encode a batch of images into fixed-size feature vectors.

    A pretrained ResNet-50 (with its classifier head removed) is used as a
    frozen feature extractor; a trainable linear layer followed by batch
    normalization projects the pooled features to ``embed_size``.
    """

    def __init__(self, embed_size):
        super(EncoderCNN, self).__init__()
        backbone = models.resnet50(pretrained=True)
        # Freeze all pretrained weights; only the projection head trains.
        for weight in backbone.parameters():
            weight.requires_grad_(False)
        # Drop the final fully-connected classifier layer.
        layers = list(backbone.children())[:-1]
        self.resnet = nn.Sequential(*layers)
        self.embed = nn.Linear(backbone.fc.in_features, embed_size)
        self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)

    def forward(self, images):
        """Return (batch_size, embed_size) embeddings for ``images``."""
        x = self.resnet(images)
        x = x.view(x.size(0), -1)
        return self.bn(self.embed(x))
class DecoderRNN(nn.Module):
    """LSTM decoder that turns image features plus caption tokens into
    per-step vocabulary logits.

    The image feature vector is fed to the LSTM as the first "word" of the
    sequence; the embedded caption tokens follow.
    """

    def __init__(self, embed_size, hidden_size, vocab_size, num_layers=1):
        """
        Args:
            embed_size: dimensionality of the word / feature embeddings.
            hidden_size: number of hidden units per LSTM layer.
            vocab_size: size of the output vocabulary.
            num_layers: number of stacked LSTM layers.
        """
        super(DecoderRNN, self).__init__()
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        self.num_layers = num_layers
        self.word_embeddings = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
        self.linear = nn.Linear(hidden_size, vocab_size)

    def forward(self, features, captions):
        """Return logits of shape (batch_size, caption_len, vocab_size).

        Args:
            features: (batch_size, embed_size) image features.
            captions: (batch_size, caption_len) token-id tensor; the last
                token of each caption is dropped since the model predicts
                token t from tokens < t.
        """
        captions = captions[:, :-1]
        batch_size = features.size(0)

        # Zero initial hidden/cell states.
        # Fix 1: the state must cover all LSTM layers — shape is
        #   (num_layers, batch, hidden), not (1, batch, hidden).
        # Fix 2: allocate on the input's device instead of hard-coding
        #   .cuda(), so the model also runs on CPU-only machines.
        state_shape = (self.num_layers, batch_size, self.hidden_size)
        hidden_state = torch.zeros(state_shape, device=features.device)
        cell_state = torch.zeros(state_shape, device=features.device)

        # Prepend the image features as the first input step.
        embeds = self.word_embeddings(captions)
        embeds = torch.cat((features.unsqueeze(1), embeds), dim=1)
        # embeds shape: (batch_size, caption_len, embed_size)

        lstm_out, _ = self.lstm(embeds, (hidden_state, cell_state))
        outputs = self.linear(lstm_out)
        return outputs

    def sample(self, inputs, states=None, max_len=20):
        """Greedy decoding: accepts a pre-processed image tensor ``inputs``
        of shape (1, 1, embed_size) and returns a list of predicted token
        ids (length <= max_len; stops early on the end token, id 1).
        """
        sampled_ids = []
        for i in range(max_len):  # maximum sampling length
            hiddens, states = self.lstm(inputs, states)  # (batch_size, 1, hidden_size)
            outputs = self.linear(hiddens.squeeze(1))    # (batch_size, vocab_size)
            predicted = outputs.max(1)[1]
            if predicted.item() == 1:
                # Token id 1 is the end-of-sentence marker; stop decoding.
                break
            sampled_ids.append(predicted)
            # Feed the predicted token back in as the next input.
            inputs = self.word_embeddings(predicted)
            inputs = inputs.unsqueeze(1)  # (batch_size, 1, embed_size)
        return [pred.item() for pred in sampled_ids]
| [
"torch.nn.Sequential",
"torch.nn.LSTM",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"torchvision.models.resnet50",
"torch.zeros",
"torch.nn.Embedding"
] | [((249, 281), 'torchvision.models.resnet50', 'models.resnet50', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (264, 281), True, 'import torchvision.models as models\n'), ((442, 465), 'torch.nn.Sequential', 'nn.Sequential', (['*modules'], {}), '(*modules)\n', (455, 465), True, 'import torch.nn as nn\n'), ((487, 531), 'torch.nn.Linear', 'nn.Linear', (['resnet.fc.in_features', 'embed_size'], {}), '(resnet.fc.in_features, embed_size)\n', (496, 531), True, 'import torch.nn as nn\n'), ((550, 591), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['embed_size'], {'momentum': '(0.01)'}), '(embed_size, momentum=0.01)\n', (564, 591), True, 'import torch.nn as nn\n'), ((1051, 1087), 'torch.nn.Embedding', 'nn.Embedding', (['vocab_size', 'embed_size'], {}), '(vocab_size, embed_size)\n', (1063, 1087), True, 'import torch.nn as nn\n'), ((1108, 1170), 'torch.nn.LSTM', 'nn.LSTM', (['embed_size', 'hidden_size', 'num_layers'], {'batch_first': '(True)'}), '(embed_size, hidden_size, num_layers, batch_first=True)\n', (1115, 1170), True, 'import torch.nn as nn\n'), ((1193, 1227), 'torch.nn.Linear', 'nn.Linear', (['hidden_size', 'vocab_size'], {}), '(hidden_size, vocab_size)\n', (1202, 1227), True, 'import torch.nn as nn\n'), ((1430, 1476), 'torch.zeros', 'torch.zeros', (['(1, batch_size, self.hidden_size)'], {}), '((1, batch_size, self.hidden_size))\n', (1441, 1476), False, 'import torch\n'), ((1505, 1551), 'torch.zeros', 'torch.zeros', (['(1, batch_size, self.hidden_size)'], {}), '((1, batch_size, self.hidden_size))\n', (1516, 1551), False, 'import torch\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 16 14:36:46 2019
@author: Tawanda
"""
import sys
import argparse

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException


if __name__ == "__main__":
    # The only required input is the path to the ChromeDriver binary.
    parser = argparse.ArgumentParser()
    parser.add_argument("--driver", help="path to chrome driver")
    args = parser.parse_args()

    if not args.driver:
        print("Please enter a valid path to the chrome driver ( --driver argument )")
        sys.exit(1)

    browser = webdriver.Chrome(executable_path=args.driver)
    browser.implicitly_wait(10)
    browser.maximize_window()
    try:
        browser.get('https://www.oursky.com/')
        button = browser.find_element_by_class_name('btn-header')
        button.click()
        print('=======Button Click test was successful=======')
    except NoSuchElementException as ex:
        print(f'Error :: No such element : {ex}')
    finally:
        # Bug fix: the original never closed the browser, leaking a Chrome
        # process on every run.  quit() tears down the driver session whether
        # the test succeeded, failed, or raised an unexpected exception.
        browser.quit()
"selenium.webdriver.Chrome",
"argparse.ArgumentParser",
"sys.exit"
] | [((276, 301), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (299, 301), False, 'import argparse\n'), ((553, 598), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'executable_path': 'args.driver'}), '(executable_path=args.driver)\n', (569, 598), False, 'from selenium import webdriver\n'), ((522, 533), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (530, 533), False, 'import sys\n')] |
"""Learn ideal points with the text-based ideal point model (TBIP).
Let y_{dv} denote the counts of word v in document d. Let x_d refer to the
ideal point of the author of document d. Then we model:
theta, beta ~ Gamma(alpha, alpha)
x, eta ~ N(0, 1)
y_{dv} ~ Pois(sum_k theta_dk beta_kv exp(x_d * eta_kv).
We perform variational inference to provide estimates for the posterior
distribution of each latent variable. We take reparameterization gradients,
using a lognormal variational family for the positive variables (theta, beta)
and a normal variational family for the real variables (x, eta).
The directory `data/{data_name}/clean/` should have the following four files:
1. `counts.npz`: a [num_documents, num_words] sparse matrix containing the
word counts for each document.
2. `author_indices.npy`: a [num_documents] vector where each entry is an
integer in the set {0, 1, ..., num_authors - 1}, indicating the author of
the corresponding document in `counts.npz`.
3. `vocabulary.txt`: a [num_words]-length file where each line is a string
denoting the corresponding word in the vocabulary.
4. `author_map.txt`: a [num_authors]-length file where each line is a string
denoting the name of an author in the corpus.
We provide more details in our paper [1].
#### References
[1]: Keyon Vafa, Suresh Naidu, David M. Blei. Text-Based Ideal Points. In
_Conference of the Association for Computational Linguistics_, 2020.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import time
from absl import flags
import numpy as np
import scipy.sparse as sparse
import tensorflow as tf
import tensorflow_probability as tfp
# ---------------------------------------------------------------------------
# Command-line configuration for the TBIP training run.  These flags cover
# optimization (learning_rate, max_steps, batch_size), model size
# (num_topics, num_samples), data selection/preprocessing, and logging.
# ---------------------------------------------------------------------------
flags.DEFINE_float("learning_rate",
                   default=0.01,
                   help="Adam learning rate.")
flags.DEFINE_integer("max_steps",
                     default=1000000,
                     help="Number of training steps to run.")
flags.DEFINE_integer("num_topics",
                     default=50,
                     help="Number of topics.")
flags.DEFINE_integer("batch_size",
                     default=1024,
                     help="Batch size.")
flags.DEFINE_integer("num_samples",
                     default=1,
                     help="Number of samples to use for ELBO approximation.")
flags.DEFINE_enum("counts_transformation",
                  default="nothing",
                  enum_values=["nothing", "binary", "sqrt", "log"],
                  help="Transformation used on counts data.")
flags.DEFINE_boolean("pre_initialize_parameters",
                     default=True,
                     help="Whether to use pre-initialized document and topic "
                          "intensities (with Poisson factorization).")
flags.DEFINE_string("data",
                    default="senate-speeches-114",
                    help="Data source being used.")
flags.DEFINE_integer("senate_session",
                     default=113,
                     help="Senate session (used only when data is "
                          "'senate-speech-comparisons'.")
flags.DEFINE_integer("print_steps",
                     default=500,
                     help="Number of steps to print and save results.")
flags.DEFINE_integer("seed",
                     default=123,
                     help="Random seed to be used.")
# Parsed flag values; populated by absl when the app runs.
FLAGS = flags.FLAGS
def build_input_pipeline(data_dir,
                         batch_size,
                         random_state,
                         counts_transformation="nothing"):
  """Load data and build iterator for minibatches.
  Args:
    data_dir: The directory where the data is located. There must be four
      files inside the rep: `counts.npz`, `author_indices.npy`,
      `author_map.txt`, and `vocabulary.txt`.
    batch_size: The batch size to use for training.
    random_state: A NumPy `RandomState` object, used to shuffle the data.
    counts_transformation: A string indicating how to transform the counts.
      One of "nothing", "binary", "log", or "sqrt".
  Returns:
    A tuple of (iterator, author_weights, vocabulary, author_map,
    num_documents, num_words, num_authors), where `iterator` yields
    (document_indices, sparse_counts, author_indices) minibatches and
    `author_weights` is a float32 vector of per-author verbosity weights.
  """
  counts = sparse.load_npz(os.path.join(data_dir, "counts.npz"))
  num_documents, num_words = counts.shape
  author_indices = np.load(
      os.path.join(data_dir, "author_indices.npy")).astype(np.int32)
  # Author ids are assumed to be contiguous from 0, so max + 1 is the count.
  num_authors = np.max(author_indices + 1)
  author_map = np.loadtxt(os.path.join(data_dir, "author_map.txt"),
                          dtype=str,
                          delimiter="\n",
                          encoding='latin-1')
  # Shuffle data.
  documents = random_state.permutation(num_documents)
  shuffled_author_indices = author_indices[documents]
  shuffled_counts = counts[documents]
  # Apply counts transformation.
  if counts_transformation == "nothing":
    count_values = shuffled_counts.data
  elif counts_transformation == "binary":
    count_values = np.int32(shuffled_counts.data > 0)
  elif counts_transformation == "log":
    count_values = np.round(np.log(1 + shuffled_counts.data))
  elif counts_transformation == "sqrt":
    count_values = np.round(np.sqrt(shuffled_counts.data))
  else:
    raise ValueError("Unrecognized counts transformation.")
  # Store counts as sparse tensor so it occupies less memory.
  shuffled_counts = tf.SparseTensor(
      indices=np.array(shuffled_counts.nonzero()).T,
      values=count_values,
      dense_shape=shuffled_counts.shape)
  dataset = tf.data.Dataset.from_tensor_slices(
      (documents, shuffled_counts, shuffled_author_indices))
  batches = dataset.repeat().batch(batch_size).prefetch(batch_size)
  iterator = batches.make_one_shot_iterator()
  # comments="<!-" keeps words such as "#1" from being dropped as comments.
  vocabulary = np.loadtxt(os.path.join(data_dir, "vocabulary.txt"),
                          dtype=str,
                          delimiter="\n",
                          comments="<!-")
  total_counts_per_author = np.bincount(
      author_indices,
      weights=np.array(np.sum(counts, axis=1)).flatten())
  counts_per_document_per_author = (
      total_counts_per_author / np.bincount(author_indices))
  # Author weights is how much lengthy each author's opinion over average is.
  author_weights = (counts_per_document_per_author /
                    np.mean(np.sum(counts, axis=1))).astype(np.float32)
  return (iterator, author_weights, vocabulary, author_map,
          num_documents, num_words, num_authors)
def build_lognormal_variational_parameters(initial_document_loc,
                                           initial_objective_topic_loc,
                                           num_documents,
                                           num_words,
                                           num_topics):
  """
  Build document and objective topic lognormal variational parameters.
  Args:
    initial_document_loc: A [num_documents, num_topics] NumPy array containing
      the initial document intensity means.
    initial_objective_topic_loc: A [num_topics, num_words] NumPy array
      containing the initial objective topic means.
    num_documents: Number of documents in the data set.
    num_words: Number of words in the data set.
    num_topics: Number of topics.
  Returns:
    document_loc: A Variable object with shape [num_documents, num_topics].
    document_scale: A positive Variable object with shape [num_documents,
      num_topics].
    objective_topic_loc: A Variable object with shape [num_topics, num_words].
    objective_topic_scale: A positive Variable object with shape [num_topics,
      num_words].
  """
  # The lognormal `loc` lives in log-space, so initialize with log(mean-ish).
  document_loc = tf.get_variable(
      "document_loc",
      initializer=tf.constant(np.log(initial_document_loc)))
  objective_topic_loc = tf.get_variable(
      "objective_topic_loc",
      initializer=tf.constant(np.log(initial_objective_topic_loc)))
  # Scales are parameterized by unconstrained logits and mapped through
  # softplus below so the resulting scale is always positive.
  document_scale_logit = tf.get_variable(
      "document_scale_logit",
      shape=[num_documents, num_topics],
      initializer=tf.initializers.random_normal(mean=0, stddev=1.),
      dtype=tf.float32)
  objective_topic_scale_logit = tf.get_variable(
      "objective_topic_scale_logit",
      shape=[num_topics, num_words],
      initializer=tf.initializers.random_normal(mean=0, stddev=1.),
      dtype=tf.float32)
  document_scale = tf.nn.softplus(document_scale_logit)
  objective_topic_scale = tf.nn.softplus(objective_topic_scale_logit)
  tf.summary.histogram("params/document_loc", document_loc)
  tf.summary.histogram("params/objective_topic_loc", objective_topic_loc)
  tf.summary.histogram("params/document_scale", document_scale)
  tf.summary.histogram("params/objective_topic_scale", objective_topic_scale)
  return (document_loc, document_scale,
          objective_topic_loc, objective_topic_scale)
def print_topics(neutral_mean, negative_mean, positive_mean, vocabulary):
  """Format the neutral and ideological topics for Tensorboard.

  Every topic yields three lines -- negative, neutral, positive -- each
  listing that variant's ten highest-intensity words.

  Args:
    neutral_mean: The mean of the neutral topics, a NumPy matrix with shape
      [num_topics, num_words].
    negative_mean: The mean of the negative topics, a NumPy matrix with shape
      [num_topics, num_words].
    positive_mean: The mean of the positive topics, a NumPy matrix with shape
      [num_topics, num_words].
    vocabulary: A list of the vocabulary with shape [num_words].

  Returns:
    A [num_topics] NumPy array of strings, one multi-line summary per topic.
  """
  words_per_topic = 10
  num_topics = neutral_mean.shape[0]
  # Rank word indices by descending intensity for each topic variant.
  ranked_negative = np.argsort(-negative_mean, axis=1)
  ranked_neutral = np.argsort(-neutral_mean, axis=1)
  ranked_positive = np.argsort(-positive_mean, axis=1)

  def _topic_line(label, ranked, topic_idx):
    # One summary line, e.g. "Neutral 3: word_a, word_b, ...".
    top = ranked[topic_idx, :words_per_topic]
    return "{} {}: {}".format(
        label, topic_idx, ", ".join(vocabulary[word] for word in top))

  summaries = [
      " \n".join([_topic_line("Negative", ranked_negative, k),
                  _topic_line("Neutral", ranked_neutral, k),
                  _topic_line("Positive", ranked_positive, k)])
      for k in range(num_topics)]
  return np.array(summaries)
def print_ideal_points(ideal_point_loc, author_map):
  """Return author names ordered by ideal point as one comma-separated string."""
  ordering = np.argsort(ideal_point_loc)
  return ", ".join(author_map[ordering])
def get_log_prior(samples, prior):
  """Return log prior of sampled Gaussians or Gammas.

  Args:
    samples: A `Tensor` with shape `[num_samples, :, :]`.
    prior: String representing prior distribution: 'normal' for a standard
      Gaussian, or 'gamma' for Gamma(concentration=0.3, rate=0.3).

  Returns:
    log_prior: A `Tensor` with shape `[num_samples]`, with the log priors
      summed across latent dimensions.

  Raises:
    ValueError: If `prior` is neither 'normal' nor 'gamma'.
  """
  if prior == 'normal':
    prior_distribution = tfp.distributions.Normal(loc=0., scale=1.)
  elif prior == 'gamma':
    prior_distribution = tfp.distributions.Gamma(concentration=0.3, rate=0.3)
  else:
    # Bug fix: the original fell through to an unbound `prior_distribution`,
    # producing a confusing NameError instead of a clear error message.
    raise ValueError("Unrecognized prior distribution '{}'.".format(prior))
  log_prior = tf.reduce_sum(prior_distribution.log_prob(samples),
                            axis=[1, 2])
  return log_prior
def get_elbo(counts,
             document_indices,
             author_indices,
             author_weights,
             document_distribution,
             objective_topic_distribution,
             ideological_topic_distribution,
             ideal_point_distribution,
             num_documents,
             batch_size,
             num_samples=1):
  """Approximate variational Lognormal ELBO using reparameterization.
  Args:
    counts: A matrix with shape `[batch_size, num_words]`.
    document_indices: An int-vector with shape `[batch_size]`.
    author_indices: An int-vector with shape `[batch_size]`.
    author_weights: A vector with shape `[num_authors]`, constituting how
      lengthy the opinion is above average.
    document_distribution: A positive `Distribution` object with parameter
      shape `[num_documents, num_topics]`.
    objective_topic_distribution: A positive `Distribution` object with
      parameter shape `[num_topics, num_words]`.
    ideological_topic_distribution: A positive `Distribution` object with
      parameter shape `[num_topics, num_words]`.
    ideal_point_distribution: A `Distribution` object over [0, 1] with
      parameter_shape `[num_authors]`.
    num_documents: The number of documents in the total data set (used to
      calculate log-likelihood scale).
    batch_size: Batch size (used to calculate log-likelihood scale).
    num_samples: Number of Monte-Carlo samples.
  Returns:
    elbo: A scalar representing a Monte-Carlo sample of the ELBO. This value is
      averaged across samples and summed across batches.
  """
  # Draw reparameterized samples from each variational family; the leading
  # dimension of every sample tensor is num_samples.
  document_samples = document_distribution.sample(num_samples)
  objective_topic_samples = objective_topic_distribution.sample(num_samples)
  ideological_topic_samples = ideological_topic_distribution.sample(
      num_samples)
  ideal_point_samples = ideal_point_distribution.sample(num_samples)
  _, num_topics, _ = objective_topic_samples.get_shape().as_list()
  # Ideal points get a standard-normal prior, evaluated directly here (the
  # tensor is rank 3, unlike the cases handled by get_log_prior's axis sum).
  ideal_point_log_prior = tfp.distributions.Normal(
      loc=0.,
      scale=1.)
  ideal_point_log_prior = tf.reduce_sum(
      ideal_point_log_prior.log_prob(ideal_point_samples), axis=[1,2])
  document_log_prior = get_log_prior(document_samples, 'gamma')
  objective_topic_log_prior = get_log_prior(objective_topic_samples, 'gamma')
  ideological_topic_log_prior = get_log_prior(ideological_topic_samples,
                                              'normal')
  log_prior = (document_log_prior +
               objective_topic_log_prior +
               ideological_topic_log_prior +
               ideal_point_log_prior)
  # Gather the per-minibatch slices of the sampled latents.
  selected_document_samples = tf.gather(document_samples,
                                        document_indices,
                                        axis=1)
  selected_ideal_points = tf.gather(ideal_point_samples,
                                    author_indices,
                                    axis=1)
  # exp(x_d * eta_kv): each document's ideal point is broadcast across the
  # topic/word dimensions of the ideological topics.
  selected_ideological_topic_samples = tf.exp(
      # replace by a column
      selected_ideal_points[:, :, :, tf.newaxis] *
      ideological_topic_samples[:, tf.newaxis, :, :])
  # Normalize by how lengthy the author's opinion is.
  selected_author_weights = tf.gather(author_weights, author_indices)
  selected_ideological_topic_samples = (
      selected_author_weights[tf.newaxis, :, tf.newaxis, tf.newaxis] *
      selected_ideological_topic_samples)
  # Entropies of the variational families, summed over latent dimensions.
  document_entropy = -tf.reduce_sum(
      document_distribution.log_prob(document_samples),
      axis=[1, 2])
  objective_topic_entropy = -tf.reduce_sum(
      objective_topic_distribution.log_prob(objective_topic_samples),
      axis=[1, 2])
  ideological_topic_entropy = -tf.reduce_sum(
      ideological_topic_distribution.log_prob(ideological_topic_samples),
      axis=[1, 2])
  ideal_point_entropy = -tf.reduce_sum(
      ideal_point_distribution.log_prob(ideal_point_samples),
      axis=1)
  entropy = (document_entropy +
             objective_topic_entropy +
             ideological_topic_entropy +
             ideal_point_entropy)
  # Poisson rate: sum_k theta_dk * beta_kv * exp(x_d * eta_kv), matching the
  # model in the module docstring; the topic axis (2) is summed out.
  rate = tf.reduce_sum(
      selected_document_samples[:, :, :, tf.newaxis] *
      objective_topic_samples[:, tf.newaxis, :, :] *
      selected_ideological_topic_samples[:, :, :, :],
      axis=2)
  count_distribution = tfp.distributions.Poisson(rate=rate)
  # Need to un-sparsify the counts to evaluate log-likelihood.
  count_log_likelihood = count_distribution.log_prob(
      tf.sparse.to_dense(counts))
  count_log_likelihood = tf.reduce_sum(count_log_likelihood, axis=[1, 2])
  # Adjust for the fact that we're only using a minibatch.
  count_log_likelihood = count_log_likelihood * (num_documents / batch_size)
  elbo = log_prior + count_log_likelihood + entropy
  # Average the ELBO estimate over the Monte-Carlo samples.
  elbo = tf.reduce_mean(elbo)
  tf.summary.scalar("elbo/elbo", elbo)
  tf.summary.scalar("elbo/log_prior", tf.reduce_mean(log_prior))
  tf.summary.scalar("elbo/count_log_likelihood",
                    tf.reduce_mean(count_log_likelihood))
  tf.summary.scalar("elbo/entropy", tf.reduce_mean(entropy))
  return elbo
def main(argv):
  """Build the TBIP graph and run training, saving parameters periodically.

  Loads the data named by FLAGS.data, (optionally) warm-starts from Poisson
  factorization fits, constructs the variational families, and optimizes the
  ELBO with Adam, writing Tensorboard summaries and .npy parameter snapshots
  into `<source_dir>/tbip-fits/`.
  """
  del argv  # Unused; tf.app.run passes argv through.
  tf.set_random_seed(FLAGS.seed)
  random_state = np.random.RandomState(FLAGS.seed)
  project_dir = os.path.abspath(os.path.dirname(__file__))
  source_dir = os.path.join(project_dir, "data/{}".format(FLAGS.data))
  # For model comparisons, we must also specify a Senate session.
  if FLAGS.data == "senate-speech-comparisons":
    source_dir = os.path.join(
        source_dir, "tbip/{}".format(FLAGS.senate_session))
  # As described in the docstring, the data directory must have the following
  # files: counts.npz, author_indices.npy, vocabulary.txt, author_map.txt.
  data_dir = os.path.join(source_dir, "clean")
  save_dir = os.path.join(source_dir, "tbip-fits")
  # NOTE: any previous fit in save_dir is deleted before training starts.
  if tf.gfile.Exists(save_dir):
    tf.logging.warn("Deleting old log directory at {}".format(save_dir))
    tf.gfile.DeleteRecursively(save_dir)
  tf.gfile.MakeDirs(save_dir)
  (iterator, author_weights, vocabulary, author_map,
   num_documents, num_words, num_authors) = build_input_pipeline(
      data_dir,
      FLAGS.batch_size,
      random_state,
      FLAGS.counts_transformation)
  document_indices, counts, author_indices = iterator.get_next()
  if FLAGS.pre_initialize_parameters:
    # Warm start from Poisson-factorization fits (shape / rate parameters)
    # expected under `<source_dir>/pf-fits/`.
    fit_dir = os.path.join(source_dir, "pf-fits")
    fitted_document_shape = np.load(
        os.path.join(fit_dir, "document_shape.npy")).astype(np.float32)
    fitted_document_rate = np.load(
        os.path.join(fit_dir, "document_rate.npy")).astype(np.float32)
    fitted_topic_shape = np.load(
        os.path.join(fit_dir, "topic_shape.npy")).astype(np.float32)
    fitted_topic_rate = np.load(
        os.path.join(fit_dir, "topic_rate.npy")).astype(np.float32)
    # Gamma mean = shape / rate.
    initial_document_loc = fitted_document_shape / fitted_document_rate
    initial_objective_topic_loc = fitted_topic_shape / fitted_topic_rate
  else:
    initial_document_loc = np.float32(
        np.exp(random_state.randn(num_documents, FLAGS.num_topics)))
    initial_objective_topic_loc = np.float32(
        np.exp(random_state.randn(FLAGS.num_topics, num_words)))
  # Initialize lognormal variational parameters.
  (document_loc, document_scale, objective_topic_loc,
   objective_topic_scale) = build_lognormal_variational_parameters(
      initial_document_loc,
      initial_objective_topic_loc,
      num_documents,
      num_words,
      FLAGS.num_topics)
  document_distribution = tfp.distributions.LogNormal(
      loc=document_loc,
      scale=document_scale)
  objective_topic_distribution = tfp.distributions.LogNormal(
      loc=objective_topic_loc,
      scale=objective_topic_scale)
  # Ideological topics (eta) are real-valued, so use a Normal family.
  ideological_topic_loc = tf.get_variable(
      "ideological_topic_loc",
      shape=[FLAGS.num_topics, num_words],
      dtype=tf.float32)
  ideological_topic_scale_logit = tf.get_variable(
      "ideological_topic_scale_logit",
      shape=[FLAGS.num_topics, num_words],
      dtype=tf.float32)
  ideological_topic_scale = tf.nn.softplus(ideological_topic_scale_logit)
  tf.summary.histogram("params/ideological_topic_loc", ideological_topic_loc)
  tf.summary.histogram("params/ideological_topic_scale",
                       ideological_topic_scale)
  ideological_topic_distribution = tfp.distributions.Normal(
      loc=ideological_topic_loc,
      scale=ideological_topic_scale)
  # Per-author ideal points (x), also real-valued.
  ideal_point_loc = tf.get_variable(
      "ideal_point_loc",
      shape=[num_authors],
      dtype=tf.float32)
  ideal_point_scale_logit = tf.get_variable(
      "ideal_point_scale_logit",
      initializer=tf.initializers.random_normal(mean=0, stddev=1.),
      shape=[num_authors],
      dtype=tf.float32)
  ideal_point_scale = tf.nn.softplus(ideal_point_scale_logit)
  ideal_point_distribution = tfp.distributions.Normal(
      loc=ideal_point_loc,
      scale=ideal_point_scale)
  tf.summary.histogram("params/ideal_point_loc",
                       tf.reshape(ideal_point_loc, [-1]))
  tf.summary.histogram("params/ideal_point_scale",
                       tf.reshape(ideal_point_scale, [-1]))
  elbo = get_elbo(counts,
                  document_indices,
                  author_indices,
                  author_weights,
                  document_distribution,
                  objective_topic_distribution,
                  ideological_topic_distribution,
                  ideal_point_distribution,
                  num_documents,
                  FLAGS.batch_size,
                  num_samples=FLAGS.num_samples)
  loss = -elbo
  tf.summary.scalar("loss", loss)
  optim = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
  train_op = optim.minimize(loss)
  """
  For each (k,v), we want to evaluate E[beta_kv], E[beta_kv * exp(eta_kv)],
  and E[beta_kv * exp(-eta_kv)], where the expectations are with respect to the
  variational distributions. Like the paper, beta refers to the obective topic
  and eta refers to the ideological topic.
  Dropping the indices and denoting by mu_b the objective topic location and
  sigma_b the objective topic scale, we have E[beta] = exp(mu + sigma_b^2 / 2),
  using the mean of a lognormal distribution.
  Denoting by mu_e the ideological topic location and sigma_e the ideological
  topic scale, we have E[beta * exp(eta)] = E[beta]E[exp(eta)] by the
  mean-field assumption. exp(eta) is lognormal distributed, so E[exp(eta)] =
  exp(mu_e + sigma_e^2 / 2). Thus, E[beta * exp(eta)] =
  exp(mu_b + mu_e + (sigma_b^2 + sigma_e^2) / 2).
  Finally, E[beta * exp(-eta)] =
  exp(mu_b - mu_e + (sigma_b^2 + sigma_e^2) / 2).
  Because we only care about the orderings of topics, we can drop the exponents
  from the means.
  """
  neutral_mean = objective_topic_loc + objective_topic_scale ** 2 / 2
  positive_mean = (objective_topic_loc +
                   ideological_topic_loc +
                   (objective_topic_scale ** 2 +
                    ideological_topic_scale ** 2) / 2)
  negative_mean = (objective_topic_loc -
                   ideological_topic_loc +
                   (objective_topic_scale ** 2 +
                    ideological_topic_scale ** 2) / 2)
  # NOTE(review): the *_mean_at_two tensors below are not consumed anywhere
  # in this function -- presumably kept for offline analysis; verify intent.
  positive_mean_at_two = (objective_topic_loc +
                          2*ideological_topic_loc +
                          (objective_topic_scale ** 2 +
                           2*ideological_topic_scale ** 2) / 2)
  negative_mean_at_two = (objective_topic_loc -
                          2*ideological_topic_loc +
                          (objective_topic_scale ** 2 +
                           2*ideological_topic_scale ** 2) / 2)
  # py_func bridges back into NumPy to format human-readable summaries.
  topics = tf.py_func(
      functools.partial(print_topics, vocabulary=vocabulary),
      [neutral_mean, negative_mean, positive_mean],
      tf.string,
      stateful=False)
  ideal_point_list = tf.py_func(
      functools.partial(print_ideal_points, author_map=author_map),
      [ideal_point_loc],
      tf.string, stateful=False)
  tf.summary.text("topics", topics)
  tf.summary.text("ideal_points", ideal_point_list)
  summary = tf.summary.merge_all()
  init = tf.global_variables_initializer()
  with tf.Session() as sess:
    summary_writer = tf.summary.FileWriter(save_dir, sess.graph)
    sess.run(init)
    start_time = time.time()
    for step in range(FLAGS.max_steps):
      (_, elbo_val) = sess.run([train_op, elbo])
      # Running average of seconds per step since training began.
      duration = (time.time() - start_time) / (step + 1)
      if step % FLAGS.print_steps == 0:
        print("Step: {:>3d} ELBO: {:.3f} ({:.3f} sec)".format(
            step, elbo_val, duration))
        summary_str = sess.run(summary)
        summary_writer.add_summary(summary_str, step)
        summary_writer.flush()
      if step % 1000 == 0 or step == FLAGS.max_steps - 1:
        param_save_dir = os.path.join(save_dir, "params/")
        if not tf.gfile.Exists(param_save_dir):
          tf.gfile.MakeDirs(param_save_dir)
        (ideological_topic_loc_val, ideological_topic_scale_val,
         ideal_point_loc_val, ideal_point_scale_val) = sess.run([
             ideological_topic_loc, ideological_topic_scale,
             ideal_point_loc, ideal_point_scale])
        (document_loc_val, document_scale_val, objective_topic_loc_val,
         objective_topic_scale_val, ideological_topic_loc_val,
         ideological_topic_scale_val, ideal_point_loc_val,
         ideal_point_scale_val) = sess.run([
             document_loc, document_scale, objective_topic_loc,
             objective_topic_scale, ideological_topic_loc,
             ideological_topic_scale, ideal_point_loc, ideal_point_scale])
        np.save(os.path.join(param_save_dir, "document_loc"),
                document_loc_val)
        np.save(os.path.join(param_save_dir, "document_scale"),
                document_scale_val)
        np.save(os.path.join(param_save_dir, "objective_topic_loc"),
                objective_topic_loc_val)
        np.save(os.path.join(param_save_dir, "objective_topic_scale"),
                objective_topic_scale_val)
        np.save(os.path.join(param_save_dir, "ideological_topic_loc"),
                ideological_topic_loc_val)
        np.save(os.path.join(param_save_dir, "ideological_topic_scale"),
                ideological_topic_scale_val)
        np.save(os.path.join(param_save_dir, "ideal_point_loc"),
                ideal_point_loc_val)
        np.save(os.path.join(param_save_dir, "ideal_point_scale"),
                ideal_point_scale_val)
if __name__ == "__main__":
  # tf.app.run parses the absl flags and then dispatches to main().
  tf.app.run()
| [
"numpy.sqrt",
"tensorflow.get_variable",
"tensorflow.initializers.random_normal",
"tensorflow.reduce_sum",
"numpy.int32",
"numpy.log",
"numpy.argsort",
"numpy.array",
"tensorflow.nn.softplus",
"tensorflow.gfile.MakeDirs",
"tensorflow.reduce_mean",
"tensorflow.sparse.to_dense",
"tensorflow.se... | [((1743, 1820), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""learning_rate"""'], {'default': '(0.01)', 'help': '"""Adam learning rate."""'}), "('learning_rate', default=0.01, help='Adam learning rate.')\n", (1761, 1820), False, 'from absl import flags\n'), ((1859, 1955), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_steps"""'], {'default': '(1000000)', 'help': '"""Number of training steps to run."""'}), "('max_steps', default=1000000, help=\n 'Number of training steps to run.')\n", (1879, 1955), False, 'from absl import flags\n'), ((1993, 2065), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_topics"""'], {'default': '(50)', 'help': '"""Number of topics."""'}), "('num_topics', default=50, help='Number of topics.')\n", (2013, 2065), False, 'from absl import flags\n'), ((2108, 2176), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""batch_size"""'], {'default': '(1024)', 'help': '"""Batch size."""'}), "('batch_size', default=1024, help='Batch size.')\n", (2128, 2176), False, 'from absl import flags\n'), ((2219, 2327), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_samples"""'], {'default': '(1)', 'help': '"""Number of samples to use for ELBO approximation."""'}), "('num_samples', default=1, help=\n 'Number of samples to use for ELBO approximation.')\n", (2239, 2327), False, 'from absl import flags\n'), ((2365, 2530), 'absl.flags.DEFINE_enum', 'flags.DEFINE_enum', (['"""counts_transformation"""'], {'default': '"""nothing"""', 'enum_values': "['nothing', 'binary', 'sqrt', 'log']", 'help': '"""Transformation used on counts data."""'}), "('counts_transformation', default='nothing', enum_values=[\n 'nothing', 'binary', 'sqrt', 'log'], help=\n 'Transformation used on counts data.')\n", (2382, 2530), False, 'from absl import flags\n'), ((2575, 2748), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""pre_initialize_parameters"""'], {'default': '(True)', 'help': '"""Whether to use 
pre-initialized document and topic intensities (with Poisson factorization)."""'}), "('pre_initialize_parameters', default=True, help=\n 'Whether to use pre-initialized document and topic intensities (with Poisson factorization).'\n )\n", (2595, 2748), False, 'from absl import flags\n'), ((2810, 2905), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""data"""'], {'default': '"""senate-speeches-114"""', 'help': '"""Data source being used."""'}), "('data', default='senate-speeches-114', help=\n 'Data source being used.')\n", (2829, 2905), False, 'from absl import flags\n'), ((2941, 3073), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""senate_session"""'], {'default': '(113)', 'help': '"""Senate session (used only when data is \'senate-speech-comparisons\'."""'}), '(\'senate_session\', default=113, help=\n "Senate session (used only when data is \'senate-speech-comparisons\'.")\n', (2961, 3073), False, 'from absl import flags\n'), ((3140, 3244), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""print_steps"""'], {'default': '(500)', 'help': '"""Number of steps to print and save results."""'}), "('print_steps', default=500, help=\n 'Number of steps to print and save results.')\n", (3160, 3244), False, 'from absl import flags\n'), ((3282, 3355), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""seed"""'], {'default': '(123)', 'help': '"""Random seed to be used."""'}), "('seed', default=123, help='Random seed to be used.')\n", (3302, 3355), False, 'from absl import flags\n'), ((4321, 4347), 'numpy.max', 'np.max', (['(author_indices + 1)'], {}), '(author_indices + 1)\n', (4327, 4347), True, 'import numpy as np\n'), ((5423, 5516), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(documents, shuffled_counts, shuffled_author_indices)'], {}), '((documents, shuffled_counts,\n shuffled_author_indices))\n', (5457, 5516), True, 'import tensorflow as tf\n'), ((8197, 8233), 'tensorflow.nn.softplus', 
'tf.nn.softplus', (['document_scale_logit'], {}), '(document_scale_logit)\n', (8211, 8233), True, 'import tensorflow as tf\n'), ((8260, 8303), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['objective_topic_scale_logit'], {}), '(objective_topic_scale_logit)\n', (8274, 8303), True, 'import tensorflow as tf\n'), ((8309, 8366), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""params/document_loc"""', 'document_loc'], {}), "('params/document_loc', document_loc)\n", (8329, 8366), True, 'import tensorflow as tf\n'), ((8369, 8440), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""params/objective_topic_loc"""', 'objective_topic_loc'], {}), "('params/objective_topic_loc', objective_topic_loc)\n", (8389, 8440), True, 'import tensorflow as tf\n'), ((8443, 8504), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""params/document_scale"""', 'document_scale'], {}), "('params/document_scale', document_scale)\n", (8463, 8504), True, 'import tensorflow as tf\n'), ((8507, 8582), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""params/objective_topic_scale"""', 'objective_topic_scale'], {}), "('params/objective_topic_scale', objective_topic_scale)\n", (8527, 8582), True, 'import tensorflow as tf\n'), ((9405, 9438), 'numpy.argsort', 'np.argsort', (['(-neutral_mean)'], {'axis': '(1)'}), '(-neutral_mean, axis=1)\n', (9415, 9438), True, 'import numpy as np\n'), ((9462, 9496), 'numpy.argsort', 'np.argsort', (['(-negative_mean)'], {'axis': '(1)'}), '(-negative_mean, axis=1)\n', (9472, 9496), True, 'import numpy as np\n'), ((9520, 9554), 'numpy.argsort', 'np.argsort', (['(-positive_mean)'], {'axis': '(1)'}), '(-positive_mean, axis=1)\n', (9530, 9554), True, 'import numpy as np\n'), ((10648, 10671), 'numpy.array', 'np.array', (['topic_strings'], {}), '(topic_strings)\n', (10656, 10671), True, 'import numpy as np\n'), ((13495, 13539), 'tensorflow_probability.distributions.Normal', 'tfp.distributions.Normal', ([], {'loc': '(0.0)', 'scale': 
'(1.0)'}), '(loc=0.0, scale=1.0)\n', (13519, 13539), True, 'import tensorflow_probability as tfp\n'), ((14133, 14186), 'tensorflow.gather', 'tf.gather', (['document_samples', 'document_indices'], {'axis': '(1)'}), '(document_samples, document_indices, axis=1)\n', (14142, 14186), True, 'import tensorflow as tf\n'), ((14295, 14349), 'tensorflow.gather', 'tf.gather', (['ideal_point_samples', 'author_indices'], {'axis': '(1)'}), '(ideal_point_samples, author_indices, axis=1)\n', (14304, 14349), True, 'import tensorflow as tf\n'), ((14468, 14571), 'tensorflow.exp', 'tf.exp', (['(selected_ideal_points[:, :, :, tf.newaxis] * ideological_topic_samples[:,\n tf.newaxis, :, :])'], {}), '(selected_ideal_points[:, :, :, tf.newaxis] *\n ideological_topic_samples[:, tf.newaxis, :, :])\n', (14474, 14571), True, 'import tensorflow as tf\n'), ((14688, 14729), 'tensorflow.gather', 'tf.gather', (['author_weights', 'author_indices'], {}), '(author_weights, author_indices)\n', (14697, 14729), True, 'import tensorflow as tf\n'), ((15545, 15718), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(selected_document_samples[:, :, :, tf.newaxis] * objective_topic_samples[:,\n tf.newaxis, :, :] * selected_ideological_topic_samples[:, :, :, :])'], {'axis': '(2)'}), '(selected_document_samples[:, :, :, tf.newaxis] *\n objective_topic_samples[:, tf.newaxis, :, :] *\n selected_ideological_topic_samples[:, :, :, :], axis=2)\n', (15558, 15718), True, 'import tensorflow as tf\n'), ((15763, 15799), 'tensorflow_probability.distributions.Poisson', 'tfp.distributions.Poisson', ([], {'rate': 'rate'}), '(rate=rate)\n', (15788, 15799), True, 'import tensorflow_probability as tfp\n'), ((15976, 16024), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['count_log_likelihood'], {'axis': '[1, 2]'}), '(count_log_likelihood, axis=[1, 2])\n', (15989, 16024), True, 'import tensorflow as tf\n'), ((16223, 16243), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['elbo'], {}), '(elbo)\n', (16237, 16243), True, 'import tensorflow as 
tf\n'), ((16247, 16283), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""elbo/elbo"""', 'elbo'], {}), "('elbo/elbo', elbo)\n", (16264, 16283), True, 'import tensorflow as tf\n'), ((16563, 16593), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['FLAGS.seed'], {}), '(FLAGS.seed)\n', (16581, 16593), True, 'import tensorflow as tf\n'), ((16611, 16644), 'numpy.random.RandomState', 'np.random.RandomState', (['FLAGS.seed'], {}), '(FLAGS.seed)\n', (16632, 16644), True, 'import numpy as np\n'), ((17150, 17183), 'os.path.join', 'os.path.join', (['source_dir', '"""clean"""'], {}), "(source_dir, 'clean')\n", (17162, 17183), False, 'import os\n'), ((17197, 17234), 'os.path.join', 'os.path.join', (['source_dir', '"""tbip-fits"""'], {}), "(source_dir, 'tbip-fits')\n", (17209, 17234), False, 'import os\n'), ((17240, 17265), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['save_dir'], {}), '(save_dir)\n', (17255, 17265), True, 'import tensorflow as tf\n'), ((17383, 17410), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['save_dir'], {}), '(save_dir)\n', (17400, 17410), True, 'import tensorflow as tf\n'), ((18903, 18970), 'tensorflow_probability.distributions.LogNormal', 'tfp.distributions.LogNormal', ([], {'loc': 'document_loc', 'scale': 'document_scale'}), '(loc=document_loc, scale=document_scale)\n', (18930, 18970), True, 'import tensorflow_probability as tfp\n'), ((19018, 19104), 'tensorflow_probability.distributions.LogNormal', 'tfp.distributions.LogNormal', ([], {'loc': 'objective_topic_loc', 'scale': 'objective_topic_scale'}), '(loc=objective_topic_loc, scale=\n objective_topic_scale)\n', (19045, 19104), True, 'import tensorflow_probability as tfp\n'), ((19143, 19243), 'tensorflow.get_variable', 'tf.get_variable', (['"""ideological_topic_loc"""'], {'shape': '[FLAGS.num_topics, num_words]', 'dtype': 'tf.float32'}), "('ideological_topic_loc', shape=[FLAGS.num_topics, num_words\n ], dtype=tf.float32)\n", (19158, 19243), True, 'import tensorflow as tf\n'), 
((19292, 19399), 'tensorflow.get_variable', 'tf.get_variable', (['"""ideological_topic_scale_logit"""'], {'shape': '[FLAGS.num_topics, num_words]', 'dtype': 'tf.float32'}), "('ideological_topic_scale_logit', shape=[FLAGS.num_topics,\n num_words], dtype=tf.float32)\n", (19307, 19399), True, 'import tensorflow as tf\n'), ((19443, 19488), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['ideological_topic_scale_logit'], {}), '(ideological_topic_scale_logit)\n', (19457, 19488), True, 'import tensorflow as tf\n'), ((19491, 19566), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""params/ideological_topic_loc"""', 'ideological_topic_loc'], {}), "('params/ideological_topic_loc', ideological_topic_loc)\n", (19511, 19566), True, 'import tensorflow as tf\n'), ((19569, 19648), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['"""params/ideological_topic_scale"""', 'ideological_topic_scale'], {}), "('params/ideological_topic_scale', ideological_topic_scale)\n", (19589, 19648), True, 'import tensorflow as tf\n'), ((19708, 19795), 'tensorflow_probability.distributions.Normal', 'tfp.distributions.Normal', ([], {'loc': 'ideological_topic_loc', 'scale': 'ideological_topic_scale'}), '(loc=ideological_topic_loc, scale=\n ideological_topic_scale)\n', (19732, 19795), True, 'import tensorflow_probability as tfp\n'), ((19827, 19900), 'tensorflow.get_variable', 'tf.get_variable', (['"""ideal_point_loc"""'], {'shape': '[num_authors]', 'dtype': 'tf.float32'}), "('ideal_point_loc', shape=[num_authors], dtype=tf.float32)\n", (19842, 19900), True, 'import tensorflow as tf\n'), ((20139, 20178), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['ideal_point_scale_logit'], {}), '(ideal_point_scale_logit)\n', (20153, 20178), True, 'import tensorflow as tf\n'), ((20208, 20278), 'tensorflow_probability.distributions.Normal', 'tfp.distributions.Normal', ([], {'loc': 'ideal_point_loc', 'scale': 'ideal_point_scale'}), '(loc=ideal_point_loc, scale=ideal_point_scale)\n', (20232, 20278), 
True, 'import tensorflow_probability as tfp\n'), ((20963, 20994), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {}), "('loss', loss)\n", (20980, 20994), True, 'import tensorflow as tf\n'), ((21006, 21063), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'FLAGS.learning_rate'}), '(learning_rate=FLAGS.learning_rate)\n', (21028, 21063), True, 'import tensorflow as tf\n'), ((23316, 23349), 'tensorflow.summary.text', 'tf.summary.text', (['"""topics"""', 'topics'], {}), "('topics', topics)\n", (23331, 23349), True, 'import tensorflow as tf\n'), ((23352, 23401), 'tensorflow.summary.text', 'tf.summary.text', (['"""ideal_points"""', 'ideal_point_list'], {}), "('ideal_points', ideal_point_list)\n", (23367, 23401), True, 'import tensorflow as tf\n'), ((23418, 23440), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (23438, 23440), True, 'import tensorflow as tf\n'), ((23450, 23483), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (23481, 23483), True, 'import tensorflow as tf\n'), ((25890, 25902), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (25900, 25902), True, 'import tensorflow as tf\n'), ((4127, 4163), 'os.path.join', 'os.path.join', (['data_dir', '"""counts.npz"""'], {}), "(data_dir, 'counts.npz')\n", (4139, 4163), False, 'import os\n'), ((4374, 4414), 'os.path.join', 'os.path.join', (['data_dir', '"""author_map.txt"""'], {}), "(data_dir, 'author_map.txt')\n", (4386, 4414), False, 'import os\n'), ((5660, 5700), 'os.path.join', 'os.path.join', (['data_dir', '"""vocabulary.txt"""'], {}), "(data_dir, 'vocabulary.txt')\n", (5672, 5700), False, 'import os\n'), ((6017, 6044), 'numpy.bincount', 'np.bincount', (['author_indices'], {}), '(author_indices)\n', (6028, 6044), True, 'import numpy as np\n'), ((11223, 11267), 'tensorflow_probability.distributions.Normal', 'tfp.distributions.Normal', ([], {'loc': '(0.0)', 'scale': '(1.0)'}), 
'(loc=0.0, scale=1.0)\n', (11247, 11267), True, 'import tensorflow_probability as tfp\n'), ((15923, 15949), 'tensorflow.sparse.to_dense', 'tf.sparse.to_dense', (['counts'], {}), '(counts)\n', (15941, 15949), True, 'import tensorflow as tf\n'), ((16322, 16347), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['log_prior'], {}), '(log_prior)\n', (16336, 16347), True, 'import tensorflow as tf\n'), ((16419, 16455), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['count_log_likelihood'], {}), '(count_log_likelihood)\n', (16433, 16455), True, 'import tensorflow as tf\n'), ((16493, 16516), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['entropy'], {}), '(entropy)\n', (16507, 16516), True, 'import tensorflow as tf\n'), ((16680, 16705), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (16695, 16705), False, 'import os\n'), ((17344, 17380), 'tensorflow.gfile.DeleteRecursively', 'tf.gfile.DeleteRecursively', (['save_dir'], {}), '(save_dir)\n', (17370, 17380), True, 'import tensorflow as tf\n'), ((17751, 17786), 'os.path.join', 'os.path.join', (['source_dir', '"""pf-fits"""'], {}), "(source_dir, 'pf-fits')\n", (17763, 17786), False, 'import os\n'), ((20365, 20398), 'tensorflow.reshape', 'tf.reshape', (['ideal_point_loc', '[-1]'], {}), '(ideal_point_loc, [-1])\n', (20375, 20398), True, 'import tensorflow as tf\n'), ((20475, 20510), 'tensorflow.reshape', 'tf.reshape', (['ideal_point_scale', '[-1]'], {}), '(ideal_point_scale, [-1])\n', (20485, 20510), True, 'import tensorflow as tf\n'), ((23008, 23062), 'functools.partial', 'functools.partial', (['print_topics'], {'vocabulary': 'vocabulary'}), '(print_topics, vocabulary=vocabulary)\n', (23025, 23062), False, 'import functools\n'), ((23194, 23254), 'functools.partial', 'functools.partial', (['print_ideal_points'], {'author_map': 'author_map'}), '(print_ideal_points, author_map=author_map)\n', (23211, 23254), False, 'import functools\n'), ((23492, 23504), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', 
(23502, 23504), True, 'import tensorflow as tf\n'), ((23535, 23578), 'tensorflow.summary.FileWriter', 'tf.summary.FileWriter', (['save_dir', 'sess.graph'], {}), '(save_dir, sess.graph)\n', (23556, 23578), True, 'import tensorflow as tf\n'), ((23615, 23626), 'time.time', 'time.time', ([], {}), '()\n', (23624, 23626), False, 'import time\n'), ((4884, 4918), 'numpy.int32', 'np.int32', (['(shuffled_counts.data > 0)'], {}), '(shuffled_counts.data > 0)\n', (4892, 4918), True, 'import numpy as np\n'), ((7889, 7938), 'tensorflow.initializers.random_normal', 'tf.initializers.random_normal', ([], {'mean': '(0)', 'stddev': '(1.0)'}), '(mean=0, stddev=1.0)\n', (7918, 7938), True, 'import tensorflow as tf\n'), ((8104, 8153), 'tensorflow.initializers.random_normal', 'tf.initializers.random_normal', ([], {'mean': '(0)', 'stddev': '(1.0)'}), '(mean=0, stddev=1.0)\n', (8133, 8153), True, 'import tensorflow as tf\n'), ((10809, 10836), 'numpy.argsort', 'np.argsort', (['ideal_point_loc'], {}), '(ideal_point_loc)\n', (10819, 10836), True, 'import numpy as np\n'), ((11316, 11368), 'tensorflow_probability.distributions.Gamma', 'tfp.distributions.Gamma', ([], {'concentration': '(0.3)', 'rate': '(0.3)'}), '(concentration=0.3, rate=0.3)\n', (11339, 11368), True, 'import tensorflow_probability as tfp\n'), ((20016, 20065), 'tensorflow.initializers.random_normal', 'tf.initializers.random_normal', ([], {'mean': '(0)', 'stddev': '(1.0)'}), '(mean=0, stddev=1.0)\n', (20045, 20065), True, 'import tensorflow as tf\n'), ((4241, 4285), 'os.path.join', 'os.path.join', (['data_dir', '"""author_indices.npy"""'], {}), "(data_dir, 'author_indices.npy')\n", (4253, 4285), False, 'import os\n'), ((7589, 7617), 'numpy.log', 'np.log', (['initial_document_loc'], {}), '(initial_document_loc)\n', (7595, 7617), True, 'import numpy as np\n'), ((7720, 7755), 'numpy.log', 'np.log', (['initial_objective_topic_loc'], {}), '(initial_objective_topic_loc)\n', (7726, 7755), True, 'import numpy as np\n'), ((24152, 24185), 
'os.path.join', 'os.path.join', (['save_dir', '"""params/"""'], {}), "(save_dir, 'params/')\n", (24164, 24185), False, 'import os\n'), ((4986, 5018), 'numpy.log', 'np.log', (['(1 + shuffled_counts.data)'], {}), '(1 + shuffled_counts.data)\n', (4992, 5018), True, 'import numpy as np\n'), ((6206, 6228), 'numpy.sum', 'np.sum', (['counts'], {'axis': '(1)'}), '(counts, axis=1)\n', (6212, 6228), True, 'import numpy as np\n'), ((17832, 17875), 'os.path.join', 'os.path.join', (['fit_dir', '"""document_shape.npy"""'], {}), "(fit_dir, 'document_shape.npy')\n", (17844, 17875), False, 'import os\n'), ((17940, 17982), 'os.path.join', 'os.path.join', (['fit_dir', '"""document_rate.npy"""'], {}), "(fit_dir, 'document_rate.npy')\n", (17952, 17982), False, 'import os\n'), ((18045, 18085), 'os.path.join', 'os.path.join', (['fit_dir', '"""topic_shape.npy"""'], {}), "(fit_dir, 'topic_shape.npy')\n", (18057, 18085), False, 'import os\n'), ((18147, 18186), 'os.path.join', 'os.path.join', (['fit_dir', '"""topic_rate.npy"""'], {}), "(fit_dir, 'topic_rate.npy')\n", (18159, 18186), False, 'import os\n'), ((23734, 23745), 'time.time', 'time.time', ([], {}), '()\n', (23743, 23745), False, 'import time\n'), ((24201, 24232), 'tensorflow.gfile.Exists', 'tf.gfile.Exists', (['param_save_dir'], {}), '(param_save_dir)\n', (24216, 24232), True, 'import tensorflow as tf\n'), ((24244, 24277), 'tensorflow.gfile.MakeDirs', 'tf.gfile.MakeDirs', (['param_save_dir'], {}), '(param_save_dir)\n', (24261, 24277), True, 'import tensorflow as tf\n'), ((24999, 25043), 'os.path.join', 'os.path.join', (['param_save_dir', '"""document_loc"""'], {}), "(param_save_dir, 'document_loc')\n", (25011, 25043), False, 'import os\n'), ((25096, 25142), 'os.path.join', 'os.path.join', (['param_save_dir', '"""document_scale"""'], {}), "(param_save_dir, 'document_scale')\n", (25108, 25142), False, 'import os\n'), ((25197, 25248), 'os.path.join', 'os.path.join', (['param_save_dir', '"""objective_topic_loc"""'], {}), 
"(param_save_dir, 'objective_topic_loc')\n", (25209, 25248), False, 'import os\n'), ((25308, 25361), 'os.path.join', 'os.path.join', (['param_save_dir', '"""objective_topic_scale"""'], {}), "(param_save_dir, 'objective_topic_scale')\n", (25320, 25361), False, 'import os\n'), ((25423, 25476), 'os.path.join', 'os.path.join', (['param_save_dir', '"""ideological_topic_loc"""'], {}), "(param_save_dir, 'ideological_topic_loc')\n", (25435, 25476), False, 'import os\n'), ((25538, 25593), 'os.path.join', 'os.path.join', (['param_save_dir', '"""ideological_topic_scale"""'], {}), "(param_save_dir, 'ideological_topic_scale')\n", (25550, 25593), False, 'import os\n'), ((25657, 25704), 'os.path.join', 'os.path.join', (['param_save_dir', '"""ideal_point_loc"""'], {}), "(param_save_dir, 'ideal_point_loc')\n", (25669, 25704), False, 'import os\n'), ((25760, 25809), 'os.path.join', 'os.path.join', (['param_save_dir', '"""ideal_point_scale"""'], {}), "(param_save_dir, 'ideal_point_scale')\n", (25772, 25809), False, 'import os\n'), ((5088, 5117), 'numpy.sqrt', 'np.sqrt', (['shuffled_counts.data'], {}), '(shuffled_counts.data)\n', (5095, 5117), True, 'import numpy as np\n'), ((5913, 5935), 'numpy.sum', 'np.sum', (['counts'], {'axis': '(1)'}), '(counts, axis=1)\n', (5919, 5935), True, 'import numpy as np\n')] |
from tkinter import *
from tax_profiler import TaxProfile
from tkinter import messagebox as mb
class Example(Frame, TaxProfile):
    """Tkinter front-end for TaxProfile.

    Collects the current quarter's revenue (and, for quarters 2-4, the
    amounts already paid in earlier quarters) and displays the resulting
    USN / PFR / FFOMS tax figures.
    """

    def __init__(self, parent):
        TaxProfile.__init__(self)
        Frame.__init__(self, parent, background="lightblue")
        # The grid layout is not responsive, so pin the window size.
        parent.minsize(width=500, height=200)
        parent.maxsize(width=500, height=200)
        self.parent = parent
        self.initUI()

    def get_those_numbers(self, event):
        """Store the previous-quarter figures typed into the popup.

        If any field is not an integer an error dialog is shown and the
        popup stays open so the user can correct the input.
        """
        try:
            self.set_revenue_last(int(self.entry1.get()))
            self.set_usn_paid(int(self.entry2.get()))
            self.set_oms_paid(int(self.entry3.get()))
            self.set_pfr_paid(int(self.entry4.get()))
        except ValueError:
            mb.showerror("Error", "Введите все данные числами")
            return
        self.top.destroy()

    def kvartal_windows(self):
        """Validate the entered quarter; for quarters 2-4 open a popup
        asking for the amounts accumulated in previous quarters."""
        try:
            self.kvartal = int(self.entry_kvartal.get())
        except ValueError:
            mb.showerror("Error", "Введите квартал числом (1-4)")
            # Bug fix: bail out here -- otherwise the range check below
            # reads self.kvartal, which was never assigned, raising
            # AttributeError.
            return
        if self.kvartal < 1 or self.kvartal > 4:
            mb.showerror("Error", "Введите квартал числом (1-4)")
            return
        self.top_start.destroy()
        if self.kvartal == 1:
            # First quarter: nothing was paid earlier, no extra data needed.
            return
        self.top = Toplevel(width=650, height=250)
        self.top.minsize(200, 400)
        self.top.title("Начало работы")
        label1 = Message(
            self.top, text="Данные за предыдущие кварталы", bg="lightblue", bd=5,
            relief="groove", font=("Helvetica", 12))
        label1.pack()
        label2 = Message(self.top, text="Введите доход:", bg="lightblue", bd=5,
                         relief="groove", font=("Helvetica", 12))
        label2.pack()
        self.entry1 = Entry(self.top)
        self.entry1.pack()
        label3 = Message(self.top, text="Введите УСН:", bg="lightblue", bd=5,
                         relief="groove", font=("Helvetica", 11))
        label3.pack()
        self.entry2 = Entry(self.top)
        self.entry2.pack()
        label4 = Message(self.top, text="Введите ПФР:", bg="lightblue", bd=5,
                         relief="groove", font=("Helvetica", 11))
        label4.pack()
        self.entry3 = Entry(self.top)
        self.entry3.pack()
        label5 = Message(self.top, text="Введите ФФОМС:", bg="lightblue", bd=5,
                         relief="groove", font=("Helvetica", 11))
        label5.pack()
        self.entry4 = Entry(self.top)
        self.entry4.pack()
        button = Button(self.top, text="Далее")
        button.pack()
        button.bind("<Button-1>", self.get_those_numbers)

    def start_window(self):
        """Popup that asks for the current quarter number (1-4)."""
        self.top_start = Toplevel()
        self.top_start.title("Начало работы")
        self.top_start.minsize(150, 100)
        self.top_start.maxsize(150, 100)
        msg = Message(self.top_start, text="Введите текущий квартал")
        msg.pack()
        self.entry_kvartal = Entry(self.top_start)
        self.entry_kvartal.pack()
        button = Button(
            self.top_start, text="Далее",
            command=self.kvartal_windows)
        button.pack()

    def output(self, event):
        """Recompute and display the taxes for the revenue typed in."""
        default = "0"
        # Reset the result labels so stale values never survive an error.
        self.entry_fond["text"] = default
        self.entry_pfr["text"] = default
        self.entry_usn["text"] = default
        try:
            # Parse once instead of re-reading the entry for the check below.
            revenue = int(self.entry_dohod.get())
            self.set_revenue(revenue)
            if revenue <= 0:
                mb.showerror("Error", "Введите число в графу доход")
            else:
                self.entry_fond["text"] = self.get_oms()
                self.entry_pfr["text"] = self.get_pfr()
                self.entry_usn["text"] = self.get_usn()
        except ValueError:
            mb.showerror("Error", "Введите число в графу доход")

    def initUI(self):
        """Build the main window: revenue entry, result labels, run button."""
        self.parent.title("Калькулятор налогов")
        self.pack(fill=BOTH, expand=True)
        self.columnconfigure(4, weight=2)
        dohod = Label(self, text="Доход:", bg="lightblue", bd=5,
                      relief="groove", font=("Helvetica", 12))
        dohod.grid(sticky=W, pady=4, padx=10, column=0, row=1)
        nalog = Label(self, text="Налоги:", bg="lightblue", bd=5,
                      relief="groove", font=("Helvetica", 12))
        nalog.grid(sticky=W, pady=10, padx=10, column=2, row=0)
        usn = Label(self, text="УСН:", bg="lightblue", bd=5,
                    relief="groove", font=("Helvetica", 12))
        usn.grid(sticky=W, pady=4, padx=10, column=2, row=1)
        pfr = Label(self, text="ПФР:", bg="lightblue", bd=5,
                    relief="groove", font=("Helvetica", 12))
        pfr.grid(sticky=W, pady=4, padx=10, column=2, row=2)
        fond = Label(self, text="ФФОМС:", bg="lightblue", bd=5,
                     relief="groove", font=("Helvetica", 12))
        fond.grid(sticky=W + N, pady=4, padx=10, column=2, row=3)
        self.entry_dohod = Entry(self)
        self.entry_dohod.grid(sticky=W, pady=4, padx=5, column=1, row=1)
        self.entry_usn = Label(self, text=self.get_usn(), bg="white", width=15)
        self.entry_usn.grid(sticky=W + N, pady=4, padx=5, column=3, row=1)
        self.entry_pfr = Label(self, text=self.get_pfr(), width=15, bg="white")
        self.entry_pfr.grid(sticky=W + N, pady=4, padx=5, column=3, row=2)
        self.entry_fond = Label(
            self, text=self.get_oms(), width=15, bg="white")
        self.entry_fond.grid(sticky=W + N, pady=4, padx=5, column=3, row=3)
        ras = Button(self, text="Рассчитать", width=30)
        ras.grid(row=3, column=0, columnspan=2, sticky=W + S + E + N, padx=10)
        self.start_window()
        ras.bind("<Button-1>", self.output)
        self.centerWindow()

    def centerWindow(self):
        """Center the fixed-size main window on the screen."""
        w = 650
        h = 250
        sw = self.parent.winfo_screenwidth()
        sh = self.parent.winfo_screenheight()
        x = (sw - w) / 2
        y = (sh - h) / 2
        self.parent.geometry('%dx%d+%d+%d' % (w, h, x, y))
def main():
    """Create the root window, attach the calculator UI and run the loop."""
    root = Tk()
    root.iconbitmap(r'py.ico')
    gui = Example(root)
    root.resizable(width=False, height=False)
    root.mainloop()
# Start the GUI only when this file is run directly, not on import.
if __name__ == '__main__':
    main()
| [
"tkinter.messagebox.showerror",
"tax_profiler.TaxProfile.__init__"
] | [((171, 196), 'tax_profiler.TaxProfile.__init__', 'TaxProfile.__init__', (['self'], {}), '(self)\n', (190, 196), False, 'from tax_profiler import TaxProfile\n'), ((1068, 1121), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Error"""', '"""Введите квартал числом (1-4)"""'], {}), "('Error', 'Введите квартал числом (1-4)')\n", (1080, 1121), True, 'from tkinter import messagebox as mb\n'), ((714, 765), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Error"""', '"""Введите все данные числами"""'], {}), "('Error', 'Введите все данные числами')\n", (726, 765), True, 'from tkinter import messagebox as mb\n'), ((953, 1006), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Error"""', '"""Введите квартал числом (1-4)"""'], {}), "('Error', 'Введите квартал числом (1-4)')\n", (965, 1006), True, 'from tkinter import messagebox as mb\n'), ((3409, 3461), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Error"""', '"""Введите число в графу доход"""'], {}), "('Error', 'Введите число в графу доход')\n", (3421, 3461), True, 'from tkinter import messagebox as mb\n'), ((3688, 3740), 'tkinter.messagebox.showerror', 'mb.showerror', (['"""Error"""', '"""Введите число в графу доход"""'], {}), "('Error', 'Введите число в графу доход')\n", (3700, 3740), True, 'from tkinter import messagebox as mb\n')] |
from django.contrib import admin
from lms_app.models import Professor

# Make the Professor model manageable through the default Django admin UI.
admin.site.register(Professor)
# Register your models here.
| [
"django.contrib.admin.site.register"
] | [((72, 102), 'django.contrib.admin.site.register', 'admin.site.register', (['Professor'], {}), '(Professor)\n', (91, 102), False, 'from django.contrib import admin\n')] |
from JDI.web.selenium.elements.composite.web_site import WebSite
from tests.jdi_uitests_webtests.main.page_objects.w3c_site.frame_page import FramePage
class W3cSite(WebSite):
    """Page-object model of w3schools.com for the JDI UI test suite."""
    # Base URL shared by all pages of this site.
    domain = "https://www.w3schools.com"
    # Page object for the <button> tag reference page; its url is
    # resolved relative to `domain`.
    frame_page = FramePage(url="/tags/tag_button.asp", domain=domain)
| [
"tests.jdi_uitests_webtests.main.page_objects.w3c_site.frame_page.FramePage"
] | [((237, 289), 'tests.jdi_uitests_webtests.main.page_objects.w3c_site.frame_page.FramePage', 'FramePage', ([], {'url': '"""/tags/tag_button.asp"""', 'domain': 'domain'}), "(url='/tags/tag_button.asp', domain=domain)\n", (246, 289), False, 'from tests.jdi_uitests_webtests.main.page_objects.w3c_site.frame_page import FramePage\n')] |
import copy
import logging
import numpy as np
import six
import tensorflow as tf
from functools import wraps
from contextlib import contextmanager
from .backend_base import BackendBase, FunctionBase, DeviceDecorator
try:
from tensorflow.contrib.distributions import fill_triangular
except:
print("Cannot find fill_triangular")
class TensorflowFunction(FunctionBase):
    """Compiled TF1 graph callable: evaluates `outputs` and applies the
    variable `updates` in a single session run."""
    def __init__(self, *args, **kwargs):
        super(TensorflowFunction, self).__init__(*args, **kwargs)
        # Force every update op to run only after all outputs have been
        # computed, so the returned outputs see pre-update values.
        with tf.control_dependencies(self.outputs):
            self.updates = [tf.assign(k, v) for k, v in self.updates]

    def __call__(self, *inputs):
        # feed_dict / session come from FunctionBase (defined elsewhere).
        feed_dict = self.feed_dict(*inputs)
        result = self.session.get_current_session().run(self.outputs + self.updates, feed_dict=feed_dict)
        # Only the outputs are returned; a single output is unwrapped.
        if len(self.outputs) == 1:
            return result[0]
        return result[:len(self.outputs)]
return result[:len(self.outputs)]
@six.add_metaclass(DeviceDecorator)
class TensorflowBackend(BackendBase):
    def __init__(self, **kwargs):
        super(TensorflowBackend, self).__init__(**kwargs)
        self.core = tf  # expose the raw tensorflow module to callers
        self._sessions = []  # stack of active sessions (see session())
        # Prefer a GPU when TensorFlow reports one is available.
        self.set_default_device(self.gpu() if tf.test.is_gpu_available() else self.cpu())
# General purpose methods
    @classmethod
    def use_device(cls, method):
        """Decorator: run `method` inside a tf.device() scope for the
        backend's currently selected device."""
        @wraps(method)
        def func(self, *args, **kwargs):
            with tf.device(self.get_current_device()):
                result = method(self, *args, **kwargs)
            return result
        return func
def enable_eager(self):
tf.enable_eager_execution()
def cpu(self, id=0):
return 'cpu/:%u' % id
def gpu(self, id=0):
return 'gpu/:%u' % id
@property
def int32(self):
return tf.int32
@property
def float32(self):
return tf.float32
def _placeholder(self, dtype=None, shape=None, name=None):
with self._device(self.get_current_device()):
return tf.placeholder(dtype, shape=shape, name=name)
def _variable(self, initial_value=None, trainable=True, name=None):
with self._device(self.get_current_device()):
return tf.Variable(initial_value=initial_value, trainable=trainable, name=name)
def _device(self, name):
return tf.device(name)
    def create_session(self, graph=None, **kwargs):
        """Create and initialize a tf.Session.

        `allow_growth` is popped from kwargs and applied to the GPU
        options; every remaining kwarg is forwarded to tf.ConfigProto.
        """
        allow_growth = kwargs.pop('allow_growth', False)
        config_proto = tf.ConfigProto(**kwargs)
        config_proto.gpu_options.allow_growth = allow_growth
        sess = tf.Session(graph=graph, config=config_proto)
        self._initialize(sess)
        return sess
    @contextmanager
    def session(self, **kwargs):
        """Context manager yielding a fresh, initialized session that is
        pushed onto the session stack for the duration of the block."""
        with self.create_session(**kwargs) as sess:
            self._sessions.append(sess)
            self._initialize(sess)
            yield sess
            # NOTE(review): skipped if the body raises, leaking the stack
            # entry -- consider wrapping in try/finally.
            self._sessions.pop()
def interactive_session(self, graph=None, **kwargs):
config_proto = tf.ConfigProto(**kwargs)
sess = tf.InteractiveSession(config=config_proto, graph=graph)
self._initialize(sess)
return sess
def get_current_session(self):
if len(self._sessions) == 0:
raise Exception('No current session')
return self._sessions[-1]
def _initialize(self, sess):
sess.run(tf.local_variables_initializer())
sess.run(tf.global_variables_initializer())
# Unified interface
def cast(self, x, dtype):
return tf.cast(x, dtype)
def dtype(self, x):
return x.dtype
def shape(self, x):
return tf.shape(x)
def rank(self, x):
return tf.rank(x)
def abs(self, x):
return tf.abs(x)
def set_value(self, x, value):
tf.assign(x, np.asarray(value)).op.run(session=self.get_current_session())
def zeros(self, shape, dtype=None, name=None):
dtype = dtype or self.floatx()
return tf.zeros(shape, dtype=dtype, name=name)
def zeros_like(self, x, dtype=None, name=None):
return tf.zeros_like(x, dtype=dtype, name=name)
def ones(self, shape, dtype=None, name=None):
dtype = dtype or self.floatx()
return tf.ones(shape, dtype=dtype, name=name)
def ones_like(self, x, dtype=None, name=None):
return tf.ones_like(x, dtype=dtype, name=name)
def random_normal(self, shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = dtype or self.floatx()
return tf.random_normal(shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
def random_truncated_normal(self, shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
dtype = dtype or self.floatx()
return tf.truncated_normal(shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
def random_uniform(self, shape, minval=0, maxval=None, dtype=None, seed=None):
dtype = dtype or self.floatx()
return tf.random_uniform(shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
def random_binomial(self, shape, p=0.5, dtype=None):
dtype = dtype or self.floatx()
return tf.where(tf.random_uniform(shape, dtype=dtype) <= p,
tf.ones(shape, dtype=dtype),
tf.zeros(shape, dtype=dtype))
def random_gamma(self, shape, alpha, beta=None):
return tf.random_gamma(shape, alpha, beta=beta)
pass
def tanh(self, x, name=None):
return tf.tanh(x, name=name)
def sigmoid(self, x, name=None):
return tf.sigmoid(x, name=name)
def relu(self, x, alpha=0., name=None):
return tf.nn.relu(x, name=name)
def softmax(self, x, T=1.0):
return tf.nn.softmax(x)
def softplus(self, x):
return tf.nn.softplus(x)
def dropout(self, x, p, seed=None):
retain_prob = 1. - p
if seed is None:
seed = np.random.randint(10e6)
return tf.nn.dropout(x * 1., retain_prob, seed=seed)
    def conv2d(self, x, kernel, strides=(1, 1), border_mode='same',
               image_shape=None, filter_shape=None):
        '''
        2-D convolution. Run on cuDNN if available.
        border_mode: string, "same" or "valid".
        dim_ordering: whether to use Theano or TensorFlow dimension ordering
        in inputs/kernels/outputs.
        image_shape / filter_shape are accepted for interface parity but
        unused here.
        '''
        if border_mode == 'same':
            padding = 'SAME'
        elif border_mode == 'valid':
            padding = 'VALID'
        else:
            raise Exception('Invalid border mode: ' + str(border_mode))
        # strides = strides# + (1,)
        # Compute in float32 and cast back -- presumably a workaround for
        # missing float64 convolution kernels; confirm.
        if self.floatx() == 'float64':
            x = tf.cast(x, 'float32')
            kernel = tf.cast(kernel, 'float32')
        x = tf.nn.convolution(input=x, filter=kernel, strides=strides, padding=padding,
                             data_format='NHWC')
        if self.floatx() == 'float64':
            x = tf.cast(x, 'float64')
        return x
    def conv2d_transpose(self, x, kernel, dim_out, strides=(1, 1), border_mode='same'):
        """Transposed (fractionally-strided) 2-D convolution.

        dim_out: output shape without the batch dimension; the batch size
        is read from `x` at graph-run time.
        """
        if border_mode == 'same':
            padding = 'SAME'
        elif border_mode == 'valid':
            padding = 'VALID'
        else:
            raise Exception('Invalid border mode: ' + str(border_mode))
        output_shape = [self.shape(x)[0]] + list(dim_out)
        # NHWC strides: never stride over the batch or channel axes.
        strides = (1,) + strides + (1,)
        # Compute in float32 and cast back -- presumably a float64
        # kernel-support workaround; confirm.
        if self.floatx() == 'float64':
            x = tf.cast(x, 'float32')
            kernel = tf.cast(kernel, 'float32')
        x = tf.nn.conv2d_transpose(x, kernel, output_shape, strides, padding=padding)
        if self.floatx() == 'float64':
            x = tf.cast(x, 'float64')
        return x
    def pool2d(self, x, pool_size, strides=(1, 1),
               border_mode='valid', pool_mode='max'):
        '''
        2-D max / average pooling over NHWC input.
        pool_size: tuple of 2 integers.
        strides: tuple of 2 integers.
        border_mode: one of "valid", "same".
        dim_ordering: one of "th", "tf".
        '''
        if border_mode == 'same':
            padding = 'SAME'
        elif border_mode == 'valid':
            padding = 'VALID'
        else:
            raise Exception('Invalid border mode: ' + str(border_mode))
        # NHWC: never pool over the batch or channel axes.
        strides = (1,) + strides + (1,)
        pool_size = (1,) + pool_size + (1,)
        # Compute in float32 and cast back -- presumably a float64
        # support workaround; confirm.
        if self.floatx() == 'float64':
            x = tf.cast(x, 'float32')
        if pool_mode == 'max':
            x = tf.nn.max_pool(x, pool_size, strides, padding=padding)
        elif pool_mode == 'avg':
            x = tf.nn.avg_pool(x, pool_size, strides, padding=padding)
        else:
            raise Exception('Invalid pooling mode: ' + str(pool_mode))
        if self.floatx() == 'float64':
            x = tf.cast(x, 'float64')
        return x
def flatten(self, x, leading=1):
leading_dim = self.shape(x)[:leading]
new_shape = tf.concat([leading_dim, [-1]], 0)
return tf.reshape(x, new_shape)
def split(self, x, num_splits, axis=None):
axis = axis % len(x.get_shape())
return tf.split(x, num_splits, axis=axis)
def reshape(self, x, shape):
return tf.reshape(x, shape)
def sum(self, x, axis=None, keepdims=False):
if x.dtype.base_dtype == tf.bool:
x = tf.cast(x, self.floatx())
return tf.reduce_sum(x, axis=axis, keepdims=keepdims)
def prod(self, x, axis=None, keepdims=False):
return tf.reduce_prod(x, axis=axis, keepdims=keepdims)
    def mean(self, x, axis=None, keepdims=False):
        """Mean-reduce `x` along `axis` (all axes when None).

        Negative axes are normalized against the static rank, and boolean
        inputs are cast to floatx before averaging.
        """
        if axis is not None and axis < 0:
            axis = axis % len(x.get_shape())
        if x.dtype.base_dtype == tf.bool:
            x = tf.cast(x, self.floatx())
        return tf.reduce_mean(x, axis=axis, keepdims=keepdims)
def batch_norm(self, x, beta, gamma):
mean, variance = tf.nn.moments(x, [0])
normed = tf.nn.batch_normalization(tf.identity(x), mean, variance, beta, gamma, self.epsilon())
return normed
def log(self, x):
return tf.log(x)
def log1p(self, x):
return tf.log1p(x)
def exp(self, x):
return tf.exp(x)
def pow(self, x, a):
return tf.pow(x, a)
def mul(self, x, y):
return tf.multiply(x, y)
def sqrt(self, x):
x = tf.clip_by_value(x,
tf.cast(0., dtype=self.floatx()),
tf.cast(np.inf, dtype=self.floatx()))
return tf.sqrt(x)
    def categorical_crossentropy(self, output, target, from_logits=False, axis=-1):
        """Cross-entropy between `target` and `output` distributions.

        When `from_logits` is False, `output` is treated as probabilities:
        renormalized along `axis` and clipped away from 0/1 for numerical
        stability. Otherwise TF's fused logits op is used.
        """
        if not from_logits:
            # scale preds so that the class probas of each sample sum to 1
            output = output / tf.reduce_sum(output, axis, True)
            # manual computation of crossentropy
            output = tf.clip_by_value(output, self.epsilon(), 1. - self.epsilon())
            return -tf.reduce_sum(target * tf.log(output), axis)
        else:
            return tf.nn.softmax_cross_entropy_with_logits_v2(logits=output, labels=target)
def binary_crossentropy(self, output, target, from_logits=False):
if from_logits:
return tf.nn.sigmoid_cross_entropy_with_logits(labels=target, logits=output)
else:
raise NotImplementedError
def concatenate(self, tensors, axis=-1):
return tf.concat(tensors, axis=axis)
def sort(self, tensor):
values, indices = tf.nn.top_k(-tensor, k=tf.shape(tensor)[0])
return -values, indices
def argmin(self, tensor, axis=0):
return tf.argmin(tensor, axis=axis)
def map(self, function, input):
return tf.map_fn(function, input)
    def rnn(self, step_function, input, initial_states, **kwargs):
        """Scan `step_function` over the leading time axis of `input`.

        `input` is assumed batch-major; it is transposed to time-major,
        scanned, and the result transposed back with the same permutation.
        NOTE(review): `self.range` is not defined in this class -- it
        presumably comes from BackendBase; confirm.
        """
        num_dims = self.rank(input)
        # Swap the first two axes (batch <-> time), keep the rest in order.
        perm = self.concat([[1, 0], self.range(2, num_dims)])
        input = self.transpose(input, perm)
        def step(state, input_):
            # tf.scan carries only the state; the step's `output` is dropped.
            output, state = step_function(input_, state, **kwargs)
            return state
        # [0] selects the first component of the scanned state structure.
        result = tf.scan(step, input, initial_states)[0]
        return self.transpose(result, perm)
def while_loop(self, condition, body, loop_vars, **kwargs):
return tf.while_loop(condition, body, loop_vars)
def scan(self, fn, elems, initializer=None):
return tf.scan(fn, elems, initializer=initializer, back_prop=True)
def logdet(self, A, **kwargs):
A = (A + self.matrix_transpose(A)) / 2.
term = tf.log(tf.matrix_diag_part(self.cholesky(A, **kwargs)))
return 2 * tf.reduce_sum(term, -1)
def einsum(self, subscripts, *operands):
return tf.einsum(subscripts, *operands)
def cholesky(self, A, lower=True, warn=True, correct=False):
assert lower is True
# Gradient through py_func adapted from https://gist.github.com/harpone/3453185b41d8d985356cbe5e57d67342
def py_func(func, inp, Tout, stateful=True, name=None, grad=None):
rnd_name = 'PyFuncGrad' + str(np.random.randint(0, 1E+8))
tf.RegisterGradient(rnd_name)(grad)
g = tf.get_default_graph()
with g.gradient_override_map({'PyFunc': rnd_name, 'PyFuncStateless': rnd_name}):
return tf.py_func(func, inp, Tout, stateful=stateful, name=name)
def correction(A):
A_new, del_ = A.copy(), 1e-4
while True:
try:
np.linalg.cholesky(A_new)
break
except np.linalg.linalg.LinAlgError:
if warn:
logging.warn('[Cholesky] singular matrix, adding diagonal {}'.format(del_))
A_new = A + del_ * np.eye(A.shape[-1]).astype(self.floatx())
del_ *= 2
return A_new
def _correction_grad(op, grad):
A = op.inputs[0]
return grad
if correct:
shape = A.get_shape()
A = py_func(correction, [A], A.dtype, grad=_correction_grad)
A.set_shape(shape)
return tf.cholesky(A)
# Tensorflow interface
def placeholder(self, dtype, shape=None, name=None):
return self._placeholder(dtype=dtype, shape=shape, name=name)
def variable(self, initial_value=None, trainable=True, name=None):
return self._variable(initial_value=initial_value, trainable=trainable, name=name)
def assign(self, a, b):
return tf.assign(a, b)
def to_float(self, x):
return tf.cast(x, self.floatx())
def constant(self, value, dtype=None, shape=None):
return tf.constant(value, dtype=dtype, shape=shape)
def get_shape(self, x):
return [a.value for a in tf.convert_to_tensor(x).get_shape()]
def get_value(self, variable):
return self.get_current_session().run(variable)
def concat(self, values, axis=-1):
return tf.concat(values, axis=axis)
def gather(self, params, indices):
return tf.gather(params, indices)
def gather_nd(self, params, indices):
return tf.gather_nd(params, indices)
def equal(self, x, y):
return tf.equal(x, y)
def logical_and(self, x, y):
return tf.logical_and(x, y)
def matmul(self, a, b, transpose_a=False, transpose_b=False, a_is_sparse=False, b_is_sparse=False, name=None):
return tf.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b, a_is_sparse=a_is_sparse, name=name)
def trace(self, a):
return tf.trace(a)
def transpose(self, a, perm=None):
return tf.transpose(a, perm=perm)
def matrix_transpose(self, a):
return tf.matrix_transpose(a)
def matrix_diag(self, a):
return tf.matrix_diag(a)
def matrix_diag_part(self, a):
return tf.matrix_diag_part(a)
def set_diag(self, input, diagonal):
return tf.linalg.set_diag(input, diagonal)
def band_part(self, input, num_lower, num_upper):
return tf.linalg.band_part(input, num_lower, num_upper)
def vec(self, A):
A = self.matrix_transpose(A)
leading_dim = self.shape(A)[:-2]
return self.reshape(A, self.concat([
leading_dim,
[-1]
], 0))
def unvec(self, v, m, n):
leading_dim = self.shape(v)[:-1]
return self.matrix_transpose(self.reshape(v, self.concat([
leading_dim,
[n, m]
], 0)))
    def kronecker(self, A, B):
        """Kronecker product over the trailing two dimensions of A and B."""
        # Outer product over the matrix dims:
        # C[..., i, j, k, l] = A[..., i, j] * B[..., k, l]
        C = (A[..., None, None] * B[..., None, None, :, :])
        # Unstack into a 2-D grid of blocks A[i, j] * B ...
        blocks = [
            tf.unstack(a, axis=-3 % len(a.shape)) for a in
            tf.unstack(C, axis=-4 % len(C.shape))
        ]
        # ... then tile the grid back: each row of blocks is concatenated
        # along the last axis, rows joined on the second-to-last axis.
        return tf.concat([
            tf.concat(a, -1) for a in blocks
        ], -2)
def block_sum(self, X, m, n):
leading_dim = self.shape(X)[:-2]
block_sum = self.zeros(self.concat([leading_dim, [m, m]], 0))
for i in range(n):
block_sum += X[..., i*m:(i+1)*m, i*m:(i+1)*m]
return block_sum
def block_trace(self, X, m, n):
blocks = []
for i in range(n):
blocks.append([])
for j in range(n):
block = self.trace(X[..., i*m:(i+1)*m, j*m:(j+1)*m])
blocks[-1].append(block)
return self.pack([
self.pack([
b for b in block
])
for block in blocks
])
def kronecker_vec(self, X, m, n):
leading_dim = tf.shape(X)[:-2]
blocks = []
for i in range(n):
blocks.append([])
for j in range(m):
idx = i * m + j
block = tf.matrix_transpose(tf.reshape(X[..., idx, :], tf.concat([leading_dim, [n, m]], 0)))
blocks[-1].append(block)
return tf.concat([tf.concat(b, -2) for b in blocks], -1)
def lower_triangular(self, a):
return fill_triangular(a)
def matrix_inverse(self, a):
return tf.matrix_inverse(a)
def expand_dims(self, x, dim=-1):
return tf.expand_dims(x, dim)
def tile(self, input, multiples):
return tf.tile(input, multiples)
def gradients(self, loss, variables):
return tf.gradients(loss, variables)
def square(self, x):
return tf.square(x)
def clip_by_value(self, x, low, high):
return tf.clip_by_value(x, low, high)
def stack(self, values, axis=0, name='stack'):
return tf.stack(values, axis=axis, name=name)
def unstack(self, values, num=None, axis=0, name='unstack'):
return tf.unstack(values, num=num, axis=axis, name=name)
def pack(self, *args, **kwargs):
return self.stack(*args, **kwargs)
def unpack(self, *args, **kwargs):
return self.unstack(*args, **kwargs)
def reduce_max(self, x, axis=None, keepdims=False):
return tf.reduce_max(x, axis=axis, keepdims=keepdims)
def reduce_logsumexp(self, x, axis=None, keepdims=False):
return tf.reduce_logsumexp(x, axis=axis, keepdims=keepdims)
def matrix_solve(self, matrix, rhs, adjoint=None):
return tf.matrix_solve(matrix, rhs, adjoint=adjoint)
# Theano interface
def dim(self, x):
return len(x.get_shape())
def scalar(self, name=None, dtype=None, shape=[]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def vector(self, name=None, dtype=None, shape=[None]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def matrix(self, name=None, dtype=None, shape=[None, None]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def tensor3(self, name=None, dtype=None, shape=[None, None, None]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def tensor4(self, name=None, dtype=None, shape=[None, None, None, None]):
dtype = dtype or self.floatx()
return self._placeholder(dtype=dtype, shape=shape, name=name)
def shared(self, value, name=None):
return self._variable(initial_value=value, name=name)
def arange(self, start, stop=None, step=None):
return self.range(start, stop=stop, step=step)
def sparse_dot(self, x, y):
return tf.sparse_tensor_dense_matmul(x, y)
def dot(self, x, y):
if len(x.get_shape()) != len(y.get_shape()):
len_y = len(y.get_shape())
new_y_shape = tf.concat([tf.shape(x)[:-len_y], tf.shape(y)], 0)
y = tf.broadcast_to(y, new_y_shape)
return tf.matmul(x, y)
def outer(self, x, y):
if len(x.get_shape()) == 0:
return x * y
return x[...,:,None] * y[...,None,:]
def eye(self, d, batch_shape=None):
return tf.eye(d, batch_shape=batch_shape)
def function(self, inputs, outputs, updates=[]):
return TensorflowFunction(self, inputs, outputs, updates)
def grad(self, loss, variables):
return tf.gradients(loss, variables)
def sqr(self, x):
return tf.square(x)
def argmax(self, x, axis=None):
return tf.argmax(x, axis=axis)
def max(self, x, axis=None, keepdims=False):
return tf.reduce_max(x, axis=axis, keepdims=keepdims)
def logsumexp(self, x, axis=None, keepdims=False):
return tf.reduce_logsumexp(x, axis=axis, keepdims=keepdims)
def switch(self, condition, then_expression, else_expression):
'''Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
'''
return tf.where(condition, then_expression, else_expression)
def alloc(self, value, shape, unbroadcast=None, dtype=None):
dtype = dtype or self.floatx()
vals = tf.fill(tf.stack(shape), np.array(value).astype(dtype))
new_shape = []
for s in shape:
if isinstance(s, tf.Tensor):
new_shape.append(None)
else:
new_shape.append(s)
vals.set_shape(new_shape)
return vals
def range(self, start, limit=None, delta=1):
if limit is None:
return tf.range(start, delta=delta)
return tf.range(start, limit, delta=delta)
def solve(self, a, b):
return tf.matrix_solve(a, b)
def one_hot(self, indices, depth):
return tf.one_hot(indices, depth)
# Science methods
def gammaln(self, x):
return tf.lgamma(x)
def multigammaln(self, a, p):
p = self.to_float(p)
p_ = self.cast(p, 'int32')
a = a[..., None]
i = self.to_float(self.range(1, p_ + 1))
term1 = p * (p - 1) / 4. * self.log(np.pi)
term2 = self.gammaln(a - (i - 1) / 2.)
return term1 + self.sum(term2, axis=-1)
def digamma(self, a):
return tf.digamma(a)
| [
"tensorflow.tile",
"tensorflow.matrix_diag_part",
"tensorflow.multiply",
"tensorflow.einsum",
"tensorflow.gradients",
"tensorflow.nn.softplus",
"tensorflow.nn.conv2d_transpose",
"tensorflow.while_loop",
"tensorflow.scan",
"tensorflow.pow",
"tensorflow.Session",
"functools.wraps",
"tensorflow... | [((899, 933), 'six.add_metaclass', 'six.add_metaclass', (['DeviceDecorator'], {}), '(DeviceDecorator)\n', (916, 933), False, 'import six\n'), ((1297, 1310), 'functools.wraps', 'wraps', (['method'], {}), '(method)\n', (1302, 1310), False, 'from functools import wraps\n'), ((1545, 1572), 'tensorflow.enable_eager_execution', 'tf.enable_eager_execution', ([], {}), '()\n', (1570, 1572), True, 'import tensorflow as tf\n'), ((2256, 2271), 'tensorflow.device', 'tf.device', (['name'], {}), '(name)\n', (2265, 2271), True, 'import tensorflow as tf\n'), ((2405, 2429), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '(**kwargs)\n', (2419, 2429), True, 'import tensorflow as tf\n'), ((2506, 2550), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'graph', 'config': 'config_proto'}), '(graph=graph, config=config_proto)\n', (2516, 2550), True, 'import tensorflow as tf\n'), ((2920, 2944), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '(**kwargs)\n', (2934, 2944), True, 'import tensorflow as tf\n'), ((2960, 3015), 'tensorflow.InteractiveSession', 'tf.InteractiveSession', ([], {'config': 'config_proto', 'graph': 'graph'}), '(config=config_proto, graph=graph)\n', (2981, 3015), True, 'import tensorflow as tf\n'), ((3433, 3450), 'tensorflow.cast', 'tf.cast', (['x', 'dtype'], {}), '(x, dtype)\n', (3440, 3450), True, 'import tensorflow as tf\n'), ((3539, 3550), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (3547, 3550), True, 'import tensorflow as tf\n'), ((3590, 3600), 'tensorflow.rank', 'tf.rank', (['x'], {}), '(x)\n', (3597, 3600), True, 'import tensorflow as tf\n'), ((3639, 3648), 'tensorflow.abs', 'tf.abs', (['x'], {}), '(x)\n', (3645, 3648), True, 'import tensorflow as tf\n'), ((3874, 3913), 'tensorflow.zeros', 'tf.zeros', (['shape'], {'dtype': 'dtype', 'name': 'name'}), '(shape, dtype=dtype, name=name)\n', (3882, 3913), True, 'import tensorflow as tf\n'), ((3982, 4022), 'tensorflow.zeros_like', 'tf.zeros_like', (['x'], {'dtype': 'dtype', 
'name': 'name'}), '(x, dtype=dtype, name=name)\n', (3995, 4022), True, 'import tensorflow as tf\n'), ((4128, 4166), 'tensorflow.ones', 'tf.ones', (['shape'], {'dtype': 'dtype', 'name': 'name'}), '(shape, dtype=dtype, name=name)\n', (4135, 4166), True, 'import tensorflow as tf\n'), ((4234, 4273), 'tensorflow.ones_like', 'tf.ones_like', (['x'], {'dtype': 'dtype', 'name': 'name'}), '(x, dtype=dtype, name=name)\n', (4246, 4273), True, 'import tensorflow as tf\n'), ((4410, 4483), 'tensorflow.random_normal', 'tf.random_normal', (['shape'], {'mean': 'mean', 'stddev': 'stddev', 'dtype': 'dtype', 'seed': 'seed'}), '(shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)\n', (4426, 4483), True, 'import tensorflow as tf\n'), ((4630, 4706), 'tensorflow.truncated_normal', 'tf.truncated_normal', (['shape'], {'mean': 'mean', 'stddev': 'stddev', 'dtype': 'dtype', 'seed': 'seed'}), '(shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)\n', (4649, 4706), True, 'import tensorflow as tf\n'), ((4845, 4923), 'tensorflow.random_uniform', 'tf.random_uniform', (['shape'], {'minval': 'minval', 'maxval': 'maxval', 'dtype': 'dtype', 'seed': 'seed'}), '(shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)\n', (4862, 4923), True, 'import tensorflow as tf\n'), ((5303, 5343), 'tensorflow.random_gamma', 'tf.random_gamma', (['shape', 'alpha'], {'beta': 'beta'}), '(shape, alpha, beta=beta)\n', (5318, 5343), True, 'import tensorflow as tf\n'), ((5407, 5428), 'tensorflow.tanh', 'tf.tanh', (['x'], {'name': 'name'}), '(x, name=name)\n', (5414, 5428), True, 'import tensorflow as tf\n'), ((5482, 5506), 'tensorflow.sigmoid', 'tf.sigmoid', (['x'], {'name': 'name'}), '(x, name=name)\n', (5492, 5506), True, 'import tensorflow as tf\n'), ((5567, 5591), 'tensorflow.nn.relu', 'tf.nn.relu', (['x'], {'name': 'name'}), '(x, name=name)\n', (5577, 5591), True, 'import tensorflow as tf\n'), ((5641, 5657), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['x'], {}), '(x)\n', (5654, 5657), True, 'import 
tensorflow as tf\n'), ((5701, 5718), 'tensorflow.nn.softplus', 'tf.nn.softplus', (['x'], {}), '(x)\n', (5715, 5718), True, 'import tensorflow as tf\n'), ((5872, 5918), 'tensorflow.nn.dropout', 'tf.nn.dropout', (['(x * 1.0)', 'retain_prob'], {'seed': 'seed'}), '(x * 1.0, retain_prob, seed=seed)\n', (5885, 5918), True, 'import tensorflow as tf\n'), ((6651, 6750), 'tensorflow.nn.convolution', 'tf.nn.convolution', ([], {'input': 'x', 'filter': 'kernel', 'strides': 'strides', 'padding': 'padding', 'data_format': '"""NHWC"""'}), "(input=x, filter=kernel, strides=strides, padding=padding,\n data_format='NHWC')\n", (6668, 6750), True, 'import tensorflow as tf\n'), ((7415, 7488), 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['x', 'kernel', 'output_shape', 'strides'], {'padding': 'padding'}), '(x, kernel, output_shape, strides, padding=padding)\n', (7437, 7488), True, 'import tensorflow as tf\n'), ((8748, 8781), 'tensorflow.concat', 'tf.concat', (['[leading_dim, [-1]]', '(0)'], {}), '([leading_dim, [-1]], 0)\n', (8757, 8781), True, 'import tensorflow as tf\n'), ((8797, 8821), 'tensorflow.reshape', 'tf.reshape', (['x', 'new_shape'], {}), '(x, new_shape)\n', (8807, 8821), True, 'import tensorflow as tf\n'), ((8926, 8960), 'tensorflow.split', 'tf.split', (['x', 'num_splits'], {'axis': 'axis'}), '(x, num_splits, axis=axis)\n', (8934, 8960), True, 'import tensorflow as tf\n'), ((9010, 9030), 'tensorflow.reshape', 'tf.reshape', (['x', 'shape'], {}), '(x, shape)\n', (9020, 9030), True, 'import tensorflow as tf\n'), ((9180, 9226), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (9193, 9226), True, 'import tensorflow as tf\n'), ((9293, 9340), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (9307, 9340), True, 'import tensorflow as tf\n'), ((9578, 9625), 'tensorflow.reduce_mean', 'tf.reduce_mean', 
(['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (9592, 9625), True, 'import tensorflow as tf\n'), ((9694, 9715), 'tensorflow.nn.moments', 'tf.nn.moments', (['x', '[0]'], {}), '(x, [0])\n', (9707, 9715), True, 'import tensorflow as tf\n'), ((9880, 9889), 'tensorflow.log', 'tf.log', (['x'], {}), '(x)\n', (9886, 9889), True, 'import tensorflow as tf\n'), ((9930, 9941), 'tensorflow.log1p', 'tf.log1p', (['x'], {}), '(x)\n', (9938, 9941), True, 'import tensorflow as tf\n'), ((9980, 9989), 'tensorflow.exp', 'tf.exp', (['x'], {}), '(x)\n', (9986, 9989), True, 'import tensorflow as tf\n'), ((10031, 10043), 'tensorflow.pow', 'tf.pow', (['x', 'a'], {}), '(x, a)\n', (10037, 10043), True, 'import tensorflow as tf\n'), ((10085, 10102), 'tensorflow.multiply', 'tf.multiply', (['x', 'y'], {}), '(x, y)\n', (10096, 10102), True, 'import tensorflow as tf\n'), ((10304, 10314), 'tensorflow.sqrt', 'tf.sqrt', (['x'], {}), '(x)\n', (10311, 10314), True, 'import tensorflow as tf\n'), ((11167, 11196), 'tensorflow.concat', 'tf.concat', (['tensors'], {'axis': 'axis'}), '(tensors, axis=axis)\n', (11176, 11196), True, 'import tensorflow as tf\n'), ((11382, 11410), 'tensorflow.argmin', 'tf.argmin', (['tensor'], {'axis': 'axis'}), '(tensor, axis=axis)\n', (11391, 11410), True, 'import tensorflow as tf\n'), ((11463, 11489), 'tensorflow.map_fn', 'tf.map_fn', (['function', 'input'], {}), '(function, input)\n', (11472, 11489), True, 'import tensorflow as tf\n'), ((12006, 12047), 'tensorflow.while_loop', 'tf.while_loop', (['condition', 'body', 'loop_vars'], {}), '(condition, body, loop_vars)\n', (12019, 12047), True, 'import tensorflow as tf\n'), ((12113, 12172), 'tensorflow.scan', 'tf.scan', (['fn', 'elems'], {'initializer': 'initializer', 'back_prop': '(True)'}), '(fn, elems, initializer=initializer, back_prop=True)\n', (12120, 12172), True, 'import tensorflow as tf\n'), ((12432, 12464), 'tensorflow.einsum', 'tf.einsum', (['subscripts', '*operands'], {}), 
'(subscripts, *operands)\n', (12441, 12464), True, 'import tensorflow as tf\n'), ((13852, 13866), 'tensorflow.cholesky', 'tf.cholesky', (['A'], {}), '(A)\n', (13863, 13866), True, 'import tensorflow as tf\n'), ((14230, 14245), 'tensorflow.assign', 'tf.assign', (['a', 'b'], {}), '(a, b)\n', (14239, 14245), True, 'import tensorflow as tf\n'), ((14386, 14430), 'tensorflow.constant', 'tf.constant', (['value'], {'dtype': 'dtype', 'shape': 'shape'}), '(value, dtype=dtype, shape=shape)\n', (14397, 14430), True, 'import tensorflow as tf\n'), ((14677, 14705), 'tensorflow.concat', 'tf.concat', (['values'], {'axis': 'axis'}), '(values, axis=axis)\n', (14686, 14705), True, 'import tensorflow as tf\n'), ((14761, 14787), 'tensorflow.gather', 'tf.gather', (['params', 'indices'], {}), '(params, indices)\n', (14770, 14787), True, 'import tensorflow as tf\n'), ((14846, 14875), 'tensorflow.gather_nd', 'tf.gather_nd', (['params', 'indices'], {}), '(params, indices)\n', (14858, 14875), True, 'import tensorflow as tf\n'), ((14919, 14933), 'tensorflow.equal', 'tf.equal', (['x', 'y'], {}), '(x, y)\n', (14927, 14933), True, 'import tensorflow as tf\n'), ((14983, 15003), 'tensorflow.logical_and', 'tf.logical_and', (['x', 'y'], {}), '(x, y)\n', (14997, 15003), True, 'import tensorflow as tf\n'), ((15135, 15240), 'tensorflow.matmul', 'tf.matmul', (['a', 'b'], {'transpose_a': 'transpose_a', 'transpose_b': 'transpose_b', 'a_is_sparse': 'a_is_sparse', 'name': 'name'}), '(a, b, transpose_a=transpose_a, transpose_b=transpose_b,\n a_is_sparse=a_is_sparse, name=name)\n', (15144, 15240), True, 'import tensorflow as tf\n'), ((15277, 15288), 'tensorflow.trace', 'tf.trace', (['a'], {}), '(a)\n', (15285, 15288), True, 'import tensorflow as tf\n'), ((15344, 15370), 'tensorflow.transpose', 'tf.transpose', (['a'], {'perm': 'perm'}), '(a, perm=perm)\n', (15356, 15370), True, 'import tensorflow as tf\n'), ((15422, 15444), 'tensorflow.matrix_transpose', 'tf.matrix_transpose', (['a'], {}), '(a)\n', (15441, 
15444), True, 'import tensorflow as tf\n'), ((15491, 15508), 'tensorflow.matrix_diag', 'tf.matrix_diag', (['a'], {}), '(a)\n', (15505, 15508), True, 'import tensorflow as tf\n'), ((15560, 15582), 'tensorflow.matrix_diag_part', 'tf.matrix_diag_part', (['a'], {}), '(a)\n', (15579, 15582), True, 'import tensorflow as tf\n'), ((15640, 15675), 'tensorflow.linalg.set_diag', 'tf.linalg.set_diag', (['input', 'diagonal'], {}), '(input, diagonal)\n', (15658, 15675), True, 'import tensorflow as tf\n'), ((15746, 15794), 'tensorflow.linalg.band_part', 'tf.linalg.band_part', (['input', 'num_lower', 'num_upper'], {}), '(input, num_lower, num_upper)\n', (15765, 15794), True, 'import tensorflow as tf\n'), ((17651, 17669), 'tensorflow.contrib.distributions.fill_triangular', 'fill_triangular', (['a'], {}), '(a)\n', (17666, 17669), False, 'from tensorflow.contrib.distributions import fill_triangular\n'), ((17719, 17739), 'tensorflow.matrix_inverse', 'tf.matrix_inverse', (['a'], {}), '(a)\n', (17736, 17739), True, 'import tensorflow as tf\n'), ((17794, 17816), 'tensorflow.expand_dims', 'tf.expand_dims', (['x', 'dim'], {}), '(x, dim)\n', (17808, 17816), True, 'import tensorflow as tf\n'), ((17871, 17896), 'tensorflow.tile', 'tf.tile', (['input', 'multiples'], {}), '(input, multiples)\n', (17878, 17896), True, 'import tensorflow as tf\n'), ((17955, 17984), 'tensorflow.gradients', 'tf.gradients', (['loss', 'variables'], {}), '(loss, variables)\n', (17967, 17984), True, 'import tensorflow as tf\n'), ((18026, 18038), 'tensorflow.square', 'tf.square', (['x'], {}), '(x)\n', (18035, 18038), True, 'import tensorflow as tf\n'), ((18098, 18128), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x', 'low', 'high'], {}), '(x, low, high)\n', (18114, 18128), True, 'import tensorflow as tf\n'), ((18196, 18234), 'tensorflow.stack', 'tf.stack', (['values'], {'axis': 'axis', 'name': 'name'}), '(values, axis=axis, name=name)\n', (18204, 18234), True, 'import tensorflow as tf\n'), ((18316, 18365), 
'tensorflow.unstack', 'tf.unstack', (['values'], {'num': 'num', 'axis': 'axis', 'name': 'name'}), '(values, num=num, axis=axis, name=name)\n', (18326, 18365), True, 'import tensorflow as tf\n'), ((18604, 18650), 'tensorflow.reduce_max', 'tf.reduce_max', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (18617, 18650), True, 'import tensorflow as tf\n'), ((18729, 18781), 'tensorflow.reduce_logsumexp', 'tf.reduce_logsumexp', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (18748, 18781), True, 'import tensorflow as tf\n'), ((18853, 18898), 'tensorflow.matrix_solve', 'tf.matrix_solve', (['matrix', 'rhs'], {'adjoint': 'adjoint'}), '(matrix, rhs, adjoint=adjoint)\n', (18868, 18898), True, 'import tensorflow as tf\n'), ((20117, 20152), 'tensorflow.sparse_tensor_dense_matmul', 'tf.sparse_tensor_dense_matmul', (['x', 'y'], {}), '(x, y)\n', (20146, 20152), True, 'import tensorflow as tf\n'), ((20410, 20425), 'tensorflow.matmul', 'tf.matmul', (['x', 'y'], {}), '(x, y)\n', (20419, 20425), True, 'import tensorflow as tf\n'), ((20616, 20650), 'tensorflow.eye', 'tf.eye', (['d'], {'batch_shape': 'batch_shape'}), '(d, batch_shape=batch_shape)\n', (20622, 20650), True, 'import tensorflow as tf\n'), ((20824, 20853), 'tensorflow.gradients', 'tf.gradients', (['loss', 'variables'], {}), '(loss, variables)\n', (20836, 20853), True, 'import tensorflow as tf\n'), ((20892, 20904), 'tensorflow.square', 'tf.square', (['x'], {}), '(x)\n', (20901, 20904), True, 'import tensorflow as tf\n'), ((20957, 20980), 'tensorflow.argmax', 'tf.argmax', (['x'], {'axis': 'axis'}), '(x, axis=axis)\n', (20966, 20980), True, 'import tensorflow as tf\n'), ((21046, 21092), 'tensorflow.reduce_max', 'tf.reduce_max', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (21059, 21092), True, 'import tensorflow as tf\n'), ((21164, 21216), 'tensorflow.reduce_logsumexp', 'tf.reduce_logsumexp', (['x'], 
{'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (21183, 21216), True, 'import tensorflow as tf\n'), ((21677, 21730), 'tensorflow.where', 'tf.where', (['condition', 'then_expression', 'else_expression'], {}), '(condition, then_expression, else_expression)\n', (21685, 21730), True, 'import tensorflow as tf\n'), ((22281, 22316), 'tensorflow.range', 'tf.range', (['start', 'limit'], {'delta': 'delta'}), '(start, limit, delta=delta)\n', (22289, 22316), True, 'import tensorflow as tf\n'), ((22360, 22381), 'tensorflow.matrix_solve', 'tf.matrix_solve', (['a', 'b'], {}), '(a, b)\n', (22375, 22381), True, 'import tensorflow as tf\n'), ((22437, 22463), 'tensorflow.one_hot', 'tf.one_hot', (['indices', 'depth'], {}), '(indices, depth)\n', (22447, 22463), True, 'import tensorflow as tf\n'), ((22529, 22541), 'tensorflow.lgamma', 'tf.lgamma', (['x'], {}), '(x)\n', (22538, 22541), True, 'import tensorflow as tf\n'), ((22903, 22916), 'tensorflow.digamma', 'tf.digamma', (['a'], {}), '(a)\n', (22913, 22916), True, 'import tensorflow as tf\n'), ((498, 535), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['self.outputs'], {}), '(self.outputs)\n', (521, 535), True, 'import tensorflow as tf\n'), ((1946, 1991), 'tensorflow.placeholder', 'tf.placeholder', (['dtype'], {'shape': 'shape', 'name': 'name'}), '(dtype, shape=shape, name=name)\n', (1960, 1991), True, 'import tensorflow as tf\n'), ((2138, 2210), 'tensorflow.Variable', 'tf.Variable', ([], {'initial_value': 'initial_value', 'trainable': 'trainable', 'name': 'name'}), '(initial_value=initial_value, trainable=trainable, name=name)\n', (2149, 2210), True, 'import tensorflow as tf\n'), ((3275, 3307), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (3305, 3307), True, 'import tensorflow as tf\n'), ((3326, 3359), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (3357, 3359), True, 'import tensorflow 
as tf\n'), ((5132, 5159), 'tensorflow.ones', 'tf.ones', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (5139, 5159), True, 'import tensorflow as tf\n'), ((5204, 5232), 'tensorflow.zeros', 'tf.zeros', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (5212, 5232), True, 'import tensorflow as tf\n'), ((5833, 5862), 'numpy.random.randint', 'np.random.randint', (['(10000000.0)'], {}), '(10000000.0)\n', (5850, 5862), True, 'import numpy as np\n'), ((6568, 6589), 'tensorflow.cast', 'tf.cast', (['x', '"""float32"""'], {}), "(x, 'float32')\n", (6575, 6589), True, 'import tensorflow as tf\n'), ((6611, 6637), 'tensorflow.cast', 'tf.cast', (['kernel', '"""float32"""'], {}), "(kernel, 'float32')\n", (6618, 6637), True, 'import tensorflow as tf\n'), ((6833, 6854), 'tensorflow.cast', 'tf.cast', (['x', '"""float64"""'], {}), "(x, 'float64')\n", (6840, 6854), True, 'import tensorflow as tf\n'), ((7332, 7353), 'tensorflow.cast', 'tf.cast', (['x', '"""float32"""'], {}), "(x, 'float32')\n", (7339, 7353), True, 'import tensorflow as tf\n'), ((7375, 7401), 'tensorflow.cast', 'tf.cast', (['kernel', '"""float32"""'], {}), "(kernel, 'float32')\n", (7382, 7401), True, 'import tensorflow as tf\n'), ((7545, 7566), 'tensorflow.cast', 'tf.cast', (['x', '"""float64"""'], {}), "(x, 'float64')\n", (7552, 7566), True, 'import tensorflow as tf\n'), ((8235, 8256), 'tensorflow.cast', 'tf.cast', (['x', '"""float32"""'], {}), "(x, 'float32')\n", (8242, 8256), True, 'import tensorflow as tf\n'), ((8305, 8359), 'tensorflow.nn.max_pool', 'tf.nn.max_pool', (['x', 'pool_size', 'strides'], {'padding': 'padding'}), '(x, pool_size, strides, padding=padding)\n', (8319, 8359), True, 'import tensorflow as tf\n'), ((8605, 8626), 'tensorflow.cast', 'tf.cast', (['x', '"""float64"""'], {}), "(x, 'float64')\n", (8612, 8626), True, 'import tensorflow as tf\n'), ((9759, 9773), 'tensorflow.identity', 'tf.identity', (['x'], {}), '(x)\n', (9770, 9773), True, 'import tensorflow as tf\n'), ((10797, 
10869), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'output', 'labels': 'target'}), '(logits=output, labels=target)\n', (10839, 10869), True, 'import tensorflow as tf\n'), ((10984, 11053), 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', ([], {'labels': 'target', 'logits': 'output'}), '(labels=target, logits=output)\n', (11023, 11053), True, 'import tensorflow as tf\n'), ((11842, 11878), 'tensorflow.scan', 'tf.scan', (['step', 'input', 'initial_states'], {}), '(step, input, initial_states)\n', (11849, 11878), True, 'import tensorflow as tf\n'), ((12347, 12370), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['term', '(-1)'], {}), '(term, -1)\n', (12360, 12370), True, 'import tensorflow as tf\n'), ((12883, 12905), 'tensorflow.get_default_graph', 'tf.get_default_graph', ([], {}), '()\n', (12903, 12905), True, 'import tensorflow as tf\n'), ((17228, 17239), 'tensorflow.shape', 'tf.shape', (['X'], {}), '(X)\n', (17236, 17239), True, 'import tensorflow as tf\n'), ((20363, 20394), 'tensorflow.broadcast_to', 'tf.broadcast_to', (['y', 'new_y_shape'], {}), '(y, new_y_shape)\n', (20378, 20394), True, 'import tensorflow as tf\n'), ((21859, 21874), 'tensorflow.stack', 'tf.stack', (['shape'], {}), '(shape)\n', (21867, 21874), True, 'import tensorflow as tf\n'), ((22237, 22265), 'tensorflow.range', 'tf.range', (['start'], {'delta': 'delta'}), '(start, delta=delta)\n', (22245, 22265), True, 'import tensorflow as tf\n'), ((565, 580), 'tensorflow.assign', 'tf.assign', (['k', 'v'], {}), '(k, v)\n', (574, 580), True, 'import tensorflow as tf\n'), ((1162, 1188), 'tensorflow.test.is_gpu_available', 'tf.test.is_gpu_available', ([], {}), '()\n', (1186, 1188), True, 'import tensorflow as tf\n'), ((5045, 5082), 'tensorflow.random_uniform', 'tf.random_uniform', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (5062, 5082), True, 'import tensorflow as tf\n'), ((8409, 
8463), 'tensorflow.nn.avg_pool', 'tf.nn.avg_pool', (['x', 'pool_size', 'strides'], {'padding': 'padding'}), '(x, pool_size, strides, padding=padding)\n', (8423, 8463), True, 'import tensorflow as tf\n'), ((10533, 10566), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['output', 'axis', '(True)'], {}), '(output, axis, True)\n', (10546, 10566), True, 'import tensorflow as tf\n'), ((12831, 12860), 'tensorflow.RegisterGradient', 'tf.RegisterGradient', (['rnd_name'], {}), '(rnd_name)\n', (12850, 12860), True, 'import tensorflow as tf\n'), ((13022, 13079), 'tensorflow.py_func', 'tf.py_func', (['func', 'inp', 'Tout'], {'stateful': 'stateful', 'name': 'name'}), '(func, inp, Tout, stateful=stateful, name=name)\n', (13032, 13079), True, 'import tensorflow as tf\n'), ((16466, 16482), 'tensorflow.concat', 'tf.concat', (['a', '(-1)'], {}), '(a, -1)\n', (16475, 16482), True, 'import tensorflow as tf\n'), ((17561, 17577), 'tensorflow.concat', 'tf.concat', (['b', '(-2)'], {}), '(b, -2)\n', (17570, 17577), True, 'import tensorflow as tf\n'), ((11275, 11291), 'tensorflow.shape', 'tf.shape', (['tensor'], {}), '(tensor)\n', (11283, 11291), True, 'import tensorflow as tf\n'), ((12791, 12824), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100000000.0)'], {}), '(0, 100000000.0)\n', (12808, 12824), True, 'import numpy as np\n'), ((13214, 13239), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['A_new'], {}), '(A_new)\n', (13232, 13239), True, 'import numpy as np\n'), ((20330, 20341), 'tensorflow.shape', 'tf.shape', (['y'], {}), '(y)\n', (20338, 20341), True, 'import tensorflow as tf\n'), ((21876, 21891), 'numpy.array', 'np.array', (['value'], {}), '(value)\n', (21884, 21891), True, 'import numpy as np\n'), ((3706, 3723), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (3716, 3723), True, 'import numpy as np\n'), ((10742, 10756), 'tensorflow.log', 'tf.log', (['output'], {}), '(output)\n', (10748, 10756), True, 'import tensorflow as tf\n'), ((14493, 14516), 
'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {}), '(x)\n', (14513, 14516), True, 'import tensorflow as tf\n'), ((17456, 17491), 'tensorflow.concat', 'tf.concat', (['[leading_dim, [n, m]]', '(0)'], {}), '([leading_dim, [n, m]], 0)\n', (17465, 17491), True, 'import tensorflow as tf\n'), ((20308, 20319), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (20316, 20319), True, 'import tensorflow as tf\n'), ((13487, 13506), 'numpy.eye', 'np.eye', (['A.shape[-1]'], {}), '(A.shape[-1])\n', (13493, 13506), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division, print_function
import cv2
import pandas as pd
import numpy as np
import six
import ubelt as ub
from six.moves import zip_longest
from os.path import join, dirname
import warnings
def multi_plot(xdata=None, ydata=[], **kwargs):
r"""
plots multiple lines, bars, etc...
This is the big function that implements almost all of the heavy lifting in
this file. Any function not using this should probably find a way to use
it. It is pretty general and relatively clean.
Args:
xdata (ndarray): can also be a list of arrays
ydata (list or dict of ndarrays): can also be a single array
**kwargs:
Misc:
fnum, pnum, use_legend, legend_loc
Labels:
xlabel, ylabel, title, figtitle
ticksize, titlesize, legendsize, labelsize
Grid:
gridlinewidth, gridlinestyle
Ticks:
num_xticks, num_yticks, tickwidth, ticklength, ticksize
Data:
xmin, xmax, ymin, ymax, spread_list
# can append _list to any of these
# these can be dictionaries if ydata was also a dict
plot_kw_keys = ['label', 'color', 'marker', 'markersize',
'markeredgewidth', 'linewidth', 'linestyle']
any plot_kw key can be a scalar (corresponding to all ydatas),
a list if ydata was specified as a list, or a dict if ydata was
specified as a dict.
kind = ['bar', 'plot', ...]
if kind='plot':
spread
if kind='bar':
stacked, width
References:
matplotlib.org/examples/api/barchart_demo.html
CommandLine:
python -m netharn.util.mplutil multi_plot:0 --show
python -m netharn.util.mplutil multi_plot:1 --show
Example:
>>> autompl()
>>> xdata = [1, 2, 3, 4, 5]
>>> ydata_list = [[1, 2, 3, 4, 5], [3, 3, 3, 3, 3], [5, 4, np.nan, 2, 1], [4, 3, np.nan, 1, 0]]
>>> kwargs = {'label': ['spamΣ', 'eggs', 'jamµ', 'pram'], 'linestyle': '-'}
>>> #fig = multi_plot(xdata, ydata_list, title='$\phi_1(\\vec{x})$', xlabel='\nfds', **kwargs)
>>> fig = multi_plot(xdata, ydata_list, title='ΣΣΣµµµ', xlabel='\nfdsΣΣΣµµµ', **kwargs)
>>> show_if_requested()
Example:
>>> autompl()
>>> fig1 = multi_plot([1, 2, 3], [4, 5, 6])
>>> fig2 = multi_plot([1, 2, 3], [4, 5, 6], fnum=4)
>>> show_if_requested()
"""
import matplotlib as mpl
from matplotlib import pyplot as plt
ydata_list = ydata
if isinstance(ydata_list, dict):
# Special case where ydata is a dictionary
if isinstance(xdata, six.string_types):
# Special-er case where xdata is specified in ydata
xkey = xdata
ykeys = set(ydata_list.keys()) - {xkey}
xdata = ydata_list[xkey]
else:
ykeys = list(ydata_list.keys())
# Normalize input
ydata_list = list(ub.take(ydata_list, ykeys))
kwargs['label_list'] = kwargs.get('label_list', ykeys)
else:
ykeys = None
def is_listlike(data):
flag = isinstance(data, (list, np.ndarray, tuple, pd.Series))
flag &= hasattr(data, '__getitem__') and hasattr(data, '__len__')
return flag
def is_list_of_scalars(data):
if is_listlike(data):
if len(data) > 0 and not is_listlike(data[0]):
return True
return False
def is_list_of_lists(data):
if is_listlike(data):
if len(data) > 0 and is_listlike(data[0]):
return True
return False
# allow ydata_list to be passed without a container
if is_list_of_scalars(ydata_list):
ydata_list = [np.array(ydata_list)]
if xdata is None:
xdata = list(range(len(ydata_list[0])))
num_lines = len(ydata_list)
# Transform xdata into xdata_list
if is_list_of_lists(xdata):
xdata_list = [np.array(xd, copy=True) for xd in xdata]
else:
xdata_list = [np.array(xdata, copy=True)] * num_lines
fnum = ensure_fnum(kwargs.get('fnum', None))
pnum = kwargs.get('pnum', None)
kind = kwargs.get('kind', 'plot')
transpose = kwargs.get('transpose', False)
def parsekw_list(key, kwargs, num_lines=num_lines, ykeys=ykeys):
""" copies relevant plot commands into plot_list_kw """
if key in kwargs:
val_list = kwargs[key]
elif key + '_list' in kwargs:
warnings.warn('*_list is depricated, just use kwarg {}'.format(key))
val_list = kwargs[key + '_list']
elif key + 's' in kwargs:
# hack, multiple ways to do something
warnings.warn('*s depricated, just use kwarg {}'.format(key))
val_list = kwargs[key + 's']
else:
val_list = None
if val_list is not None:
if isinstance(val_list, dict):
if ykeys is None:
raise ValueError('ydata is not a dict, but a property was.')
else:
val_list = [val_list[key] for key in ykeys]
if not isinstance(val_list, list):
val_list = [val_list] * num_lines
return val_list
# Parse out arguments to ax.plot
plot_kw_keys = ['label', 'color', 'marker', 'markersize',
'markeredgewidth', 'linewidth', 'linestyle', 'alpha']
# hackish / extra args that dont go to plot, but help
extra_plot_kw_keys = ['spread_alpha', 'autolabel', 'edgecolor', 'fill']
plot_kw_keys += extra_plot_kw_keys
plot_ks_vals = [parsekw_list(key, kwargs) for key in plot_kw_keys]
plot_list_kw = dict([
(key, vals)
for key, vals in zip(plot_kw_keys, plot_ks_vals) if vals is not None
])
if 'color' not in plot_list_kw:
plot_list_kw['color'] = distinct_colors(num_lines)
if kind == 'plot':
if 'marker' not in plot_list_kw:
plot_list_kw['marker'] = distinct_markers(num_lines)
if 'spread_alpha' not in plot_list_kw:
plot_list_kw['spread_alpha'] = [.2] * num_lines
if kind == 'bar':
# Remove non-bar kwargs
for key in ['markeredgewidth', 'linewidth', 'marker', 'markersize', 'linestyle']:
plot_list_kw.pop(key, None)
stacked = kwargs.get('stacked', False)
width_key = 'height' if transpose else 'width'
if 'width_list' in kwargs:
plot_list_kw[width_key] = kwargs['width_list']
else:
width = kwargs.get('width', .9)
# if width is None:
# # HACK: need variable width
# # width = np.mean(np.diff(xdata_list[0]))
# width = .9
if not stacked:
width /= num_lines
#plot_list_kw['orientation'] = ['horizontal'] * num_lines
plot_list_kw[width_key] = [width] * num_lines
spread_list = kwargs.get('spread_list', None)
if spread_list is None:
pass
# nest into a list of dicts for each line in the multiplot
valid_keys = list(set(plot_list_kw.keys()) - set(extra_plot_kw_keys))
valid_vals = list(ub.dict_take(plot_list_kw, valid_keys))
plot_kw_list = [dict(zip(valid_keys, vals)) for vals in zip(*valid_vals)]
extra_kw_keys = [key for key in extra_plot_kw_keys if key in plot_list_kw]
extra_kw_vals = list(ub.dict_take(plot_list_kw, extra_kw_keys))
extra_kw_list = [dict(zip(extra_kw_keys, vals)) for vals in zip(*extra_kw_vals)]
# Get passed in axes or setup a new figure
ax = kwargs.get('ax', None)
if ax is None:
doclf = kwargs.get('doclf', False)
fig = figure(fnum=fnum, pnum=pnum, docla=False, doclf=doclf)
ax = plt.gca()
else:
plt.sca(ax)
fig = ax.figure
# +---------------
# Draw plot lines
ydata_list = np.array(ydata_list)
if transpose:
if kind == 'bar':
plot_func = ax.barh
elif kind == 'plot':
def plot_func(_x, _y, **kw):
return ax.plot(_y, _x, **kw)
else:
plot_func = getattr(ax, kind) # usually ax.plot
assert len(ydata_list) > 0, 'no ydata'
#assert len(extra_kw_list) == len(plot_kw_list), 'bad length'
#assert len(extra_kw_list) == len(ydata_list), 'bad length'
_iter = enumerate(zip_longest(xdata_list, ydata_list, plot_kw_list, extra_kw_list))
for count, (_xdata, _ydata, plot_kw, extra_kw) in _iter:
ymask = np.isfinite(_ydata)
ydata_ = _ydata.compress(ymask)
xdata_ = _xdata.compress(ymask)
if kind == 'bar':
if stacked:
# Plot bars on top of each other
xdata_ = xdata_
else:
# Plot bars side by side
baseoffset = (width * num_lines) / 2
lineoffset = (width * count)
offset = baseoffset - lineoffset # Fixeme for more histogram bars
xdata_ = xdata_ - offset
# width_key = 'height' if transpose else 'width'
# plot_kw[width_key] = np.diff(xdata)
objs = plot_func(xdata_, ydata_, **plot_kw)
if kind == 'bar':
if extra_kw is not None and 'edgecolor' in extra_kw:
for rect in objs:
rect.set_edgecolor(extra_kw['edgecolor'])
if extra_kw is not None and extra_kw.get('autolabel', False):
# FIXME: probably a more cannonical way to include bar
# autolabeling with tranpose support, but this is a hack that
# works for now
for rect in objs:
if transpose:
numlbl = width = rect.get_width()
xpos = width + ((_xdata.max() - _xdata.min()) * .005)
ypos = rect.get_y() + rect.get_height() / 2.
ha, va = 'left', 'center'
else:
numlbl = height = rect.get_height()
xpos = rect.get_x() + rect.get_width() / 2.
ypos = 1.05 * height
ha, va = 'center', 'bottom'
barlbl = '%.3f' % (numlbl,)
ax.text(xpos, ypos, barlbl, ha=ha, va=va)
# print('extra_kw = %r' % (extra_kw,))
if kind == 'plot' and extra_kw.get('fill', False):
ax.fill_between(_xdata, ydata_, alpha=plot_kw.get('alpha', 1.0),
color=plot_kw.get('color', None)) # , zorder=0)
if spread_list is not None:
# Plots a spread around plot lines usually indicating standard
# deviation
_xdata = np.array(_xdata)
spread = spread_list[count]
ydata_ave = np.array(ydata_)
y_data_dev = np.array(spread)
y_data_max = ydata_ave + y_data_dev
y_data_min = ydata_ave - y_data_dev
ax = plt.gca()
spread_alpha = extra_kw['spread_alpha']
ax.fill_between(_xdata, y_data_min, y_data_max, alpha=spread_alpha,
color=plot_kw.get('color', None)) # , zorder=0)
# L________________
#max_y = max(np.max(y_data), max_y)
#min_y = np.min(y_data) if min_y is None else min(np.min(y_data), min_y)
ydata = _ydata # HACK
xdata = _xdata # HACK
if transpose:
#xdata_list = ydata_list
ydata = xdata
# Hack / Fix any transpose issues
def transpose_key(key):
if key.startswith('x'):
return 'y' + key[1:]
elif key.startswith('y'):
return 'x' + key[1:]
elif key.startswith('num_x'):
# hackier, fixme to use regex or something
return 'num_y' + key[5:]
elif key.startswith('num_y'):
# hackier, fixme to use regex or something
return 'num_x' + key[5:]
else:
return key
kwargs = {transpose_key(key): val for key, val in kwargs.items()}
# Setup axes labeling
title = kwargs.get('title', None)
xlabel = kwargs.get('xlabel', '')
ylabel = kwargs.get('ylabel', '')
def none_or_unicode(text):
return None if text is None else ub.ensure_unicode(text)
xlabel = none_or_unicode(xlabel)
ylabel = none_or_unicode(ylabel)
title = none_or_unicode(title)
# Initial integration with mpl rcParams standards
mplrc = mpl.rcParams.copy()
mplrc.update({
# 'legend.fontsize': custom_figure.LEGEND_SIZE,
# 'axes.titlesize': custom_figure.TITLE_SIZE,
# 'axes.labelsize': custom_figure.LABEL_SIZE,
# 'legend.facecolor': 'w',
# 'font.family': 'sans-serif',
# 'xtick.labelsize': custom_figure.TICK_SIZE,
# 'ytick.labelsize': custom_figure.TICK_SIZE,
})
mplrc.update(kwargs.get('rcParams', {}))
titlesize = kwargs.get('titlesize', mplrc['axes.titlesize'])
labelsize = kwargs.get('labelsize', mplrc['axes.labelsize'])
legendsize = kwargs.get('legendsize', mplrc['legend.fontsize'])
xticksize = kwargs.get('ticksize', mplrc['xtick.labelsize'])
yticksize = kwargs.get('ticksize', mplrc['ytick.labelsize'])
family = kwargs.get('fontfamily', mplrc['font.family'])
tickformat = kwargs.get('tickformat', None)
ytickformat = kwargs.get('ytickformat', tickformat)
xtickformat = kwargs.get('xtickformat', tickformat)
# 'DejaVu Sans','Verdana', 'Arial'
weight = kwargs.get('fontweight', None)
if weight is None:
weight = 'normal'
labelkw = {
'fontproperties': mpl.font_manager.FontProperties(
weight=weight,
family=family, size=labelsize)
}
ax.set_xlabel(xlabel, **labelkw)
ax.set_ylabel(ylabel, **labelkw)
tick_fontprop = mpl.font_manager.FontProperties(family=family,
weight=weight)
if tick_fontprop is not None:
for ticklabel in ax.get_xticklabels():
ticklabel.set_fontproperties(tick_fontprop)
for ticklabel in ax.get_yticklabels():
ticklabel.set_fontproperties(tick_fontprop)
if xticksize is not None:
for ticklabel in ax.get_xticklabels():
ticklabel.set_fontsize(xticksize)
if yticksize is not None:
for ticklabel in ax.get_yticklabels():
ticklabel.set_fontsize(yticksize)
if xtickformat is not None:
# mpl.ticker.StrMethodFormatter # newstyle
# mpl.ticker.FormatStrFormatter # oldstyle
ax.xaxis.set_major_formatter(mpl.ticker.FormatStrFormatter(xtickformat))
if ytickformat is not None:
ax.yaxis.set_major_formatter(mpl.ticker.FormatStrFormatter(ytickformat))
xtick_kw = ytick_kw = {
'width': kwargs.get('tickwidth', None),
'length': kwargs.get('ticklength', None),
}
xtick_kw = {k: v for k, v in xtick_kw.items() if v is not None}
ytick_kw = {k: v for k, v in ytick_kw.items() if v is not None}
ax.xaxis.set_tick_params(**xtick_kw)
ax.yaxis.set_tick_params(**ytick_kw)
#ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%d'))
# Setup axes limits
if 'xlim' in kwargs:
xlim = kwargs['xlim']
if xlim is not None:
if 'xmin' not in kwargs and 'xmax' not in kwargs:
kwargs['xmin'] = xlim[0]
kwargs['xmax'] = xlim[1]
else:
raise ValueError('use xmax, xmin instead of xlim')
if 'ylim' in kwargs:
ylim = kwargs['ylim']
if ylim is not None:
if 'ymin' not in kwargs and 'ymax' not in kwargs:
kwargs['ymin'] = ylim[0]
kwargs['ymax'] = ylim[1]
else:
raise ValueError('use ymax, ymin instead of ylim')
xmin = kwargs.get('xmin', ax.get_xlim()[0])
xmax = kwargs.get('xmax', ax.get_xlim()[1])
ymin = kwargs.get('ymin', ax.get_ylim()[0])
ymax = kwargs.get('ymax', ax.get_ylim()[1])
text_type = six.text_type
if text_type(xmax) == 'data':
xmax = max([xd.max() for xd in xdata_list])
if text_type(xmin) == 'data':
xmin = min([xd.min() for xd in xdata_list])
# Setup axes ticks
num_xticks = kwargs.get('num_xticks', None)
num_yticks = kwargs.get('num_yticks', None)
if num_xticks is not None:
# TODO check if xdata is integral
if xdata.dtype.kind == 'i':
xticks = np.linspace(np.ceil(xmin), np.floor(xmax),
num_xticks).astype(np.int32)
else:
xticks = np.linspace((xmin), (xmax), num_xticks)
ax.set_xticks(xticks)
if num_yticks is not None:
if ydata.dtype.kind == 'i':
yticks = np.linspace(np.ceil(ymin), np.floor(ymax),
num_yticks).astype(np.int32)
else:
yticks = np.linspace((ymin), (ymax), num_yticks)
ax.set_yticks(yticks)
force_xticks = kwargs.get('force_xticks', None)
if force_xticks is not None:
xticks = np.array(sorted(ax.get_xticks().tolist() + force_xticks))
ax.set_xticks(xticks)
yticklabels = kwargs.get('yticklabels', None)
if yticklabels is not None:
# Hack ONLY WORKS WHEN TRANSPOSE = True
# Overrides num_yticks
ax.set_yticks(ydata)
ax.set_yticklabels(yticklabels)
xticklabels = kwargs.get('xticklabels', None)
if xticklabels is not None:
# Overrides num_xticks
ax.set_xticks(xdata)
ax.set_xticklabels(xticklabels)
xtick_rotation = kwargs.get('xtick_rotation', None)
if xtick_rotation is not None:
[lbl.set_rotation(xtick_rotation)
for lbl in ax.get_xticklabels()]
ytick_rotation = kwargs.get('ytick_rotation', None)
if ytick_rotation is not None:
[lbl.set_rotation(ytick_rotation)
for lbl in ax.get_yticklabels()]
# Axis padding
xpad = kwargs.get('xpad', None)
ypad = kwargs.get('ypad', None)
xpad_factor = kwargs.get('xpad_factor', None)
ypad_factor = kwargs.get('ypad_factor', None)
if xpad is None and xpad_factor is not None:
xpad = (xmax - xmin) * xpad_factor
if ypad is None and ypad_factor is not None:
ypad = (ymax - ymin) * ypad_factor
xpad = 0 if xpad is None else xpad
ypad = 0 if ypad is None else ypad
ypad_high = kwargs.get('ypad_high', ypad)
ypad_low = kwargs.get('ypad_low', ypad)
xpad_high = kwargs.get('xpad_high', xpad)
xpad_low = kwargs.get('xpad_low', xpad)
xmin, xmax = (xmin - xpad_low), (xmax + xpad_high)
ymin, ymax = (ymin - ypad_low), (ymax + ypad_high)
ax.set_xlim(xmin, xmax)
ax.set_ylim(ymin, ymax)
xscale = kwargs.get('xscale', None)
yscale = kwargs.get('yscale', None)
if yscale is not None:
ax.set_yscale(yscale)
if xscale is not None:
ax.set_xscale(xscale)
gridlinestyle = kwargs.get('gridlinestyle', None)
gridlinewidth = kwargs.get('gridlinewidth', None)
gridlines = ax.get_xgridlines() + ax.get_ygridlines()
if gridlinestyle:
for line in gridlines:
line.set_linestyle(gridlinestyle)
if gridlinewidth:
for line in gridlines:
line.set_linewidth(gridlinewidth)
# Setup title
if title is not None:
titlekw = {
'fontproperties': mpl.font_manager.FontProperties(
family=family,
weight=weight,
size=titlesize)
}
ax.set_title(title, **titlekw)
use_legend = kwargs.get('use_legend', 'label' in valid_keys)
legend_loc = kwargs.get('legend_loc', 'best')
legend_alpha = kwargs.get('legend_alpha', 1.0)
if use_legend:
legendkw = {
'alpha': legend_alpha,
'fontproperties': mpl.font_manager.FontProperties(
family=family,
weight=weight,
size=legendsize)
}
legend(loc=legend_loc, ax=ax, **legendkw)
figtitle = kwargs.get('figtitle', None)
if figtitle is not None:
set_figtitle(figtitle, fontfamily=family, fontweight=weight,
size=kwargs.get('figtitlesize'))
use_darkbackground = kwargs.get('use_darkbackground', None)
lightbg = kwargs.get('lightbg', None)
if lightbg is None:
lightbg = True
if use_darkbackground is None:
use_darkbackground = not lightbg
if use_darkbackground:
_dark_background(force=use_darkbackground is True)
# TODO: return better info
return fig
def figure(fnum=None, pnum=(1, 1, 1), title=None, figtitle=None, doclf=False,
           docla=False, projection=None, **kwargs):
    """
    Get (or create) a figure by number and activate a subplot on it.

    http://matplotlib.org/users/gridspec.html

    Args:
        fnum (int): fignum = figure number
        pnum (int, str, or tuple(int, int, int)): plotnum = plot tuple
        title (str): (default = None)
        figtitle (None): (default = None)
        docla (bool): (default = False)
        doclf (bool): (default = False)

    Returns:
        mpl.Figure: fig

    CommandLine:
        python -m netharn.util.mplutil figure:0 --show

    Example:
        >>> autompl()
        >>> import matplotlib.pyplot as plt
        >>> fnum = 1
        >>> fig = figure(fnum, (2, 2, 1))
        >>> plt.gca().text(0.5, 0.5, "ax1", va="center", ha="center")
        >>> fig = figure(fnum, (2, 2, 2))
        >>> plt.gca().text(0.5, 0.5, "ax2", va="center", ha="center")
        >>> show_if_requested()

    Example:
        >>> autompl()
        >>> import matplotlib.pyplot as plt
        >>> fnum = 1
        >>> fig = figure(fnum, (2, 2, 1))
        >>> plt.gca().text(0.5, 0.5, "ax1", va="center", ha="center")
        >>> fig = figure(fnum, (2, 2, 2))
        >>> plt.gca().text(0.5, 0.5, "ax2", va="center", ha="center")
        >>> fig = figure(fnum, (2, 4, (1, slice(1, None))))
        >>> plt.gca().text(0.5, 0.5, "ax3", va="center", ha="center")
        >>> show_if_requested()
    """
    import matplotlib.pyplot as plt
    import matplotlib.gridspec as gridspec

    def ensure_fig(fnum=None):
        # Return the figure numbered ``fnum``, falling back to the current
        # figure when that fails (and vice versa when fnum is None).
        if fnum is None:
            try:
                fig = plt.gcf()
            except Exception:  # FIX: unused ``as ex`` binding removed
                fig = plt.figure()
        else:
            try:
                fig = plt.figure(fnum)
            except Exception:  # FIX: unused ``as ex`` binding removed
                fig = plt.gcf()
        return fig

    def _convert_pnum_int_to_tup(int_pnum):
        # Convert pnum to tuple format if in integer format
        # (e.g. 221 -> (2, 2, 1), matching matplotlib's subplot shorthand)
        nr = int_pnum // 100
        nc = int_pnum // 10 - (nr * 10)
        px = int_pnum - (nr * 100) - (nc * 10)
        pnum = (nr, nc, px)
        return pnum

    def _pnum_to_subspec(pnum):
        # Translate a (nrows, ncols, index) pnum into a gridspec SubplotSpec.
        # ``index`` may be an int (1-based) or a tuple/slice/list for spans.
        if isinstance(pnum, six.string_types):
            pnum = list(pnum)
        nrow, ncols, plotnum = pnum
        # if kwargs.get('use_gridspec', True):
        # Convert old pnums to gridspec
        gs = gridspec.GridSpec(nrow, ncols)
        if isinstance(plotnum, (tuple, slice, list)):
            subspec = gs[plotnum]
        else:
            subspec = gs[plotnum - 1]
        return (subspec,)

    def _setup_subfigure(pnum):
        # Activate (and optionally clear) the axes addressed by ``pnum``
        # on the closed-over ``fig``.
        if isinstance(pnum, int):
            pnum = _convert_pnum_int_to_tup(pnum)
        axes_list = fig.get_axes()
        if docla or len(axes_list) == 0:
            if pnum is not None:
                assert pnum[0] > 0, 'nRows must be > 0: pnum=%r' % (pnum,)
                assert pnum[1] > 0, 'nCols must be > 0: pnum=%r' % (pnum,)
                subspec = _pnum_to_subspec(pnum)
                ax = fig.add_subplot(*subspec, projection=projection)
                if len(axes_list) > 0:
                    ax.cla()
            else:
                ax = plt.gca()
        else:
            if pnum is not None:
                subspec = _pnum_to_subspec(pnum)
                ax = plt.subplot(*subspec)
            else:
                ax = plt.gca()
    fig = ensure_fig(fnum)
    if doclf:
        fig.clf()
    if pnum is not None:
        _setup_subfigure(pnum)
    # Set the title / figtitle
    if title is not None:
        ax = plt.gca()
        ax.set_title(title)
    if figtitle is not None:
        fig.suptitle(figtitle)
    return fig
def pandas_plot_matrix(df, rot=90, ax=None, grid=True, label=None,
                       zerodiag=False,
                       cmap='viridis', showvals=False, logscale=True):
    """
    Plot a pandas DataFrame as a color-mapped matrix with labeled ticks.

    Args:
        df (pd.DataFrame): matrix-like frame to display
        rot (int): rotation (degrees) for x tick labels (default = 90)
        ax (mpl.axes.Axes | None): axes to draw into; a new figure is used
            when None
        grid (bool): draw white grid lines between cells (default = True)
        label (str | None): colorbar label
        zerodiag (bool): zero-out the diagonal before plotting
        cmap (str): colormap name (default = 'viridis')
        showvals (bool): write each cell's value inside the cell
        logscale (bool): color on a log scale using the smallest positive
            value as vmin (default = True)

    Returns:
        mpl.axes.Axes: the axes the matrix was drawn into
    """
    import matplotlib as mpl
    import copy
    from matplotlib import pyplot as plt
    if ax is None:
        fig = figure(fnum=1, pnum=(1, 1, 1))
        fig.clear()
        ax = plt.gca()
    # FIX: an unconditional ``ax = plt.gca()`` previously followed the block
    # above, silently discarding any caller-supplied ``ax``; it was removed.
    values = df.values
    if zerodiag:
        values = values.copy()
        values = values - np.diag(np.diag(values))

    # aximg = ax.imshow(values, interpolation='none', cmap='viridis')
    if logscale:
        from matplotlib.colors import LogNorm
        vmin = df[df > 0].min().min()
        norm = LogNorm(vmin=vmin, vmax=values.max())
    else:
        norm = None

    # Copy the cmap so set_bad does not mutate the global registered instance.
    # NOTE(review): mpl.cm.get_cmap is deprecated (removed in matplotlib 3.9);
    # migrate to ``mpl.colormaps[cmap]`` once the minimum version allows.
    cmap = copy.copy(mpl.cm.get_cmap(cmap))
    cmap.set_bad((0, 0, 0))

    aximg = ax.matshow(values, interpolation='none', cmap=cmap, norm=norm)
    # aximg = ax.imshow(values, interpolation='none', cmap='viridis', norm=norm)

    # ax.imshow(values, interpolation='none', cmap='viridis')
    ax.grid(False)
    cax = plt.colorbar(aximg, ax=ax)
    if label is not None:
        cax.set_label(label)

    ax.set_xticks(list(range(len(df.index))))
    ax.set_xticklabels([lbl[0:100] for lbl in df.index])
    for lbl in ax.get_xticklabels():
        lbl.set_rotation(rot)
    for lbl in ax.get_xticklabels():
        lbl.set_horizontalalignment('center')

    ax.set_yticks(list(range(len(df.columns))))
    ax.set_yticklabels([lbl[0:100] for lbl in df.columns])
    for lbl in ax.get_yticklabels():
        lbl.set_horizontalalignment('right')
    for lbl in ax.get_yticklabels():
        lbl.set_verticalalignment('center')

    # Grid lines around the pixels
    if grid:
        offset = -.5
        xlim = [-.5, len(df.columns)]
        ylim = [-.5, len(df.index)]
        segments = []
        # NOTE(review): the vertical segments iterate over ylim[1] (rows) and
        # the horizontal ones over xlim[1] (cols); this only lines up cleanly
        # for square frames -- confirm intent for non-square input.
        for x in range(ylim[1]):
            xdata = [x + offset, x + offset]
            ydata = ylim
            segment = list(zip(xdata, ydata))
            segments.append(segment)
        for y in range(xlim[1]):
            xdata = xlim
            ydata = [y + offset, y + offset]
            segment = list(zip(xdata, ydata))
            segments.append(segment)
        bingrid = mpl.collections.LineCollection(segments, color='w', linewidths=1)
        ax.add_collection(bingrid)

    if showvals:
        x_basis = np.arange(len(df.columns))
        y_basis = np.arange(len(df.index))
        x, y = np.meshgrid(x_basis, y_basis)
        for c, r in zip(x.flatten(), y.flatten()):
            val = df.iloc[r, c]
            ax.text(c, r, val, va='center', ha='center', color='white')
    return ax
def axes_extent(axs, pad=0.0):
    """
    Get the full extent of a group of axes, including axes labels, tick labels,
    and titles.
    """
    import matplotlib as mpl

    def _labeled_parts(ax):
        # The axes itself plus every attached text artist with content.
        parts = [ax]
        for lbl in ax.get_xticklabels():
            if lbl.get_text():
                parts.append(lbl)
        for lbl in ax.get_yticklabels():
            if lbl.get_text():
                parts.append(lbl)
        axis_labels = (ax.get_xaxis().get_label(),
                       ax.get_yaxis().get_label(),
                       ax.title)
        for lbl in axis_labels:
            if lbl.get_text():
                parts.append(lbl)
        return parts

    extents = [item.get_window_extent()
               for ax in axs
               for item in _labeled_parts(ax)]
    #mpl.transforms.Affine2D().scale(1.1)
    union = mpl.transforms.Bbox.union(extents)
    return union.expanded(1.0 + pad, 1.0 + pad)
def extract_axes_extents(fig, combine=False, pad=0.0):
    """
    Compute figure-coordinate extents for each group of axes in ``fig``.

    Returns a list of Bbox objects (one per axes group), or -- when
    ``combine`` is True -- a single Bbox that also covers figure-level text.
    """
    import matplotlib as mpl
    # Draw first so text objects have valid window positions.
    fig.canvas.draw()
    # Group axes that belong together (currently each axes is its own group).
    atomic_axes = []
    seen = set()
    for ax in fig.axes:
        if ax not in seen:
            seen.add(ax)
            atomic_axes.append([ax])
    inv_trans = fig.dpi_scale_trans.inverted()
    extents = [axes_extent(group, pad).transformed(inv_trans)
               for group in atomic_axes]
    if not combine:
        return extents
    # Grab include extents of figure text as well
    # FIXME: This might break on OSX
    # http://stackoverflow.com/questions/22667224/bbox-backend
    renderer = fig.canvas.get_renderer()
    for mpl_text in fig.texts:
        bbox = mpl_text.get_window_extent(renderer=renderer)
        padded = bbox.expanded(1.0 + pad, 1.0 + pad)
        extents.append(padded.transformed(inv_trans))
    return mpl.transforms.Bbox.union(extents)
def adjust_subplots(left=None, right=None, bottom=None, top=None, wspace=None,
                    hspace=None, fig=None):
    """
    Adjust a figure's subplot layout, keeping any parameter not explicitly
    given at its current value.

    Kwargs:
        left (float): left side of the subplots of the figure
        right (float): right side of the subplots of the figure
        bottom (float): bottom of the subplots of the figure
        top (float): top of the subplots of the figure
        wspace (float): width reserved for blank space between subplots
        hspace (float): height reserved for blank space between subplots
    """
    from matplotlib import pyplot as plt
    if fig is None:
        fig = plt.gcf()
    requested = dict(left=left, right=right, bottom=bottom, top=top,
                     wspace=wspace, hspace=hspace)
    # Start from the figure's current parameters and overlay only the
    # values the caller actually supplied.
    params = fig.subplotpars.__dict__.copy()
    params.pop('validate')
    params.update({k: v for k, v in requested.items() if v is not None})
    fig.subplots_adjust(**params)
def render_figure_to_image(fig, **savekw):
    """
    Render a matplotlib figure to an image array via an in-memory savefig.

    Args:
        fig (mpl.figure.Figure): figure to render
        **savekw: forwarded to ``fig.savefig`` (e.g. dpi, transparent)

    Returns:
        ndarray: image as decoded by OpenCV (BGRA when transparency is saved)
    """
    import io
    import cv2
    import matplotlib as mpl
    axes_extents = extract_axes_extents(fig)
    extent = mpl.transforms.Bbox.union(axes_extents)
    with io.BytesIO() as stream:
        # This call takes 23% - 15% of the time depending on settings
        fig.savefig(stream, bbox_inches=extent, **savekw)
        # fig.savefig(stream, **savekw)
        stream.seek(0)
        # FIX: np.fromstring is deprecated (removed) for binary input;
        # np.frombuffer is the equivalent, copy-free replacement.
        data = np.frombuffer(stream.getvalue(), dtype=np.uint8)
    im_bgra = cv2.imdecode(data, cv2.IMREAD_UNCHANGED)
    return im_bgra
def savefig2(fig, fpath, **kwargs):
    """
    Save ``fig`` to ``fpath``, defaulting to a transparent background and a
    tight extent covering all axes.
    """
    import matplotlib as mpl
    kwargs.setdefault('transparent', True)
    if 'extent' not in kwargs:
        tight = mpl.transforms.Bbox.union(extract_axes_extents(fig))
        kwargs['extent'] = tight
    fig.savefig(fpath, **kwargs)
def copy_figure_to_clipboard(fig):
    """
    Render a matplotlib figure and place it on the system clipboard as an
    image (requires the Qt5 backend).

    Args:
        fig (mpl.figure.Figure): figure to copy

    References:
        https://stackoverflow.com/questions/17676373/python-matplotlib-pyqt-copy-image-to-clipboard
    """
    print('Copying figure %d to the clipboard' % fig.number)
    # FIX: cv2 was referenced below without being imported in this scope;
    # import it locally, consistent with render_figure_to_image.
    import cv2
    import matplotlib as mpl
    app = mpl.backends.backend_qt5.qApp
    QtGui = mpl.backends.backend_qt5.QtGui
    im_bgra = render_figure_to_image(fig, transparent=True)
    # OpenCV decodes to BGRA byte order; Qt's Format_RGBA8888 expects RGBA.
    im_rgba = cv2.cvtColor(im_bgra, cv2.COLOR_BGRA2RGBA)
    im = im_rgba
    QImage = QtGui.QImage
    qim = QImage(im.data, im.shape[1], im.shape[0], im.strides[0], QImage.Format_RGBA8888)
    clipboard = app.clipboard()
    clipboard.setImage(qim)
def dict_intersection(dict1, dict2):
    r"""
    Build the dictionary of key/value pairs on which ``dict1`` and ``dict2``
    agree (same key, equal value).

    Args:
        dict1 (dict):
        dict2 (dict):

    Returns:
        dict: mergedict_

    CommandLine:
        python -m utool.util_dict --exec-dict_intersection

    Example:
        >>> # ENABLE_DOCTEST
        >>> dict1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
        >>> dict2 = {'b': 2, 'c': 3, 'd': 5, 'e': 21, 'f': 42}
        >>> mergedict_ = dict_intersection(dict1, dict2)
        >>> print(ub.repr2(mergedict_, nl=0))
        {'b': 2, 'c': 3}
    """
    common = set(dict1).intersection(dict2)
    if isinstance(dict1, ub.odict):
        # Preserve dict1's key order when it is explicitly ordered.
        keys = [k for k in dict1 if k in common]
        result_cls = ub.odict
    else:
        keys = common
        result_cls = dict
    return result_cls(
        (k, dict1[k]) for k in keys if dict1[k] == dict2[k]
    )
def _dark_background(ax=None, doubleit=False, force=False):
    r"""
    Paint a dark rectangle behind the contents of an axes.

    Args:
        ax (None): target axes; defaults to the current axes (default = None)
        doubleit (bool): if truthy, enlarge the background patch around the
            axes by this multiple of the half width/height (default = False)
        force (bool): the darkening is only applied when this is truthy

    CommandLine:
        python -m .draw_func2 --exec-_dark_background --show

    Example:
        >>> # ENABLE_DOCTEST
        >>> autompl()
        >>> fig = figure()
        >>> _dark_background()
        >>> show_if_requested()
    """
    import matplotlib as mpl
    from matplotlib import pyplot as plt
    # NOTE: a dead nested helper (is_using_style) that was never called
    # has been removed from this function.
    if force:
        from mpl_toolkits.mplot3d import Axes3D
        BLACK = np.array(( 0, 0, 0, 255)) / 255.0
        # Should use mpl style dark background instead
        bgcolor = BLACK * .9
        if ax is None:
            ax = plt.gca()
        if isinstance(ax, Axes3D):
            # FIX: Axes.set_axis_bgcolor was removed in matplotlib 2.2;
            # prefer set_facecolor, keep the old call as a fallback.
            if hasattr(ax, 'set_facecolor'):
                ax.set_facecolor(bgcolor)
            else:
                ax.set_axis_bgcolor(bgcolor)
            ax.tick_params(colors='white')
            return
        xy, width, height = _get_axis_xy_width_height(ax)
        if doubleit:
            halfw = (doubleit) * (width / 2)
            halfh = (doubleit) * (height / 2)
            xy = (xy[0] - halfw, xy[1] - halfh)
            width *= (doubleit + 1)
            height *= (doubleit + 1)
        rect = mpl.patches.Rectangle(xy, width, height, lw=0, zorder=0)
        rect.set_clip_on(True)
        rect.set_fill(True)
        rect.set_color(bgcolor)
        # Push the patch behind everything else on the axes.
        rect.set_zorder(-99999999999)
        rect = ax.add_patch(rect)
def _get_axis_xy_width_height(ax=None, xaug=0, yaug=0, waug=0, haug=0):
    """
    Return the ((x, y), width, height) geometry of a subplot's data limits,
    with optional additive augmentation of each component.
    """
    from matplotlib import pyplot as plt
    if ax is None:
        ax = plt.gca()
    xmin, xmax, ymin, ymax = ax.axis()
    xy = (xmin + xaug, ymin + yaug)
    width = (xmax - xmin) + waug
    height = (ymax - ymin) + haug
    return xy, width, height
# Matplotlib legend location codes keyed by their human-readable names
# (mirrors the integer codes accepted by Axes.legend(loc=...)).
_LEGEND_LOCATION = {
    'upper right':  1,
    'upper left':   2,
    'lower left':   3,
    'lower right':  4,
    'right':        5,
    'center left':  6,
    'center right': 7,
    'lower center': 8,
    'upper center': 9,
    'center':      10,
}
def set_figtitle(figtitle, subtitle='', forcefignum=True, incanvas=True,
                 size=None, fontfamily=None, fontweight=None,
                 fig=None):
    r"""
    Set a figure's super-title on the canvas and in the window title bar.

    Args:
        figtitle (str): main title text (None is treated as '')
        subtitle (str): second line shown under the title (default = '')
        forcefignum (bool): (default = True)
        incanvas (bool): draw the suptitle on the canvas (default = True)
        fontfamily (None): (default = None)
        fontweight (None): (default = None)
        size (None): (default = None)
        fig (None): target figure; defaults to the current figure

    CommandLine:
        python -m .custom_figure set_figtitle --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> autompl()
        >>> fig = figure(fnum=1, doclf=True)
        >>> result = set_figtitle(figtitle='figtitle', fig=fig)
        >>> # xdoc: +REQUIRES(--show)
        >>> show_if_requested()
    """
    from matplotlib import pyplot as plt
    if figtitle is None:
        figtitle = ''
    if fig is None:
        fig = plt.gcf()
    figtitle = ub.ensure_unicode(figtitle)
    subtitle = ub.ensure_unicode(subtitle)
    if incanvas:
        if subtitle != '':
            subtitle = '\n' + subtitle
        prop = {
            'family': fontfamily,
            'weight': fontweight,
            'size': size,
        }
        prop = {k: v for k, v in prop.items() if v is not None}
        sup = fig.suptitle(figtitle + subtitle)
        if prop:
            # Start from the suptitle's own font so unspecified attributes
            # keep their defaults.
            fontproperties = sup.get_fontproperties().copy()
            for key, val in prop.items():
                getattr(fontproperties, 'set_' + key)(val)
            sup.set_fontproperties(fontproperties)
            # fontproperties = mpl.font_manager.FontProperties(**prop)
    else:
        fig.suptitle('')
    # Set title in the window
    window_figtitle = ('fig(%d) ' % fig.number) + figtitle
    window_figtitle = window_figtitle.replace('\n', ' ')
    # FIX: FigureCanvas.set_window_title was deprecated in matplotlib 3.4
    # and removed in 3.6; route through the canvas manager when available.
    manager = getattr(fig.canvas, 'manager', None)
    if manager is not None:
        manager.set_window_title(window_figtitle)
    else:
        fig.canvas.set_window_title(window_figtitle)
def legend(loc='best', fontproperties=None, size=None, fc='w', alpha=1,
           ax=None, handles=None):
    r"""
    Draw a legend on an axes with optional font and frame styling.

    Args:
        loc (str): (default = 'best')
        fontproperties (None): (default = None)
        size (None): (default = None)

    Ignore:
        >>> # ENABLE_DOCTEST
        >>> autompl()
        >>> loc = 'best'
        >>> xdata = np.linspace(-6, 6)
        >>> ydata = np.sin(xdata)
        >>> plt.plot(xdata, ydata, label='sin')
        >>> fontproperties = None
        >>> size = None
        >>> result = legend(loc, fontproperties, size)
        >>> print(result)
        >>> show_if_requested()
    """
    from matplotlib import pyplot as plt
    assert loc in _LEGEND_LOCATION or loc == 'best', (
        'invalid loc. try one of %r' % (_LEGEND_LOCATION,))
    if ax is None:
        ax = plt.gca()
    # Resolve font properties: either the caller's object, or a minimal
    # dict holding just the size (if one was given).
    if fontproperties is not None:
        prop = fontproperties
    else:
        prop = {'size': size} if size is not None else {}
        # prop['weight'] = 'normal'
        # prop['family'] = 'sans-serif'
    legendkw = dict(loc=loc)
    if prop:
        legendkw['prop'] = prop
    if handles is not None:
        legendkw['handles'] = handles
    legend = ax.legend(**legendkw)
    if legend:
        frame = legend.get_frame()
        frame.set_fc(fc)
        frame.set_alpha(alpha)
def distinct_colors(N, brightness=.878, randomize=True, hue_range=(0.0, 1.0), cmap_seed=None):
    r"""
    Generate ``N`` visually distinct RGB colors.

    Args:
        N (int): number of colors to generate
        brightness (float): saturation/value used for the HSV sweep

    Returns:
        list: RGB_tuples

    CommandLine:
        python -m color_funcs --test-distinct_colors --N 2 --show --hue-range=0.05,.95
        python -m color_funcs --test-distinct_colors --N 3 --show --hue-range=0.05,.95
        python -m color_funcs --test-distinct_colors --N 4 --show --hue-range=0.05,.95
        python -m .color_funcs --test-distinct_colors --N 3 --show --no-randomize
        python -m .color_funcs --test-distinct_colors --N 4 --show --no-randomize
        python -m .color_funcs --test-distinct_colors --N 6 --show --no-randomize
        python -m .color_funcs --test-distinct_colors --N 20 --show

    References:
        http://blog.jianhuashao.com/2011/09/generate-n-distinct-colors.html

    CommandLine:
        python -m .color_funcs --exec-distinct_colors --show
        python -m .color_funcs --exec-distinct_colors --show --no-randomize --N 50
        python -m .color_funcs --exec-distinct_colors --show --cmap_seed=foobar

    Ignore:
        >>> # build test data
        >>> autompl()
        >>> N = ub.smartcast(ub.get_argval('--N', default=2), int)  # FIXME
        >>> randomize = not ub.argflag('--no-randomize')
        >>> brightness = 0.878
        >>> # execute function
        >>> cmap_seed = ub.get_argval('--cmap_seed', default=None)
        >>> hue_range = ub.smartcast(ub.get_argval('--hue-range', default=(0.00, 1.0)), list) #FIXME
        >>> RGB_tuples = distinct_colors(N, brightness, randomize, hue_range, cmap_seed=cmap_seed)
        >>> # verify results
        >>> assert len(RGB_tuples) == N
        >>> result = str(RGB_tuples)
        >>> print(result)
        >>> # xdoctest: +REQUIRES(--show)
        >>> color_list = RGB_tuples
        >>> testshow_colors(color_list)
        >>> show_if_requested()
    """
    # TODO: Add sin wave modulation to the sat and value
    # HACK for white figures
    from matplotlib import pyplot as plt
    import colorsys
    remove_yellow = True

    use_jet = False
    if use_jet:
        cmap = plt.cm.jet
        RGB_tuples = list(map(tuple, cmap(np.linspace(0, 1, N))))
    elif cmap_seed is not None:
        # Randomized map based on a seed
        #cmap_ = 'Set1'
        #cmap_ = 'Dark2'
        choices = [
            #'Set1', 'Dark2',
            'jet',
            #'gist_rainbow',
            #'rainbow',
            #'gnuplot',
            #'Accent'
        ]
        cmap_hack = ub.argval('--cmap-hack', default=None)
        ncolor_hack = ub.argval('--ncolor-hack', default=None)
        if cmap_hack is not None:
            choices = [cmap_hack]
        if ncolor_hack is not None:
            N = int(ncolor_hack)
        N_ = N
        seed = sum(list(map(ord, ub.hash_data(cmap_seed))))
        rng = np.random.RandomState(seed + 48930)
        cmap_str = rng.choice(choices, 1)[0]
        #print('cmap_str = %r' % (cmap_str,))
        cmap = plt.cm.get_cmap(cmap_str)
        #.hashstr27(cmap_seed)
        #cmap_seed = 0
        #pass
        # Jitter the sample positions so seeded palettes differ, then fold
        # the positions back into [0, 1].
        jitter = (rng.randn(N) / (rng.randn(100).max() / 2)).clip(-1, 1) * ((1 / (N ** 2)))
        range_ = np.linspace(0, 1, N, endpoint=False)
        #print('range_ = %r' % (range_,))
        range_ = range_ + jitter
        #print('range_ = %r' % (range_,))
        while not (np.all(range_ >= 0) and np.all(range_ <= 1)):
            range_[range_ < 0] = np.abs(range_[range_ < 0] )
            range_[range_ > 1] = 2 - range_[range_ > 1]
        #print('range_ = %r' % (range_,))
        shift = rng.rand()
        range_ = (range_ + shift) % 1
        #print('jitter = %r' % (jitter,))
        #print('shift = %r' % (shift,))
        #print('range_ = %r' % (range_,))
        if ncolor_hack is not None:
            range_ = range_[0:N_]
        RGB_tuples = list(map(tuple, cmap(range_)))
    else:
        # Default: evenly spaced hues in HSV space at fixed sat/value.
        sat = brightness
        val = brightness
        hmin, hmax = hue_range
        if remove_yellow:
            hue_skips = [(.13, .24)]
        else:
            hue_skips = []
        hue_skip_ranges = [_[1] - _[0] for _ in hue_skips]
        total_skip = sum(hue_skip_ranges)
        hmax_ = hmax - total_skip
        # FIX: np.float was removed in numpy 1.24; it was always an alias
        # for the builtin float.
        hue_list = np.linspace(hmin, hmax_, N, endpoint=False, dtype=float)
        # Remove colors (like hard to see yellows) in specified ranges
        for skip, range_ in zip(hue_skips, hue_skip_ranges):
            hue_list = [hue if hue <= skip[0] else hue + range_ for hue in hue_list]
        HSV_tuples = [(hue, sat, val) for hue in hue_list]
        RGB_tuples = [colorsys.hsv_to_rgb(*x) for x in HSV_tuples]
    if randomize:
        deterministic_shuffle(RGB_tuples)
    return RGB_tuples
def distinct_markers(num, style='astrisk', total=None, offset=0):
    r"""
    Build ``num`` distinct matplotlib marker specs by rotating a base shape.

    Args:
        num (?):

    CommandLine:
        python -m .draw_func2 --exec-distinct_markers --show
        python -m .draw_func2 --exec-distinct_markers --style=star --show
        python -m .draw_func2 --exec-distinct_markers --style=polygon --show

    Ignore:
        >>> autompl()
        >>> style = ub.get_argval('--style', type_=str, default='astrisk')
        >>> marker_list = distinct_markers(10, style)
        >>> x_data = np.arange(0, 3)
        >>> for count, (marker) in enumerate(marker_list):
        >>>     plt.plot(x_data, [count] * len(x_data), marker=marker, markersize=10, linestyle='', label=str(marker))
        >>> legend()
        >>> show_if_requested()
    """
    # Matplotlib marker tuples are (numsides, style_code, rotation_degrees).
    style_code = {
        'astrisk': 2,
        'star': 1,
        'polygon': 0,
        'circle': 3,
    }[style]
    num_sides = 3
    if total is None:
        total = num
    sector = 360 / num_sides
    return [
        (num_sides, style_code, sector * (idx + offset) / total)
        for idx in range(num)
    ]
def deterministic_shuffle(list_, rng=0):
    r"""
    Shuffle a list in place using a seeded (reproducible) random generator.

    Args:
        list_ (list): list to shuffle (modified in place)
        rng (int | RandomState): random seed or random number generator;
            an int seed always produces the same permutation.

    Returns:
        list: list_ - the same (now shuffled) list, returned for chaining

    Example:
        >>> list_ = [1, 2, 3, 4, 5, 6]
        >>> seed = 1
        >>> list_ = deterministic_shuffle(list_, seed)
        >>> result = str(list_)
        >>> print(result)
        [3, 2, 5, 1, 4, 6]
    """
    from netharn import util
    # ensure_rng coerces an int seed (or an existing generator) into a
    # numpy RandomState, so the shuffle below is reproducible
    rng = util.ensure_rng(rng)
    # NOTE: shuffles in place; the return value aliases the input
    rng.shuffle(list_)
    return list_
# Module-level counter backing `next_fnum`. Figure numbers start above 9001
# to stay clear of small, manually chosen figure numbers.
_BASE_FNUM = 9001


def next_fnum(new_base=None):
    """
    Return the next unused figure number, advancing a global counter.

    Args:
        new_base (int, optional): if given, reset the counter to this value
            before incrementing.

    Returns:
        int: a fresh figure number
    """
    global _BASE_FNUM
    if new_base is not None:
        _BASE_FNUM = new_base
    _BASE_FNUM = _BASE_FNUM + 1
    return _BASE_FNUM
def ensure_fnum(fnum):
    """ Return `fnum` unchanged if given, otherwise allocate a new figure number. """
    return next_fnum() if fnum is None else fnum
def _save_requested(fpath_, save_parts):
    """
    Handle a command-line requested figure save (incomplete port).

    NOTE: the first statement unconditionally raises NotImplementedError,
    so everything below it is currently dead code retained from the original
    implementation for reference during the eventual port.

    Args:
        fpath_ (str): requested output path (may contain format placeholders)
        save_parts (bool): when True, save each group of axes as its own image

    Raises:
        NotImplementedError: always, until this port is finished
    """
    raise NotImplementedError('havent done this yet')
    # --- everything below this point is unreachable ---
    # dpi = ub.argval('--dpi', type_=int, default=200)
    from os.path import expanduser
    from matplotlib import pyplot as plt
    dpi = 200
    fpath_ = expanduser(fpath_)
    print('Figure save was requested')
    # arg_dict = ut.get_arg_dict(prefix_list=['--', '-'],
    #                            type_hints={'t': list, 'a': list})
    arg_dict = {}
    # HACK: flatten list-valued args into a bracketed string usable in a filename
    arg_dict = {
        key: (val[0] if len(val) == 1 else '[' + ']['.join(val) + ']')
        if isinstance(val, list) else val
        for key, val in arg_dict.items()
    }
    fpath_ = fpath_.format(**arg_dict)
    # strip characters that are awkward in filenames
    fpath_ = fpath_.replace(' ', '').replace('\'', '').replace('"', '')
    dpath = ub.argval('--dpath', type_=str, default=None)
    if dpath is None:
        gotdpath = False
        dpath = '.'
    else:
        gotdpath = True
    fpath = join(dpath, fpath_)
    if not gotdpath:
        dpath = dirname(fpath_)
    print('dpath = %r' % (dpath,))
    fig = plt.gcf()
    fig.dpi = dpi
    fpath_strict = ub.truepath(fpath)
    CLIP_WHITE = ub.argflag('--clipwhite')
    from netharn import util
    if save_parts:
        # NOTE(review): the bool parameter `save_parts` shadows the
        # module-level save_parts() function, so the call below would
        # TypeError if this code were ever resurrected -- confirm/fix then.
        # TODO: call save_parts instead, but we still need to do the
        # special grouping.
        # Group axes that belong together
        atomic_axes = []
        seen_ = set([])
        for ax in fig.axes:
            div = _get_plotdat(ax, _DF2_DIVIDER_KEY, None)
            if div is not None:
                df2_div_axes = _get_plotdat_dict(ax).get('df2_div_axes', [])
                seen_.add(ax)
                seen_.update(set(df2_div_axes))
                atomic_axes.append([ax] + df2_div_axes)
                # TODO: pad these a bit
            else:
                if ax not in seen_:
                    atomic_axes.append([ax])
                    seen_.add(ax)
        hack_axes_group_row = ub.argflag('--grouprows')
        if hack_axes_group_row:
            # group by subplot column; uses the last ax's colNum per group
            groupid_list = []
            for axs in atomic_axes:
                for ax in axs:
                    groupid = ax.colNum
                groupid_list.append(groupid)
            groups = ub.group_items(atomic_axes, groupid_list)
            new_groups = list(map(ub.flatten, groups.values()))
            atomic_axes = new_groups
            #[[(ax.rowNum, ax.colNum) for ax in axs] for axs in atomic_axes]
            # save all rows of each column
        subpath_list = save_parts(fig=fig, fpath=fpath_strict,
                                  grouped_axes=atomic_axes, dpi=dpi)
        absfpath_ = subpath_list[-1]
        if CLIP_WHITE:
            for subpath in subpath_list:
                # remove white borders
                util.clipwhite_ondisk(subpath, subpath)
    else:
        savekw = {}
        # savekw['transparent'] = fpath.endswith('.png') and not noalpha
        savekw['transparent'] = ub.argflag('--alpha')
        savekw['dpi'] = dpi
        savekw['edgecolor'] = 'none'
        savekw['bbox_inches'] = extract_axes_extents(fig, combine=True)  # replaces need for clipwhite
        absfpath_ = ub.truepath(fpath)
        fig.savefig(absfpath_, **savekw)
        if CLIP_WHITE:
            # remove white borders
            fpath_in = fpath_out = absfpath_
            util.clipwhite_ondisk(fpath_in, fpath_out)
    if ub.argflag(('--diskshow', '--ds')):
        # show what we wrote
        ub.startfile(absfpath_)
def show_if_requested(N=1):
    """
    Used at the end of tests. Handles command line arguments for saving
    figures (--save / --saveparts) or showing them (--show).

    Args:
        N (int): unused; kept for backwards compatible call signatures

    References:
        http://stackoverflow.com/questions/4325733/save-a-subplot-in-matplotlib
    """
    import matplotlib.pyplot as plt
    # Process figures adjustments from command line before a show or a save
    # udpate_adjust_subplots()
    # if use_argv:
    #     # hack to take args from commandline
    #     adjust_dict = ut.parse_dict_from_argv(adjust_dict)
    #     adjust_subplots(use_argv=True)
    # def update_figsize():
    #     """ updates figsize based on command line """
    #     figsize = ub.argval('--figsize', type_=list, default=None)
    #     if figsize is not None:
    #         # Enforce inches and DPI
    #         fig = plt.gcf()
    #         figsize = [eval(term) if isinstance(term, str) else term
    #                    for term in figsize]
    #         figw, figh = figsize[0], figsize[1]
    #         print('get_size_inches = %r' % (fig.get_size_inches(),))
    #         print('fig w,h (inches) = %r, %r' % (figw, figh))
    #         fig.set_size_inches(figw, figh)
    #         #print('get_size_inches = %r' % (fig.get_size_inches(),))
    # update_figsize()
    save_parts = ub.argflag('--saveparts')
    fpath_ = ub.argval('--save', default=None)
    if fpath_ is None:
        # --saveparts can also carry the output path directly
        fpath_ = ub.argval('--saveparts', default=None)
        save_parts = True
    if fpath_ is not None:
        _save_requested(fpath_, save_parts)
    # elif ub.argflag('--cmd'):
    #     pass
    if ub.argflag('--show'):
        # if ub.argflag('--tile'):
        #     if ut.get_computer_name().lower() in ['hyrule']:
        #         fig_presenter.all_figures_tile(percent_w=.5, monitor_num=0)
        #     else:
        #         fig_presenter.all_figures_tile()
        # if ub.argflag('--present'):
        #     fig_presenter.present()
        # for fig in fig_presenter.get_all_figures():
        #     fig.set_dpi(80)
        plt.show()
def save_parts(fig, fpath, grouped_axes=None, dpi=None):
    """
    Saves each axes (or group of axes) of a figure as its own image.

    FIXME: this works in mpl 2.0.0, but not 2.0.2

    Args:
        fig (matplotlib.figure.Figure): figure whose axes are saved
        fpath (str): file path string; a letter suffix (A, B, C, ...) is
            appended for each saved part
        grouped_axes (list, optional): list of lists of axes to save
            together; defaults to one group per axes
        dpi (None): resolution override (default = None)

    Returns:
        list: subpaths written to disk, one per group

    CommandLine:
        python -m draw_func2 save_parts

    Ignore:
        >>> # DISABLE_DOCTEST
        >>> autompl()
        >>> import matplotlib as mpl
        >>> import matplotlib.pyplot as plt
        >>> def testimg(fname):
        >>>     return plt.imread(mpl.cbook.get_sample_data(fname))
        >>> fnames = ['grace_hopper.png', 'ada.png'] * 4
        >>> fig = plt.figure(1)
        >>> for c, fname in enumerate(fnames, start=1):
        >>>     ax = fig.add_subplot(3, 4, c)
        >>>     ax.imshow(testimg(fname))
        >>>     ax.set_title(fname[0:3] + str(c))
        >>>     ax.set_xticks([])
        >>>     ax.set_yticks([])
        >>> ax = fig.add_subplot(3, 1, 3)
        >>> ax.plot(np.sin(np.linspace(0, np.pi * 2)))
        >>> ax.set_xlabel('xlabel')
        >>> ax.set_ylabel('ylabel')
        >>> ax.set_title('title')
        >>> fpath = 'test_save_parts.png'
        >>> adjust_subplots(fig=fig, wspace=.3, hspace=.3, top=.9)
        >>> subpaths = save_parts(fig, fpath, dpi=300)
        >>> fig.savefig(fpath)
        >>> ub.startfile(subpaths[0])
        >>> ub.startfile(fpath)
    """
    if dpi:
        # Need to set figure dpi before we draw
        fig.dpi = dpi
    # We need to draw the figure before calling get_window_extent
    # (or we can figure out how to set the renderer object)
    # if getattr(fig.canvas, 'renderer', None) is None:
    fig.canvas.draw()
    # Group axes that belong together
    if grouped_axes is None:
        grouped_axes = []
        for ax in fig.axes:
            grouped_axes.append([ax])
    subpaths = []
    _iter = enumerate(grouped_axes, start=0)
    _iter = ub.ProgIter(list(_iter), label='save subfig')
    for count, axs in _iter:
        # suffix each part with an uppercase letter: A, B, C, ...
        subpath = ub.augpath(fpath, suffix=chr(count + 65))
        # crop the saved image to the extent of this group of axes
        extent = axes_extent(axs).transformed(fig.dpi_scale_trans.inverted())
        savekw = {}
        savekw['transparent'] = ub.argflag('--alpha')
        if dpi is not None:
            savekw['dpi'] = dpi
        savekw['edgecolor'] = 'none'
        fig.savefig(subpath, bbox_inches=extent, **savekw)
        subpaths.append(subpath)
    return subpaths
# Module-level flag: set True once `qtensure` has switched IPython to a Qt
# backend, so the magic is only invoked once per process.
_qtensured = False
def _current_ipython_session():
"""
Returns a reference to the current IPython session, if one is running
"""
try:
__IPYTHON__
except NameError:
return None
else:
import IPython
ipython = IPython.get_ipython()
# if ipython is None we must have exited ipython at some point
return ipython
def qtensure():
    """
    If you are in an IPython session, ensure matplotlib uses a Qt backend.

    Picks qt4 when PyQt4 is already imported, otherwise qt5. Outside of
    IPython (or after the first successful switch) this is a no-op.
    """
    global _qtensured
    if _qtensured:
        return
    ipython = _current_ipython_session()
    if not ipython:
        return
    import sys
    # prefer the Qt binding that is already loaded in this process
    flavor = 'qt4' if 'PyQt4' in sys.modules else 'qt5'
    ipython.magic('pylab %s --no-import-all' % flavor)
    _qtensured = True
def aggensure():
    """
    Fall back to the headless 'agg' backend when not running under IPython.

    This might help prevent errors in tmux like:
        qt.qpa.screen: QXcbConnection: Could not connect to display localhost:10.0
        Could not connect to any X display.
    """
    import matplotlib as mpl
    if mpl.get_backend() == 'agg':
        return  # already headless; nothing to do
    # interactive IPython sessions are left alone (they may want a GUI backend)
    if not _current_ipython_session():
        set_mpl_backend('agg')
def set_mpl_backend(backend):
    """
    Switch the active matplotlib backend.

    Args:
        backend (str): name of backend to use (e.g. Agg, Qt5Agg)
    """
    import sys
    import matplotlib as mpl
    if backend.lower().startswith('qt'):
        # interactive qt case: configure IPython (if any) for Qt first
        qtensure()
    if backend == mpl.get_backend():
        return  # nothing to change
    if 'matplotlib.pyplot' in sys.modules:
        # pyplot was already imported, so we must hot-swap the backend
        # (this is the "experimental" path)
        from matplotlib import pyplot as plt
        plt.switch_backend(backend)
    else:
        # safe to select the backend before pyplot is ever imported
        mpl.use(backend)
def autompl():
    """
    Use platform heuristics to automatically set the matplotlib backend.

    If no display is available the backend is set to 'agg', otherwise the
    cross-platform 'Qt5Agg' backend is used. Windows is currently left at
    its default backend.
    """
    import os
    import sys
    if sys.platform.startswith('win32'):
        # TODO: pick something reasonable on Windows; default kept for now
        return
    # an empty / missing DISPLAY environment variable means we are headless
    has_display = bool(os.environ.get('DISPLAY', ''))
    set_mpl_backend('Qt5Agg' if has_display else 'agg')
def imshow(img, fnum=None, title=None, figtitle=None, pnum=None,
           interpolation='nearest', cmap=None, heatmap=False,
           data_colorbar=False, xlabel=None, redraw_image=True,
           colorspace='bgr', ax=None, alpha=None, norm=None, **kwargs):
    r"""
    Display an image on a matplotlib axes, handling common formats.

    Args:
        img (ndarray or str): image data (gray HxW, color HxWx3/HxWx4, or a
            CHW tensor which gets transposed) or a path to an image file
        fnum (int): figure number
        colorspace (str): if the data is 3-4 channels, this indicates the colorspace
            1 channel data is assumed grayscale. 4 channels assumes alpha.
        title (str): axes title
        figtitle (None): figure-level title
        pnum (tuple): plot number
        interpolation (str): other interpolations = nearest, bicubic, bilinear
        cmap (None): colormap used for grayscale display
        heatmap (bool): if True, force the 'hot' colormap
        data_colorbar (bool): if True, add a colorbar keyed to the unique
            values in the image
        xlabel (None): optional x-axis label
        alpha (None): image alpha passed through to matplotlib
        norm (None): normalization instance (True selects a default Normalize)
        redraw_image (bool): used when calling imshow over and over. if false
            doesnt do the image part.

    Returns:
        tuple: (fig, ax)

    Kwargs:
        docla, doclf, projection

    Ignore:
        >>> autompl()
        >>> img_fpath = ut.grab_test_imgpath('carl.jpg')
        >>> img = util.imread(img_fpath)
        >>> (fig, ax) = imshow(img)
        >>> result = ('(fig, ax) = %s' % (str((fig, ax)),))
        >>> print(result)
        >>> ut.show_if_requested()
    """
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    if ax is not None:
        # caller supplied the axes; do not touch figure-level state
        fig = ax.figure
        nospecial = True
    else:
        fig = figure(fnum=fnum, pnum=pnum, title=title, figtitle=figtitle, **kwargs)
        ax = plt.gca()
        nospecial = False
        #ax.set_xticks([])
        #ax.set_yticks([])
        #return fig, ax
    if not redraw_image:
        return fig, ax
    if isinstance(img, six.string_types):
        # Allow for path to image to be specified
        from netharn import util
        img_fpath = img
        img = util.imread(img_fpath)
    plt_imshow_kwargs = {
        'interpolation': interpolation,
        #'cmap': plt.get_cmap('gray'),
    }
    if alpha is not None:
        plt_imshow_kwargs['alpha'] = alpha
    if norm is not None:
        if norm is True:
            norm = mpl.colors.Normalize()
        plt_imshow_kwargs['norm'] = norm
    else:
        if cmap is None and not heatmap and not nospecial:
            # assume uint8-style data limits when nothing else is specified
            plt_imshow_kwargs['vmin'] = 0
            plt_imshow_kwargs['vmax'] = 255
    if heatmap:
        cmap = 'hot'
    # Handle tensor chw format in most cases
    if img.ndim == 3:
        if img.shape[0] == 3 or img.shape[0] == 1:
            if img.shape[2] > 4:
                # probably in chw format
                img = img.transpose(1, 2, 0)
    try:
        if len(img.shape) == 3 and (img.shape[2] == 3 or img.shape[2] == 4):
            # img is in a color format
            from netharn import util
            dst_space = 'rgb'
            if img.shape[2] == 4:
                # carry the alpha channel through the colorspace conversion
                colorspace += 'a'
                dst_space += 'a'
            imgRGB = util.convert_colorspace(img, dst_space=dst_space,
                                             src_space=colorspace)
            if imgRGB.dtype.kind == 'f':
                maxval = imgRGB.max()
                if maxval > 1.01 and maxval < 256:
                    # float data in the 0-255 range; cast for display
                    imgRGB = np.array(imgRGB, dtype=np.uint8)
            ax.imshow(imgRGB, **plt_imshow_kwargs)
        elif len(img.shape) == 2 or (len(img.shape) == 3 and img.shape[2] == 1):
            # img is in grayscale
            if len(img.shape) == 3:
                imgGRAY = img.reshape(img.shape[0:2])
            else:
                imgGRAY = img
            if cmap is None:
                cmap = plt.get_cmap('gray')
            if isinstance(cmap, six.string_types):
                cmap = plt.get_cmap(cmap)
            # for some reason gray floats aren't working right
            if imgGRAY.max() <= 1.01 and imgGRAY.min() >= -1E-9:
                imgGRAY = (imgGRAY * 255).astype(np.uint8)
            ax.imshow(imgGRAY, cmap=cmap, **plt_imshow_kwargs)
        else:
            raise AssertionError(
                'unknown image format. img.dtype=%r, img.shape=%r' %
                (img.dtype, img.shape))
    except TypeError as te:
        print('[df2] imshow ERROR %r' % (te,))
        raise
    except Exception as ex:
        # dump as much debug info as possible before re-raising
        print('!!!!!!!!!!!!!!WARNING!!!!!!!!!!!')
        print('[df2] type(img) = %r' % type(img))
        if not isinstance(img, np.ndarray):
            print('!!!!!!!!!!!!!!ERRROR!!!!!!!!!!!')
            pass
        #print('img = %r' % (img,))
        print('[df2] img.dtype = %r' % (img.dtype,))
        print('[df2] type(img) = %r' % (type(img),))
        print('[df2] img.shape = %r' % (img.shape,))
        print('[df2] imshow ERROR %r' % ex)
        raise
    #plt.set_cmap('gray')
    ax.set_xticks([])
    ax.set_yticks([])
    if data_colorbar is True:
        scores = np.unique(img.flatten())
        if cmap is None:
            cmap = 'hot'
        colors = scores_to_color(scores, cmap)
        colorbar(scores, colors)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if figtitle is not None:
        set_figtitle(figtitle)
    return fig, ax
def colorbar(scalars, colors, custom=False, lbl=None, ticklabels=None,
             float_format='%.2f', **kwargs):
    """
    adds a color bar next to the axes based on specific scalars

    Args:
        scalars (ndarray): scalar values, one per color
        colors (ndarray): corresponding colors
        custom (bool): use custom ticks (shows one tick per unique scalar)
        lbl (str, optional): label for the colorbar
        ticklabels (list, optional): explicit tick labels (non-custom mode)
        float_format (str): format used for float tick labels in custom mode

    Kwargs:
        See plt.colorbar

    Returns:
        cb : matplotlib colorbar object

    Ignore:
        >>> autompl()
        >>> scalars = np.array([-1, -2, 1, 1, 2, 7, 10])
        >>> cmap_ = 'plasma'
        >>> logscale = False
        >>> custom = True
        >>> reverse_cmap = True
        >>> val2_customcolor  = {
        ...        -1: UNKNOWN_PURP,
        ...        -2: LIGHT_BLUE,
        ...    }
        >>> colors = scores_to_color(scalars, cmap_=cmap_, logscale=logscale, reverse_cmap=reverse_cmap, val2_customcolor=val2_customcolor)
        >>> colorbar(scalars, colors, custom=custom)
        >>> df2.present()
        >>> show_if_requested()

    Ignore:
        >>> # ENABLE_DOCTEST
        >>> scalars = np.linspace(0, 1, 100)
        >>> cmap_ = 'plasma'
        >>> logscale = False
        >>> custom = False
        >>> reverse_cmap = False
        >>> colors = scores_to_color(scalars, cmap_=cmap_, logscale=logscale,
        >>>                          reverse_cmap=reverse_cmap)
        >>> colors = [lighten_rgb(c, .3) for c in colors]
        >>> colorbar(scalars, colors, custom=custom)
        >>> df2.present()
        >>> show_if_requested()
    """
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    assert len(scalars) == len(colors), 'scalars and colors must be corresponding'
    if len(scalars) == 0:
        return None
    # Parameters
    ax = plt.gca()
    # reuse (or create) a divider so repeated colorbars stack consistently
    divider = _ensure_divider(ax)
    cax = divider.append_axes('right', size='5%', pad=0.05)
    xy, width, height = _get_axis_xy_width_height(ax)
    #orientation = ['vertical', 'horizontal'][0]
    TICK_FONTSIZE = 8
    #
    # Create scalar mappable with cmap
    if custom:
        # FIXME: clean this code up and change the name custom
        # to be meaningful. It is more like: display unique colors
        unique_scalars, unique_idx = np.unique(scalars, return_index=True)
        unique_colors = np.array(colors)[unique_idx]
        #max_, min_ = unique_scalars.max(), unique_scalars.min()
        #extent_ = max_ - min_
        #bounds = np.linspace(min_, max_ + 1, extent_ + 2)
        listed_cmap = mpl.colors.ListedColormap(unique_colors)
        #norm = mpl.colors.BoundaryNorm(bounds, listed_cmap.N)
        #sm = mpl.cm.ScalarMappable(cmap=listed_cmap, norm=norm)
        sm = mpl.cm.ScalarMappable(cmap=listed_cmap)
        sm.set_array(np.linspace(0, 1, len(unique_scalars) + 1))
    else:
        sorted_scalars = sorted(scalars)
        listed_cmap = scores_to_cmap(scalars, colors)
        sm = plt.cm.ScalarMappable(cmap=listed_cmap)
        sm.set_array(sorted_scalars)
    # Use mapable object to create the colorbar
    #COLORBAR_SHRINK = .42  # 1
    #COLORBAR_PAD = .01  # 1
    #COLORBAR_ASPECT = np.abs(20 * height / (width))  # 1
    cb = plt.colorbar(sm, cax=cax, **kwargs)
    ## Add the colorbar to the correct label
    #axis = cb.ax.yaxis  # if orientation == 'horizontal' else cb.ax.yaxis
    #position = 'bottom' if orientation == 'horizontal' else 'right'
    #axis.set_ticks_position(position)
    # This line alone removes data
    # axis.set_ticks([0, .5, 1])
    if custom:
        # one tick per unique scalar, centered inside its color band
        ticks = np.linspace(0, 1, len(unique_scalars) + 1)
        if len(ticks) < 2:
            ticks += .5
        else:
            # SO HACKY
            ticks += (ticks[1] - ticks[0]) / 2
        if isinstance(unique_scalars, np.ndarray) and unique_scalars.dtype.kind == 'f':
            ticklabels = [float_format % scalar for scalar in unique_scalars]
        else:
            ticklabels = unique_scalars
        cb.set_ticks(ticks)  # tick locations
        cb.set_ticklabels(ticklabels)  # tick labels
    elif ticklabels is not None:
        # spread caller-provided labels evenly over the existing tick range
        ticks_ = cb.ax.get_yticks()
        mx = ticks_.max()
        mn = ticks_.min()
        ticks = np.linspace(mn, mx, len(ticklabels))
        cb.set_ticks(ticks)  # tick locations
        cb.set_ticklabels(ticklabels)
        #cb.ax.get_yticks()
        #cb.set_ticks(ticks)  # tick locations
        #cb.set_ticklabels(ticklabels)  # tick labels
    # _set_plotdat(cb.ax, 'viztype', 'colorbar-%s' % (lbl,))
    # _set_plotdat(cb.ax, 'sm', sm)
    # FIXME: Figure out how to make a maximum number of ticks
    # and to enforce them to be inside the data bounds
    cb.ax.tick_params(labelsize=TICK_FONTSIZE)
    # Sets current axis
    plt.sca(ax)
    if lbl is not None:
        cb.set_label(lbl)
    return cb
# Key under which an axis caches its `make_axes_locatable` divider
# (see _ensure_divider / _get_plotdat)
_DF2_DIVIDER_KEY = '_df2_divider'
def _get_plotdat(ax, key, default=None):
    """ Read an internal property previously stored on a matplotlib axis. """
    return _get_plotdat_dict(ax).get(key, default)
def _set_plotdat(ax, key, val):
    """ Store an internal property on a matplotlib axis. """
    _get_plotdat_dict(ax)[key] = val
def _del_plotdat(ax, key):
    """ Remove an internal property from a matplotlib axis, if present. """
    _get_plotdat_dict(ax).pop(key, None)
def _get_plotdat_dict(ax):
""" sets internal property to a matplotlib axis """
if '_plotdat' not in ax.__dict__:
ax.__dict__['_plotdat'] = {}
plotdat_dict = ax.__dict__['_plotdat']
return plotdat_dict
def _ensure_divider(ax):
    """
    Returns previously constructed divider or creates one.

    The divider's `append_axes` method is wrapped so that every axes it
    creates is registered under the axis's 'df2_div_axes' property; this
    lets save/grouping code treat an axis and its colorbars as one unit.
    """
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    divider = _get_plotdat(ax, _DF2_DIVIDER_KEY, None)
    if divider is None:
        divider = make_axes_locatable(ax)
        _set_plotdat(ax, _DF2_DIVIDER_KEY, divider)
        orig_append_axes = divider.append_axes
        def df2_append_axes(divider, position, size, pad=None, add_to_figure=True, **kwargs):
            """ override divider add axes to register the divided axes """
            div_axes = _get_plotdat(ax, 'df2_div_axes', [])
            new_ax = orig_append_axes(position, size, pad=pad, add_to_figure=add_to_figure, **kwargs)
            div_axes.append(new_ax)
            _set_plotdat(ax, 'df2_div_axes', div_axes)
            return new_ax
        # bind the wrapper as a bound method so future calls are registered too
        new_method = df2_append_axes.__get__(divider, divider.__class__)
        setattr(divider, 'append_axes', new_method)
        # ut.inject_func_as_method(divider, df2_append_axes, 'append_axes', allow_override=True)
    return divider
def scores_to_cmap(scores, colors=None, cmap_='hot'):
    """
    Build a ListedColormap whose entries are `colors` ordered by `scores`.

    Args:
        scores (ndarray): scalar values used to order the colors
        colors (ndarray, optional): one color per score; computed from
            `cmap_` via scores_to_color when not given
        cmap_ (str): colormap name used when `colors` is None

    Returns:
        mpl.colors.ListedColormap: colormap listing the colors in score order
    """
    import matplotlib as mpl
    if colors is None:
        colors = scores_to_color(scores, cmap_=cmap_)
    score_arr = np.array(scores)
    color_arr = np.array(colors)
    # order the colors by ascending score
    sorted_colors = color_arr[score_arr.argsort()]
    # Make a listed colormap and mappable object
    return mpl.colors.ListedColormap(sorted_colors)
def scores_to_color(score_list, cmap_='hot', logscale=False, reverse_cmap=False,
                    custom=False, val2_customcolor=None, score_range=None,
                    cmap_range=(.1, .9)):
    """
    Map each scalar score to a colormap color.

    Other good colormaps are 'spectral', 'gist_rainbow', 'gist_ncar', 'Set1',
    'Set2', 'Accent'
    # TODO: plasma

    Args:
        score_list (list): 1d array of scalar scores
        cmap_ (str): defaults to hot
        logscale (bool): push scores through a symmetric log2 before mapping
        reverse_cmap (bool): flip the colormap direction
        custom (bool): currently unused in this function body
        val2_customcolor (dict, optional): maps specific score values to
            fixed override colors
        score_range (tuple, optional): (min, max) used for normalization
            instead of the data extremes
        cmap_range (tuple): restricts to only a portion of the cmap to avoid extremes

    Returns:
        <class '_ast.ListComp'>

    Ignore:
        >>> ut.exec_funckw(scores_to_color, globals())
        >>> score_list = np.array([-1, -2, 1, 1, 2, 10])
        >>> # score_list = np.array([0, .1, .11, .12, .13, .8])
        >>> # score_list = np.linspace(0, 1, 100)
        >>> cmap_ = 'plasma'
        >>> colors = scores_to_color(score_list, cmap_)
        >>> imgRGB = util.atleast_nd(np.array(colors)[:, 0:3], 3, tofront=True)
        >>> imgRGB = imgRGB.astype(np.float32)
        >>> imgBGR = util.convert_colorspace(imgRGB, 'BGR', 'RGB')
        >>> imshow(imgBGR)
        >>> show_if_requested()

    Ignore:
        >>> score_list = np.array([-1, -2, 1, 1, 2, 10])
        >>> cmap_ = 'hot'
        >>> logscale = False
        >>> reverse_cmap = True
        >>> custom = True
        >>> val2_customcolor  = {
        ...        -1: UNKNOWN_PURP,
        ...        -2: LIGHT_BLUE,
        ...    }
    """
    import matplotlib.pyplot as plt
    assert len(score_list.shape) == 1, 'score must be 1d'
    if len(score_list) == 0:
        return []
    def apply_logscale(scores):
        # symmetric log transform: shift values away from zero, then log2
        scores = np.array(scores)
        above_zero = scores >= 0
        scores_ = scores.copy()
        scores_[above_zero] = scores_[above_zero] + 1
        scores_[~above_zero] = scores_[~above_zero] - 1
        scores_ = np.log2(scores_)
        return scores_
    if logscale:
        # Hack
        score_list = apply_logscale(score_list)
        #if loglogscale
        #score_list = np.log2(np.log2(score_list + 2) + 1)
    #if isinstance(cmap_, six.string_types):
    cmap = plt.get_cmap(cmap_)
    #else:
    #    cmap = cmap_
    if reverse_cmap:
        cmap = reverse_colormap(cmap)
    #if custom:
    #    base_colormap = cmap
    #    data = score_list
    #    cmap = customize_colormap(score_list, base_colormap)
    if score_range is None:
        min_ = score_list.min()
        max_ = score_list.max()
    else:
        min_ = score_range[0]
        max_ = score_range[1]
        if logscale:
            # keep the explicit range consistent with the transformed scores
            min_, max_ = apply_logscale([min_, max_])
    if cmap_range is None:
        cmap_scale_min, cmap_scale_max = 0., 1.
    else:
        cmap_scale_min, cmap_scale_max = cmap_range
    extent_ = max_ - min_
    if extent_ == 0:
        # all scores identical; use the middle of the colormap for everything
        colors = [cmap(.5) for fx in range(len(score_list))]
    else:
        if False and logscale:
            # hack
            def score2_01(score):
                return np.log2(
                    1 + cmap_scale_min + cmap_scale_max *
                    (float(score) - min_) / (extent_))
            score_list = np.array(score_list)
            #rank_multiplier = score_list.argsort() / len(score_list)
            #normscore = np.array(list(map(score2_01, score_list))) * rank_multiplier
            normscore = np.array(list(map(score2_01, score_list)))
            colors = list(map(cmap, normscore))
        else:
            def score2_01(score):
                # normalize a score into the restricted colormap range
                return cmap_scale_min + cmap_scale_max * (float(score) - min_) / (extent_)
            colors = [cmap(score2_01(score)) for score in score_list]
        if val2_customcolor is not None:
            # apply per-value color overrides where specified
            colors = [
                np.array(val2_customcolor.get(score, color))
                for color, score in zip(colors, score_list)]
    return colors
def reverse_colormap(cmap):
    """
    Return a new colormap that runs in the opposite direction of `cmap`.

    Handles both ListedColormap (reverse the color list) and segmented
    colormaps (mirror each channel's control-point positions).

    References:
        http://nbviewer.ipython.org/github/kwinkunks/notebooks/blob/master/Matteo_colourmaps.ipynb
    """
    import matplotlib as mpl
    if isinstance(cmap, mpl.colors.ListedColormap):
        return mpl.colors.ListedColormap(cmap.colors[::-1])
    else:
        reverse = []
        k = []
        for key, channel in six.iteritems(cmap._segmentdata):
            data = []
            for t in channel:
                # mirror the control-point position; keep the channel values
                data.append((1 - t[0], t[1], t[2]))
            k.append(key)
            reverse.append(sorted(data))
        cmap_reversed = mpl.colors.LinearSegmentedColormap(
            cmap.name + '_reversed', dict(zip(k, reverse)))
        return cmap_reversed
class PlotNums(object):
    """
    Convinience class for dealing with plot numberings (pnums)

    A pnum is a ``(nRows, nCols, index)`` tuple in the form accepted by
    ``matplotlib.figure.Figure.add_subplot`` (the index is 1-based).

    Example:
        >>> pnum_ = PlotNums(nRows=2, nCols=2)
        >>> # Indexable
        >>> print(pnum_[0])
        (2, 2, 1)
        >>> # Iterable
        >>> print(ub.repr2(list(pnum_), nl=0, nobr=True))
        (2, 2, 1), (2, 2, 2), (2, 2, 3), (2, 2, 4)
        >>> # Callable (iterates through a default iterator)
        >>> print(pnum_())
        (2, 2, 1)
        >>> print(pnum_())
        (2, 2, 2)
    """

    def __init__(self, nRows=None, nCols=None, nSubplots=None, start=0):
        """
        Args:
            nRows (int, optional): number of subplot rows
            nCols (int, optional): number of subplot columns
            nSubplots (int, optional): total number of subplots; used to
                infer whichever of nRows / nCols is not given
            start (int): 0-based index of the first pnum yielded by iteration
        """
        nRows, nCols = self._get_num_rc(nSubplots, nRows, nCols)
        self.nRows = nRows
        self.nCols = nCols
        # matplotlib subplot indices are 1-based, so raw indices get +1
        self.offset = 1
        self.start = start
        self._iter = None  # lazily-created default iterator used by __call__

    def __getitem__(self, px):
        # px is a 0-based index; the returned pnum index is 1-based
        return (self.nRows, self.nCols, px + self.offset)

    def __call__(self):
        """
        Return the next pnum from a shared default iterator.
        (replacement for make_pnum_nextgen)

        Example:
            >>> import itertools as it
            >>> pnum_ = PlotNums(nSubplots=9)
            >>> pnum_list = list( (pnum_() for _ in it.count()) )
            >>> result = ('pnum_list = %s' % (ub.repr2(pnum_list),))
            >>> print(result)
        """
        if self._iter is None:
            self._iter = iter(self)
        return next(self._iter)

    def __iter__(self):
        r"""
        Yields:
            tuple : pnum

        Example:
            >>> pnum_ = iter(PlotNums(nRows=3, nCols=2))
            >>> result = ub.repr2(list(pnum_), nl=1, nobr=True)
            >>> print(result)
            (3, 2, 1),
            (3, 2, 2),
            (3, 2, 3),
            (3, 2, 4),
            (3, 2, 5),
            (3, 2, 6),
        """
        for px in range(self.start, len(self)):
            yield self[px]

    def __len__(self):
        # total number of subplot slots in the grid
        return self.nRows * self.nCols

    @classmethod
    def _get_num_rc(cls, nSubplots=None, nRows=None, nCols=None):
        r"""
        Gets a constrained row column plot grid

        Args:
            nSubplots (None): (default = None)
            nRows (None): (default = None)
            nCols (None): (default = None)

        Returns:
            tuple: (nRows, nCols)

        Example:
            >>> cases = [
            >>>     dict(nRows=None, nCols=None, nSubplots=None),
            >>>     dict(nRows=2, nCols=None, nSubplots=5),
            >>>     dict(nRows=None, nCols=2, nSubplots=5),
            >>>     dict(nRows=None, nCols=None, nSubplots=5),
            >>> ]
            >>> for kw in cases:
            >>>     print('----')
            >>>     size = PlotNums._get_num_rc(**kw)
            >>>     if kw['nSubplots'] is not None:
            >>>         assert size[0] * size[1] >= kw['nSubplots']
            >>>     print('**kw = %s' % (ub.repr2(kw),))
            >>>     print('size = %r' % (size,))
        """
        if nSubplots is None:
            # no target count: default any missing dimension to 1
            if nRows is None:
                nRows = 1
            if nCols is None:
                nCols = 1
        else:
            if nRows is None and nCols is None:
                nRows, nCols = cls._get_square_row_cols(nSubplots)
            elif nRows is not None:
                nCols = int(np.ceil(nSubplots / nRows))
            elif nCols is not None:
                nRows = int(np.ceil(nSubplots / nCols))
        return nRows, nCols

    @staticmethod
    def _get_square_row_cols(nSubplots, max_cols=None, fix=False, inclusive=True):
        r"""
        Compute a roughly square (rows, cols) grid that fits nSubplots.

        Declared as a staticmethod because it is always invoked through the
        class (``PlotNums._get_square_row_cols(...)``); without the decorator
        this call pattern fails under Python 2's unbound-method rules.

        Args:
            nSubplots (int): number of subplots to fit
            max_cols (int): column clamp used by the non-fix heuristic

        Returns:
            tuple: (int, int)

        Example:
            >>> nSubplots = 9
            >>> nSubplots_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
            >>> max_cols = None
            >>> rc_list = [PlotNums._get_square_row_cols(nSubplots, fix=True) for nSubplots in nSubplots_list]
            >>> print(repr(np.array(rc_list).T))
            array([[1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3],
                   [1, 2, 2, 2, 3, 3, 3, 3, 3, 4, 4]])
        """
        if nSubplots == 0:
            return 0, 0
        if inclusive:
            rounder = np.ceil
        else:
            rounder = np.floor
        if fix:
            # This function is very broken, but it might have dependencies
            # this is the correct version
            nCols = int(rounder(np.sqrt(nSubplots)))
            nRows = int(rounder(nSubplots / nCols))
            return nRows, nCols
        else:
            # This is the clamped num cols version
            # probably used in ibeis.viz
            if max_cols is None:
                max_cols = 5
                if nSubplots in [4]:
                    max_cols = 2
                if nSubplots in [5, 6, 7]:
                    max_cols = 3
                if nSubplots in [8]:
                    max_cols = 4
            nCols = int(min(nSubplots, max_cols))
            #nCols = int(min(rounder(np.sqrt(nrids)), 5))
            nRows = int(rounder(nSubplots / nCols))
            return nRows, nCols
def draw_border(ax, color, lw=2, offset=None, adjust=True):
    """
    Draw a rectangular border around a subplot.

    Args:
        ax: matplotlib axes to outline
        color: edge color of the border rectangle
        lw (int): line width
        offset (tuple, optional): (xoff, yoff) override for the rectangle origin
        adjust (bool): when True, pad the geometry slightly so the border
            surrounds the axes decorations

    Returns:
        matplotlib.patches.Rectangle: the patch added to the axes
    """
    import matplotlib as mpl
    if adjust:
        xy, width, height = _get_axis_xy_width_height(ax, -.7, -.2, 1, .4)
    else:
        xy, width, height = _get_axis_xy_width_height(ax)
    if offset is not None:
        xoff, yoff = offset
        xy = [xoff, yoff]
        # flip and shift the extent so the border stays anchored at the offset
        height = -height - yoff
        width = width - xoff
    rect = ax.add_patch(mpl.patches.Rectangle(xy, width, height, lw=lw))
    rect.set_clip_on(False)
    rect.set_fill(False)
    rect.set_edgecolor(color)
    return rect
def draw_boxes(boxes, box_format='xywh', color='blue', labels=None,
               textkw=None, ax=None):
    """
    Draw bounding boxes (optionally labeled) on a matplotlib axes.

    Args:
        boxes (list): list of coordindates in xywh, tlbr, or cxywh format
        box_format (str): specify how boxes are formated
            xywh is the top left x and y pixel width and height
            cxywh is the center xy pixel width and height
            tlbr is the top left xy and the bottom right xy
        color (str): edge color of the boxes
        labels (list): if specified, plots a text annotation on each box
        textkw (dict, optional): overrides for the label text style
        ax: axes to draw on (defaults to the current axes)

    Returns:
        matplotlib.collections.PatchCollection: the added box collection
        (None when `boxes` is empty)

    Example:
        >>> from netharn.util.mplutil import *
        >>> autompl()
        >>> bboxes = [[.1, .1, .6, .3], [.3, .5, .5, .6]]
        >>> col = draw_boxes(bboxes)
    """
    import matplotlib as mpl
    from matplotlib import pyplot as plt
    if ax is None:
        ax = plt.gca()
    from netharn import util
    if isinstance(boxes, util.Boxes):
        # a Boxes object carries its own format information
        box_format = boxes.format
        boxes = boxes.data
    if not len(boxes):
        return
    boxes = np.asarray(boxes)
    # normalize every supported format into xywh
    if box_format == 'xywh':
        xywh = boxes
    elif box_format == 'cxywh':
        cx, cy, w, h = boxes.T[0:4]
        x1 = cx - (w / 2)
        y1 = cy - (h / 2)
        xywh = np.vstack([x1, y1, w, h]).T
    elif box_format == 'tlbr':
        x1, y1 = boxes.T[0:2]
        w, h = boxes.T[2:4] - boxes.T[0:2]
        xywh = np.vstack([x1, y1, w, h]).T
    else:
        raise KeyError(box_format)
    edgecolor = Color(color).as01('rgba')
    # fully transparent interior; only the edges are drawn
    facecolor = Color((0, 0, 0, 0)).as01('rgba')
    rectkw = dict(ec=edgecolor, fc=facecolor, lw=2, linestyle='solid')
    patches = [mpl.patches.Rectangle((x, y), w, h, **rectkw)
               for x, y, w, h in xywh]
    col = mpl.collections.PatchCollection(patches, match_original=True)
    ax.add_collection(col)
    if labels:
        texts = []
        default_textkw = {
            'horizontalalignment': 'left',
            'verticalalignment': 'top',
            'backgroundcolor': (0, 0, 0, .3),
            'color': 'white',
            'fontproperties': mpl.font_manager.FontProperties(
                size=6, family='monospace'),
        }
        tkw = default_textkw.copy()
        if textkw is not None:
            tkw.update(textkw)
        # anchor each label at its box's top-left corner
        for (x1, y1, w, h), label in zip(xywh, labels):
            texts.append((x1, y1, label, tkw))
        for (x1, y1, catname, tkw) in texts:
            ax.text(x1, y1, catname, **tkw)
    return col
def draw_line_segments(pts1, pts2, ax=None, **kwargs):
    """
    Draw `N` line segments, one between each pair pts1[i] -> pts2[i].

    Args:
        pts1 (ndarray): Nx2 array of segment start points
        pts2 (ndarray): Nx2 array of segment end points
        ax (None): axes to draw on; defaults to the current axes
        **kwargs: lw / linewidth, alpha, color / colors (forwarded to
            matplotlib's LineCollection)

    CommandLine:
        python -m netharn.util.mplutil draw_line_segments --show

    Example:
        >>> pts1 = np.array([(.1, .8), (.6, .8)])
        >>> pts2 = np.array([(.6, .7), (.4, .1)])
        >>> figure(fnum=None)
        >>> draw_line_segments(pts1, pts2)
        >>> # xdoc: +REQUIRES(--show)
        >>> import matplotlib.pyplot as plt
        >>> ax = plt.gca()
        >>> ax.set_xlim(0, 1)
        >>> ax.set_ylim(0, 1)
        >>> show_if_requested()
    """
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    if ax is None:
        ax = plt.gca()
    assert len(pts1) == len(pts2), 'unaligned'
    segments = list(zip(pts1, pts2))
    # accept either spelling; both keys are removed from kwargs
    fallback_lw = kwargs.pop('linewidth', 1.0)
    linewidth = kwargs.pop('lw', fallback_lw)
    alpha = kwargs.pop('alpha', 1.0)
    if 'color' in kwargs:
        # LineCollection spells this keyword `colors`
        kwargs['colors'] = kwargs['color']
        # mpl.colors.ColorConverter().to_rgb(kwargs['color'])
    line_group = mpl.collections.LineCollection(
        segments, linewidths=linewidth, alpha=alpha, **kwargs)
    ax.add_collection(line_group)
def make_heatmask(probs, cmap='plasma', with_alpha=True):
    """
    Colorizes a single-channel intensity mask (with an alpha channel)

    Args:
        probs (ndarray): HxW array of intensities (coerced into [0, 1])
        cmap (str): matplotlib colormap name used for colorization
        with_alpha (bool): when True, the channels are reversed and the
            probabilities become the alpha channel

    Returns:
        ndarray: HxWx4 colorized heatmap image
    """
    import matplotlib as mpl
    from netharn.util import imutil
    assert len(probs.shape) == 2
    cmap_ = mpl.cm.get_cmap(cmap)
    probs = imutil.ensure_float01(probs)
    heatmask = cmap_(probs)
    if with_alpha:
        # NOTE(review): this reverses RGB->BGR, presumably for cv2-style
        # consumers -- confirm downstream expectations before changing
        heatmask[:, :, 0:3] = heatmask[:, :, 0:3][:, :, ::-1]
        heatmask[:, :, 3] = probs
    return heatmask
def colorbar_image(domain, cmap='plasma', dpi=96, shape=(200, 20), transparent=False):
    """
    Render a standalone colorbar for `domain` to an image array.

    Args:
        domain (ndarray): the scalar values the colorbar should span
        cmap (str): matplotlib colormap name
        dpi (int): render resolution
        shape (tuple): approximate (height, width) of the output in pixels
        transparent (bool): render with a transparent background

    Returns:
        ndarray: the rendered colorbar image

    Notes:
        shape is approximate

    Ignore:
        domain = np.linspace(-30, 200)
        cmap='plasma'
        dpi = 80
        dsize = (20, 200)

        util.imwrite('foo.png', util.colorbar_image(np.arange(0, 1)), shape=(400, 80))

        import plottool as pt
        pt.qtensure()

        import matplotlib as mpl
        mpl.style.use('ggplot')
        util.imwrite('foo.png', util.colorbar_image(np.linspace(0, 1, 100), dpi=200, shape=(1000, 40), transparent=1))
        ub.startfile('foo.png')
    """
    import matplotlib as mpl
    # force=False keeps an already-selected backend
    mpl.use('agg', force=False, warn=False)
    from matplotlib import pyplot as plt
    fig = plt.figure(dpi=dpi)
    # convert the requested pixel shape into inches at this dpi
    w, h = shape[1] / dpi, shape[0] / dpi
    # w, h = 1, 10
    fig.set_size_inches(w, h)
    ax = fig.add_subplot('111')
    sm = plt.cm.ScalarMappable(cmap=plt.get_cmap(cmap))
    sm.set_array(domain)
    plt.colorbar(sm, cax=ax)
    cb_img = render_figure_to_image(fig, dpi=dpi, transparent=transparent)
    # close the scratch figure so it does not leak into pyplot's state
    plt.close(fig)
    return cb_img
class Color(ub.NiceRepr):
"""
move to colorutil?
Example:
>>> from netharn.util.mplutil import *
>>> print(Color('g'))
>>> print(Color('orangered'))
>>> print(Color('#AAAAAA').as255())
>>> print(Color([0, 255, 0]))
>>> print(Color([1, 1, 1.]))
>>> print(Color([1, 1, 1]))
>>> print(Color(Color([1, 1, 1])).as255())
>>> print(Color(Color([1., 0, 1, 0])).ashex())
>>> print(Color([1, 1, 1], alpha=255))
>>> print(Color([1, 1, 1], alpha=255, space='lab'))
"""
    def __init__(self, color, alpha=None, space=None):
        """
        Args:
            color: another Color, a color name / hex string, or a sequence
                of channel values (01 floats or 0-255 ints)
            alpha (optional): extra alpha channel value, normalized like the
                color channels; invalid if `color` already carries alpha
            space (str, optional): colorspace label (defaults to 'rgb');
                must be None when copying from another Color
        """
        if isinstance(color, Color):
            # copy construction: adopt the other color's space and channels
            assert alpha is None
            assert space is None
            space = color.space
            color = color.color01
        else:
            color = self._ensure_color01(color)
            if alpha is not None:
                alpha = self._ensure_color01([alpha])[0]
        if space is None:
            space = 'rgb'
        # always normalize the color down to 01
        color01 = list(color)
        if alpha is not None:
            if len(color01) not in [1, 3]:
                raise ValueError('alpha already in color')
            color01 = color01 + [alpha]
        # correct space if alpha is given
        if len(color01) in [2, 4]:
            if not space.endswith('a'):
                space += 'a'
        self.color01 = color01
        self.space = space
def __nice__(self):
colorpart = ', '.join(['{:.2f}'.format(c) for c in self.color01])
return self.space + ': ' + colorpart
def ashex(self, space=None):
c255 = self.as255(space)
return '#' + ''.join(['{:02x}'.format(c) for c in c255])
def as255(self, space=None):
color = (np.array(self.as01(space)) * 255).astype(np.uint8)
return tuple(map(int, color))
def as01(self, space=None):
"""
self = mplutil.Color('red')
mplutil.Color('green').as01('rgba')
"""
color = tuple(self.color01)
if space is not None:
if space == self.space:
pass
elif space == 'rgba' and self.space == 'rgb':
color = color + (1,)
elif space == 'bgr' and self.space == 'rgb':
color = color[::-1]
elif space == 'rgb' and self.space == 'bgr':
color = color[::-1]
else:
assert False
return tuple(map(float, color))
@classmethod
def _is_base01(channels):
""" check if a color is in base 01 """
def _test_base01(channels):
tests01 = {
'is_float': all([isinstance(c, (float, np.float64)) for c in channels]),
'is_01': all([c >= 0.0 and c <= 1.0 for c in channels]),
}
return tests01
if isinstance(channels, six.string_types):
return False
return all(_test_base01(channels).values())
@classmethod
def _is_base255(Color, channels):
""" there is a one corner case where all pixels are 1 or less """
if (all(c > 0.0 and c <= 255.0 for c in channels) and any(c > 1.0 for c in channels)):
# Definately in 255 space
return True
else:
# might be in 01 or 255
return all(isinstance(c, int) for c in channels)
@classmethod
def _hex_to_01(Color, hex_color):
"""
hex_color = '#6A5AFFAF'
"""
assert hex_color.startswith('#'), 'not a hex string %r' % (hex_color,)
parts = hex_color[1:].strip()
color255 = tuple(int(parts[i: i + 2], 16) for i in range(0, len(parts), 2))
assert len(color255) in [3, 4], 'must be length 3 or 4'
return Color._255_to_01(color255)
def _ensure_color01(Color, color):
""" Infer what type color is and normalize to 01 """
if isinstance(color, six.string_types):
color = Color._string_to_01(color)
elif Color._is_base255(color):
color = Color._255_to_01(color)
return color
@classmethod
def _255_to_01(Color, color255):
""" converts base 255 color to base 01 color """
return [channel / 255.0 for channel in color255]
@classmethod
def _string_to_01(Color, color):
"""
mplutil.Color._string_to_01('green')
mplutil.Color._string_to_01('red')
"""
from matplotlib import colors as mcolors
if color in mcolors.BASE_COLORS:
color01 = mcolors.BASE_COLORS[color]
elif color in mcolors.CSS4_COLORS:
color_hex = mcolors.CSS4_COLORS[color]
color01 = Color._hex_to_01(color_hex)
elif color.startswith('#'):
color01 = Color._hex_to_01(color)
else:
raise ValueError('unknown color=%r' % (color,))
return color01
@classmethod
def named_colors():
from matplotlib import colors as mcolors
names = sorted(list(mcolors.BASE_COLORS.keys()) + list(mcolors.CSS4_COLORS.keys()))
return names
@classmethod
def distinct(Color, num, space='rgb'):
"""
Make multiple distinct colors
"""
import matplotlib as mpl
import matplotlib._cm as _cm
cm = mpl.colors.LinearSegmentedColormap.from_list(
'gist_rainbow', _cm.datad['gist_rainbow'],
mpl.rcParams['image.lut'])
distinct_colors = [
np.array(cm(i / num)).tolist()[0:3]
for i in range(num)
]
if space == 'rgb':
return distinct_colors
else:
return [Color(c, space='rgb').as01(space=space) for c in distinct_colors]
if __name__ == '__main__':
r"""
CommandLine:
python -m netharn.util.mplutil
"""
import xdoctest
xdoctest.doctest_module(__file__)
| [
"numpy.sqrt",
"sys.platform.startswith",
"io.BytesIO",
"colorsys.hsv_to_rgb",
"matplotlib.collections.LineCollection",
"numpy.array",
"matplotlib.colors.CSS4_COLORS.keys",
"numpy.isfinite",
"cv2.imdecode",
"matplotlib.pyplot.switch_backend",
"netharn.util.imutil.ensure_float01",
"netharn.util.... | [((8034, 8054), 'numpy.array', 'np.array', (['ydata_list'], {}), '(ydata_list)\n', (8042, 8054), True, 'import numpy as np\n'), ((12667, 12686), 'matplotlib.rcParams.copy', 'mpl.rcParams.copy', ([], {}), '()\n', (12684, 12686), True, 'import matplotlib as mpl\n'), ((14038, 14099), 'matplotlib.font_manager.FontProperties', 'mpl.font_manager.FontProperties', ([], {'family': 'family', 'weight': 'weight'}), '(family=family, weight=weight)\n', (14069, 14099), True, 'import matplotlib as mpl\n'), ((24865, 24874), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (24872, 24874), True, 'from matplotlib import pyplot as plt\n'), ((25599, 25625), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['aximg'], {'ax': 'ax'}), '(aximg, ax=ax)\n', (25611, 25625), True, 'from matplotlib import pyplot as plt\n'), ((28012, 28046), 'matplotlib.transforms.Bbox.union', 'mpl.transforms.Bbox.union', (['extents'], {}), '(extents)\n', (28037, 28046), True, 'import matplotlib as mpl\n'), ((30635, 30674), 'matplotlib.transforms.Bbox.union', 'mpl.transforms.Bbox.union', (['axes_extents'], {}), '(axes_extents)\n', (30660, 30674), True, 'import matplotlib as mpl\n'), ((30977, 31017), 'cv2.imdecode', 'cv2.imdecode', (['data', 'cv2.IMREAD_UNCHANGED'], {}), '(data, cv2.IMREAD_UNCHANGED)\n', (30989, 31017), False, 'import cv2\n'), ((31876, 31918), 'cv2.cvtColor', 'cv2.cvtColor', (['im_bgra', 'cv2.COLOR_BGRA2RGBA'], {}), '(im_bgra, cv2.COLOR_BGRA2RGBA)\n', (31888, 31918), False, 'import cv2\n'), ((36533, 36560), 'ubelt.ensure_unicode', 'ub.ensure_unicode', (['figtitle'], {}), '(figtitle)\n', (36550, 36560), True, 'import ubelt as ub\n'), ((36576, 36603), 'ubelt.ensure_unicode', 'ub.ensure_unicode', (['subtitle'], {}), '(subtitle)\n', (36593, 36603), True, 'import ubelt as ub\n'), ((45094, 45114), 'netharn.util.ensure_rng', 'util.ensure_rng', (['rng'], {}), '(rng)\n', (45109, 45114), False, 'from netharn import util\n'), ((45674, 45692), 'os.path.expanduser', 'expanduser', 
(['fpath_'], {}), '(fpath_)\n', (45684, 45692), False, 'from os.path import expanduser\n'), ((46187, 46232), 'ubelt.argval', 'ub.argval', (['"""--dpath"""'], {'type_': 'str', 'default': 'None'}), "('--dpath', type_=str, default=None)\n", (46196, 46232), True, 'import ubelt as ub\n'), ((46347, 46366), 'os.path.join', 'join', (['dpath', 'fpath_'], {}), '(dpath, fpath_)\n', (46351, 46366), False, 'from os.path import join, dirname\n'), ((46466, 46475), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (46473, 46475), True, 'from matplotlib import pyplot as plt\n'), ((46514, 46532), 'ubelt.truepath', 'ub.truepath', (['fpath'], {}), '(fpath)\n', (46525, 46532), True, 'import ubelt as ub\n'), ((46550, 46575), 'ubelt.argflag', 'ub.argflag', (['"""--clipwhite"""'], {}), "('--clipwhite')\n", (46560, 46575), True, 'import ubelt as ub\n'), ((48775, 48809), 'ubelt.argflag', 'ub.argflag', (["('--diskshow', '--ds')"], {}), "(('--diskshow', '--ds'))\n", (48785, 48809), True, 'import ubelt as ub\n'), ((50122, 50147), 'ubelt.argflag', 'ub.argflag', (['"""--saveparts"""'], {}), "('--saveparts')\n", (50132, 50147), True, 'import ubelt as ub\n'), ((50162, 50195), 'ubelt.argval', 'ub.argval', (['"""--save"""'], {'default': 'None'}), "('--save', default=None)\n", (50171, 50195), True, 'import ubelt as ub\n'), ((50427, 50447), 'ubelt.argflag', 'ub.argflag', (['"""--show"""'], {}), "('--show')\n", (50437, 50447), True, 'import ubelt as ub\n'), ((54480, 54497), 'matplotlib.get_backend', 'mpl.get_backend', ([], {}), '()\n', (54495, 54497), True, 'import matplotlib as mpl\n'), ((55510, 55542), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win32"""'], {}), "('win32')\n", (55533, 55542), False, 'import sys\n'), ((62645, 62654), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (62652, 62654), True, 'from matplotlib import pyplot as plt\n'), ((64029, 64064), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sm'], {'cax': 'cax'}), '(sm, cax=cax, **kwargs)\n', (64041, 64064), 
True, 'from matplotlib import pyplot as plt\n'), ((65568, 65579), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (65575, 65579), True, 'from matplotlib import pyplot as plt\n'), ((67660, 67676), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (67668, 67676), True, 'import numpy as np\n'), ((67690, 67706), 'numpy.array', 'np.array', (['colors'], {}), '(colors)\n', (67698, 67706), True, 'import numpy as np\n'), ((67837, 67877), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (['sorted_colors'], {}), '(sorted_colors)\n', (67862, 67877), True, 'import matplotlib as mpl\n'), ((70011, 70030), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap_'], {}), '(cmap_)\n', (70023, 70030), True, 'from matplotlib import pyplot as plt\n'), ((78751, 78798), 'matplotlib.patches.Rectangle', 'mpl.patches.Rectangle', (['xy', 'width', 'height'], {'lw': 'lw'}), '(xy, width, height, lw=lw)\n', (78772, 78798), True, 'import matplotlib as mpl\n'), ((79964, 79981), 'numpy.asarray', 'np.asarray', (['boxes'], {}), '(boxes)\n', (79974, 79981), True, 'import numpy as np\n'), ((80663, 80724), 'matplotlib.collections.PatchCollection', 'mpl.collections.PatchCollection', (['patches'], {'match_original': '(True)'}), '(patches, match_original=True)\n', (80694, 80724), True, 'import matplotlib as mpl\n'), ((82598, 82687), 'matplotlib.collections.LineCollection', 'mpl.collections.LineCollection', (['segments'], {'linewidths': 'linewidth', 'alpha': 'alpha'}), '(segments, linewidths=linewidth, alpha=alpha,\n **kwargs)\n', (82628, 82687), True, 'import matplotlib as mpl\n'), ((83022, 83043), 'matplotlib.cm.get_cmap', 'mpl.cm.get_cmap', (['cmap'], {}), '(cmap)\n', (83037, 83043), True, 'import matplotlib as mpl\n'), ((83056, 83084), 'netharn.util.imutil.ensure_float01', 'imutil.ensure_float01', (['probs'], {}), '(probs)\n', (83077, 83084), False, 'from netharn.util import imutil\n'), ((83903, 83942), 'matplotlib.use', 'mpl.use', (['"""agg"""'], {'force': '(False)', 
'warn': '(False)'}), "('agg', force=False, warn=False)\n", (83910, 83942), True, 'import matplotlib as mpl\n'), ((83995, 84014), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'dpi': 'dpi'}), '(dpi=dpi)\n', (84005, 84014), True, 'from matplotlib import pyplot as plt\n'), ((84227, 84251), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['sm'], {'cax': 'ax'}), '(sm, cax=ax)\n', (84239, 84251), True, 'from matplotlib import pyplot as plt\n'), ((84333, 84347), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (84342, 84347), True, 'from matplotlib import pyplot as plt\n'), ((90223, 90256), 'xdoctest.doctest_module', 'xdoctest.doctest_module', (['__file__'], {}), '(__file__)\n', (90246, 90256), False, 'import xdoctest\n'), ((7332, 7370), 'ubelt.dict_take', 'ub.dict_take', (['plot_list_kw', 'valid_keys'], {}), '(plot_list_kw, valid_keys)\n', (7344, 7370), True, 'import ubelt as ub\n'), ((7555, 7596), 'ubelt.dict_take', 'ub.dict_take', (['plot_list_kw', 'extra_kw_keys'], {}), '(plot_list_kw, extra_kw_keys)\n', (7567, 7596), True, 'import ubelt as ub\n'), ((7907, 7916), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7914, 7916), True, 'from matplotlib import pyplot as plt\n'), ((7935, 7946), 'matplotlib.pyplot.sca', 'plt.sca', (['ax'], {}), '(ax)\n', (7942, 7946), True, 'from matplotlib import pyplot as plt\n'), ((8510, 8574), 'six.moves.zip_longest', 'zip_longest', (['xdata_list', 'ydata_list', 'plot_kw_list', 'extra_kw_list'], {}), '(xdata_list, ydata_list, plot_kw_list, extra_kw_list)\n', (8521, 8574), False, 'from six.moves import zip_longest\n'), ((8653, 8672), 'numpy.isfinite', 'np.isfinite', (['_ydata'], {}), '(_ydata)\n', (8664, 8672), True, 'import numpy as np\n'), ((13834, 13911), 'matplotlib.font_manager.FontProperties', 'mpl.font_manager.FontProperties', ([], {'weight': 'weight', 'family': 'family', 'size': 'labelsize'}), '(weight=weight, family=family, size=labelsize)\n', (13865, 13911), True, 'import matplotlib as mpl\n'), ((23188, 
23218), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['nrow', 'ncols'], {}), '(nrow, ncols)\n', (23205, 23218), True, 'import matplotlib.gridspec as gridspec\n'), ((24371, 24380), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (24378, 24380), True, 'from matplotlib import pyplot as plt\n'), ((24846, 24855), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (24853, 24855), True, 'from matplotlib import pyplot as plt\n'), ((25274, 25295), 'matplotlib.cm.get_cmap', 'mpl.cm.get_cmap', (['cmap'], {}), '(cmap)\n', (25289, 25295), True, 'import matplotlib as mpl\n'), ((26762, 26827), 'matplotlib.collections.LineCollection', 'mpl.collections.LineCollection', (['segments'], {'color': '"""w"""', 'linewidths': '(1)'}), "(segments, color='w', linewidths=1)\n", (26792, 26827), True, 'import matplotlib as mpl\n'), ((26984, 27013), 'numpy.meshgrid', 'np.meshgrid', (['x_basis', 'y_basis'], {}), '(x_basis, y_basis)\n', (26995, 27013), True, 'import numpy as np\n'), ((29300, 29340), 'matplotlib.transforms.Bbox.union', 'mpl.transforms.Bbox.union', (['axes_extents_'], {}), '(axes_extents_)\n', (29325, 29340), True, 'import matplotlib as mpl\n'), ((30282, 30291), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (30289, 30291), True, 'from matplotlib import pyplot as plt\n'), ((30684, 30696), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (30694, 30696), False, 'import io\n'), ((31353, 31392), 'matplotlib.transforms.Bbox.union', 'mpl.transforms.Bbox.union', (['axes_extents'], {}), '(axes_extents)\n', (31378, 31392), True, 'import matplotlib as mpl\n'), ((34692, 34748), 'matplotlib.patches.Rectangle', 'mpl.patches.Rectangle', (['xy', 'width', 'height'], {'lw': '(0)', 'zorder': '(0)'}), '(xy, width, height, lw=0, zorder=0)\n', (34713, 34748), True, 'import matplotlib as mpl\n'), ((35098, 35107), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (35105, 35107), True, 'from matplotlib import pyplot as plt\n'), ((36508, 36517), 'matplotlib.pyplot.gcf', 'plt.gcf', 
([], {}), '()\n', (36515, 36517), True, 'from matplotlib import pyplot as plt\n'), ((38281, 38290), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (38288, 38290), True, 'from matplotlib import pyplot as plt\n'), ((46404, 46419), 'os.path.dirname', 'dirname', (['fpath_'], {}), '(fpath_)\n', (46411, 46419), False, 'from os.path import join, dirname\n'), ((47348, 47373), 'ubelt.argflag', 'ub.argflag', (['"""--grouprows"""'], {}), "('--grouprows')\n", (47358, 47373), True, 'import ubelt as ub\n'), ((48338, 48359), 'ubelt.argflag', 'ub.argflag', (['"""--alpha"""'], {}), "('--alpha')\n", (48348, 48359), True, 'import ubelt as ub\n'), ((48548, 48566), 'ubelt.truepath', 'ub.truepath', (['fpath'], {}), '(fpath)\n', (48559, 48566), True, 'import ubelt as ub\n'), ((48848, 48871), 'ubelt.startfile', 'ub.startfile', (['absfpath_'], {}), '(absfpath_)\n', (48860, 48871), True, 'import ubelt as ub\n'), ((50236, 50274), 'ubelt.argval', 'ub.argval', (['"""--saveparts"""'], {'default': 'None'}), "('--saveparts', default=None)\n", (50245, 50274), True, 'import ubelt as ub\n'), ((50864, 50874), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (50872, 50874), True, 'from matplotlib import pyplot as plt\n'), ((53051, 53072), 'ubelt.argflag', 'ub.argflag', (['"""--alpha"""'], {}), "('--alpha')\n", (53061, 53072), True, 'import ubelt as ub\n'), ((53549, 53570), 'IPython.get_ipython', 'IPython.get_ipython', ([], {}), '()\n', (53568, 53570), False, 'import IPython\n'), ((54915, 54932), 'matplotlib.get_backend', 'mpl.get_backend', ([], {}), '()\n', (54930, 54932), True, 'import matplotlib as mpl\n'), ((55622, 55651), 'os.environ.get', 'os.environ.get', (['"""DISPLAY"""', '""""""'], {}), "('DISPLAY', '')\n", (55636, 55651), False, 'import os\n'), ((57335, 57344), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (57342, 57344), True, 'from matplotlib import pyplot as plt\n'), ((57662, 57684), 'netharn.util.imread', 'util.imread', (['img_fpath'], {}), '(img_fpath)\n', (57673, 
57684), False, 'from netharn import util\n'), ((63102, 63139), 'numpy.unique', 'np.unique', (['scalars'], {'return_index': '(True)'}), '(scalars, return_index=True)\n', (63111, 63139), True, 'import numpy as np\n'), ((63370, 63410), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (['unique_colors'], {}), '(unique_colors)\n', (63395, 63410), True, 'import matplotlib as mpl\n'), ((63552, 63591), 'matplotlib.cm.ScalarMappable', 'mpl.cm.ScalarMappable', ([], {'cmap': 'listed_cmap'}), '(cmap=listed_cmap)\n', (63573, 63591), True, 'import matplotlib as mpl\n'), ((63775, 63814), 'matplotlib.pyplot.cm.ScalarMappable', 'plt.cm.ScalarMappable', ([], {'cmap': 'listed_cmap'}), '(cmap=listed_cmap)\n', (63796, 63814), True, 'from matplotlib import pyplot as plt\n'), ((66673, 66696), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['ax'], {}), '(ax)\n', (66692, 66696), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((69541, 69557), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (69549, 69557), True, 'import numpy as np\n'), ((69751, 69767), 'numpy.log2', 'np.log2', (['scores_'], {}), '(scores_)\n', (69758, 69767), True, 'import numpy as np\n'), ((71964, 72008), 'matplotlib.colors.ListedColormap', 'mpl.colors.ListedColormap', (['cmap.colors[::-1]'], {}), '(cmap.colors[::-1])\n', (71989, 72008), True, 'import matplotlib as mpl\n'), ((72083, 72115), 'six.iteritems', 'six.iteritems', (['cmap._segmentdata'], {}), '(cmap._segmentdata)\n', (72096, 72115), False, 'import six\n'), ((74272, 74292), 'six.next', 'six.next', (['self._iter'], {}), '(self._iter)\n', (74280, 74292), False, 'import six\n'), ((79773, 79782), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (79780, 79782), True, 'from matplotlib import pyplot as plt\n'), ((80568, 80613), 'matplotlib.patches.Rectangle', 'mpl.patches.Rectangle', (['(x, y)', 'w', 'h'], {}), '((x, y), w, h, **rectkw)\n', (80589, 80613), True, 'import matplotlib as mpl\n'), 
((82233, 82242), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (82240, 82242), True, 'from matplotlib import pyplot as plt\n'), ((89678, 89797), 'matplotlib.colors.LinearSegmentedColormap.from_list', 'mpl.colors.LinearSegmentedColormap.from_list', (['"""gist_rainbow"""', "_cm.datad['gist_rainbow']", "mpl.rcParams['image.lut']"], {}), "('gist_rainbow', _cm.datad[\n 'gist_rainbow'], mpl.rcParams['image.lut'])\n", (89722, 89797), True, 'import matplotlib as mpl\n'), ((3125, 3151), 'ubelt.take', 'ub.take', (['ydata_list', 'ykeys'], {}), '(ydata_list, ykeys)\n', (3132, 3151), True, 'import ubelt as ub\n'), ((3897, 3917), 'numpy.array', 'np.array', (['ydata_list'], {}), '(ydata_list)\n', (3905, 3917), True, 'import numpy as np\n'), ((4116, 4139), 'numpy.array', 'np.array', (['xd'], {'copy': '(True)'}), '(xd, copy=True)\n', (4124, 4139), True, 'import numpy as np\n'), ((10873, 10889), 'numpy.array', 'np.array', (['_xdata'], {}), '(_xdata)\n', (10881, 10889), True, 'import numpy as np\n'), ((10954, 10970), 'numpy.array', 'np.array', (['ydata_'], {}), '(ydata_)\n', (10962, 10970), True, 'import numpy as np\n'), ((10996, 11012), 'numpy.array', 'np.array', (['spread'], {}), '(spread)\n', (11004, 11012), True, 'import numpy as np\n'), ((11126, 11135), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (11133, 11135), True, 'from matplotlib import pyplot as plt\n'), ((12466, 12489), 'ubelt.ensure_unicode', 'ub.ensure_unicode', (['text'], {}), '(text)\n', (12483, 12489), True, 'import ubelt as ub\n'), ((14813, 14855), 'matplotlib.ticker.FormatStrFormatter', 'mpl.ticker.FormatStrFormatter', (['xtickformat'], {}), '(xtickformat)\n', (14842, 14855), True, 'import matplotlib as mpl\n'), ((14926, 14968), 'matplotlib.ticker.FormatStrFormatter', 'mpl.ticker.FormatStrFormatter', (['ytickformat'], {}), '(ytickformat)\n', (14955, 14968), True, 'import matplotlib as mpl\n'), ((16827, 16862), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', 'num_xticks'], {}), '(xmin, xmax, 
num_xticks)\n', (16838, 16862), True, 'import numpy as np\n'), ((17125, 17160), 'numpy.linspace', 'np.linspace', (['ymin', 'ymax', 'num_yticks'], {}), '(ymin, ymax, num_yticks)\n', (17136, 17160), True, 'import numpy as np\n'), ((19626, 19703), 'matplotlib.font_manager.FontProperties', 'mpl.font_manager.FontProperties', ([], {'family': 'family', 'weight': 'weight', 'size': 'titlesize'}), '(family=family, weight=weight, size=titlesize)\n', (19657, 19703), True, 'import matplotlib as mpl\n'), ((20078, 20156), 'matplotlib.font_manager.FontProperties', 'mpl.font_manager.FontProperties', ([], {'family': 'family', 'weight': 'weight', 'size': 'legendsize'}), '(family=family, weight=weight, size=legendsize)\n', (20109, 20156), True, 'import matplotlib as mpl\n'), ((34075, 34099), 'numpy.array', 'np.array', (['(0, 0, 0, 255)'], {}), '((0, 0, 0, 255))\n', (34083, 34099), True, 'import numpy as np\n'), ((34238, 34247), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (34245, 34247), True, 'from matplotlib import pyplot as plt\n'), ((41356, 41394), 'ubelt.argval', 'ub.argval', (['"""--cmap-hack"""'], {'default': 'None'}), "('--cmap-hack', default=None)\n", (41365, 41394), True, 'import ubelt as ub\n'), ((41417, 41457), 'ubelt.argval', 'ub.argval', (['"""--ncolor-hack"""'], {'default': 'None'}), "('--ncolor-hack', default=None)\n", (41426, 41457), True, 'import ubelt as ub\n'), ((41688, 41723), 'numpy.random.RandomState', 'np.random.RandomState', (['(seed + 48930)'], {}), '(seed + 48930)\n', (41709, 41723), True, 'import numpy as np\n'), ((41830, 41855), 'matplotlib.pyplot.cm.get_cmap', 'plt.cm.get_cmap', (['cmap_str'], {}), '(cmap_str)\n', (41845, 41855), True, 'from matplotlib import pyplot as plt\n'), ((42033, 42069), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {'endpoint': '(False)'}), '(0, 1, N, endpoint=False)\n', (42044, 42069), True, 'import numpy as np\n'), ((43071, 43130), 'numpy.linspace', 'np.linspace', (['hmin', 'hmax_', 'N'], {'endpoint': 
'(False)', 'dtype': 'np.float'}), '(hmin, hmax_, N, endpoint=False, dtype=np.float)\n', (43082, 43130), True, 'import numpy as np\n'), ((47610, 47651), 'ubelt.group_items', 'ub.group_items', (['atomic_axes', 'groupid_list'], {}), '(atomic_axes, groupid_list)\n', (47624, 47651), True, 'import ubelt as ub\n'), ((48724, 48766), 'netharn.util.clipwhite_ondisk', 'util.clipwhite_ondisk', (['fpath_in', 'fpath_out'], {}), '(fpath_in, fpath_out)\n', (48745, 48766), False, 'from netharn import util\n'), ((55181, 55208), 'matplotlib.pyplot.switch_backend', 'plt.switch_backend', (['backend'], {}), '(backend)\n', (55199, 55208), True, 'from matplotlib import pyplot as plt\n'), ((55235, 55251), 'matplotlib.use', 'mpl.use', (['backend'], {}), '(backend)\n', (55242, 55251), True, 'import matplotlib as mpl\n'), ((57936, 57958), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {}), '()\n', (57956, 57958), True, 'import matplotlib as mpl\n'), ((58747, 58818), 'netharn.util.convert_colorspace', 'util.convert_colorspace', (['img'], {'dst_space': 'dst_space', 'src_space': 'colorspace'}), '(img, dst_space=dst_space, src_space=colorspace)\n', (58770, 58818), False, 'from netharn import util\n'), ((63164, 63180), 'numpy.array', 'np.array', (['colors'], {}), '(colors)\n', (63172, 63180), True, 'import numpy as np\n'), ((71004, 71024), 'numpy.array', 'np.array', (['score_list'], {}), '(score_list)\n', (71012, 71024), True, 'import numpy as np\n'), ((81003, 81062), 'matplotlib.font_manager.FontProperties', 'mpl.font_manager.FontProperties', ([], {'size': '(6)', 'family': '"""monospace"""'}), "(size=6, family='monospace')\n", (81034, 81062), True, 'import matplotlib as mpl\n'), ((84177, 84195), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (84189, 84195), True, 'from matplotlib import pyplot as plt\n'), ((4189, 4215), 'numpy.array', 'np.array', (['xdata'], {'copy': '(True)'}), '(xdata, copy=True)\n', (4197, 4215), True, 'import numpy as np\n'), ((22435, 
22444), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (22442, 22444), True, 'from matplotlib import pyplot as plt\n'), ((22569, 22585), 'matplotlib.pyplot.figure', 'plt.figure', (['fnum'], {}), '(fnum)\n', (22579, 22585), True, 'from matplotlib import pyplot as plt\n'), ((23987, 23996), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (23994, 23996), True, 'from matplotlib import pyplot as plt\n'), ((24114, 24135), 'matplotlib.pyplot.subplot', 'plt.subplot', (['*subspec'], {}), '(*subspec)\n', (24125, 24135), True, 'from matplotlib import pyplot as plt\n'), ((24175, 24184), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (24182, 24184), True, 'from matplotlib import pyplot as plt\n'), ((24980, 24995), 'numpy.diag', 'np.diag', (['values'], {}), '(values)\n', (24987, 24995), True, 'import numpy as np\n'), ((42285, 42311), 'numpy.abs', 'np.abs', (['range_[range_ < 0]'], {}), '(range_[range_ < 0])\n', (42291, 42311), True, 'import numpy as np\n'), ((43429, 43452), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*x'], {}), '(*x)\n', (43448, 43452), False, 'import colorsys\n'), ((48163, 48202), 'netharn.util.clipwhite_ondisk', 'util.clipwhite_ondisk', (['subpath', 'subpath'], {}), '(subpath, subpath)\n', (48184, 48202), False, 'from netharn import util\n'), ((80168, 80193), 'numpy.vstack', 'np.vstack', (['[x1, y1, w, h]'], {}), '([x1, y1, w, h])\n', (80177, 80193), True, 'import numpy as np\n'), ((22503, 22515), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22513, 22515), True, 'from matplotlib import pyplot as plt\n'), ((22644, 22653), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (22651, 22653), True, 'from matplotlib import pyplot as plt\n'), ((41012, 41032), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', 'N'], {}), '(0, 1, N)\n', (41023, 41032), True, 'import numpy as np\n'), ((42206, 42225), 'numpy.all', 'np.all', (['(range_ >= 0)'], {}), '(range_ >= 0)\n', (42212, 42225), True, 'import numpy as np\n'), ((42230, 42249), 
'numpy.all', 'np.all', (['(range_ <= 1)'], {}), '(range_ <= 1)\n', (42236, 42249), True, 'import numpy as np\n'), ((59023, 59055), 'numpy.array', 'np.array', (['imgRGB'], {'dtype': 'np.uint8'}), '(imgRGB, dtype=np.uint8)\n', (59031, 59055), True, 'import numpy as np\n'), ((59413, 59433), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""gray"""'], {}), "('gray')\n", (59425, 59433), True, 'from matplotlib import pyplot as plt\n'), ((59508, 59526), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['cmap'], {}), '(cmap)\n', (59520, 59526), True, 'from matplotlib import pyplot as plt\n'), ((77625, 77643), 'numpy.sqrt', 'np.sqrt', (['nSubplots'], {}), '(nSubplots)\n', (77632, 77643), True, 'import numpy as np\n'), ((80315, 80340), 'numpy.vstack', 'np.vstack', (['[x1, y1, w, h]'], {}), '([x1, y1, w, h])\n', (80324, 80340), True, 'import numpy as np\n'), ((89386, 89412), 'matplotlib.colors.BASE_COLORS.keys', 'mcolors.BASE_COLORS.keys', ([], {}), '()\n', (89410, 89412), True, 'from matplotlib import colors as mcolors\n'), ((89421, 89447), 'matplotlib.colors.CSS4_COLORS.keys', 'mcolors.CSS4_COLORS.keys', ([], {}), '()\n', (89445, 89447), True, 'from matplotlib import colors as mcolors\n'), ((16699, 16712), 'numpy.ceil', 'np.ceil', (['xmin'], {}), '(xmin)\n', (16706, 16712), True, 'import numpy as np\n'), ((16714, 16728), 'numpy.floor', 'np.floor', (['xmax'], {}), '(xmax)\n', (16722, 16728), True, 'import numpy as np\n'), ((16997, 17010), 'numpy.ceil', 'np.ceil', (['ymin'], {}), '(ymin)\n', (17004, 17010), True, 'import numpy as np\n'), ((17012, 17026), 'numpy.floor', 'np.floor', (['ymax'], {}), '(ymax)\n', (17020, 17026), True, 'import numpy as np\n'), ((41647, 41670), 'ubelt.hash_data', 'ub.hash_data', (['cmap_seed'], {}), '(cmap_seed)\n', (41659, 41670), True, 'import ubelt as ub\n'), ((76518, 76544), 'numpy.ceil', 'np.ceil', (['(nSubplots / nRows)'], {}), '(nSubplots / nRows)\n', (76525, 76544), True, 'import numpy as np\n'), ((76610, 76636), 'numpy.ceil', 'np.ceil', 
(['(nSubplots / nCols)'], {}), '(nSubplots / nCols)\n', (76617, 76636), True, 'import numpy as np\n')] |
from torchvision.transforms import transforms
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
import torch as T
import torch.optim as optim
from model import Generator, Discriminator
from loss_fn import GeneratorLoss, TVLoss
from utils import show_progress, save
import datetime
import gc
import os
class ConcatDataset(T.utils.data.Dataset):
def __init__(self, *datasets):
self.datasets = datasets
def __getitem__(self, i):
return tuple(d[i] for d in self.datasets)
def __len__(self):
return min(len(d) for d in self.datasets)
device = 'cuda' if T.cuda.is_available() else 'cpu'
BATCH_SIZE = 16
SIZE_HR = 256
SIZE_LR = 64
num_workers = 2
rootpath = '../data'
transform_hr = transforms.Compose([
transforms.Resize((SIZE_HR, SIZE_HR)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
data_hr = ImageFolder(rootpath, transform=transform_hr)
transform_lr = transforms.Compose([
transforms.Resize((SIZE_LR, SIZE_LR)),
transforms.ToTensor(),
transforms.GaussianBlur(kernel_size=25),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
data_lr = ImageFolder(rootpath, transform=transform_lr)
full_data = ConcatDataset(data_lr, data_hr)
loader = DataLoader(full_data, BATCH_SIZE, num_workers=num_workers)
generator = Generator(3, 64).to(device)
discriminator = Discriminator(3, 64).to(device)
lr = 1e-1000
gen_optimizer = optim.Adam(generator.parameters(), lr=lr)
disc_optimizer = optim.Adam(discriminator.parameters(), lr=lr)
generator_criterion = GeneratorLoss().to(device)
g_losses = []
d_losses = []
EPOCHS = 1000
if 'models' not in os.listdir():
os.mkdir('models')
save_path = '../models/'
# <----- TRAINING LOOP ----->
for epoch in range(1, EPOCHS):
generator.train()
discriminator.train()
print(f'EPOCH [{epoch}/{EPOCHS}]')
sum_d_loss = 0
sum_g_loss = 0
gc.collect()
T.cuda.empty_cache()
start = datetime.datetime.now()
for idx, (item, target) in enumerate(loader):
item = item[0].to(device)
target = target[0].to(device)
fake_image = generator(item)
discriminator.zero_grad()
real_out = discriminator(target).mean()
fake_out = discriminator(fake_image).mean()
d_loss = 1 - real_out + fake_out
d_loss.backward(retain_graph=True)
generator.zero_grad()
g_loss = generator_criterion(fake_out, fake_image, target)
g_loss.backward()
fake_img = generator(item)
fake_out = discriminator(fake_img).mean()
if idx % 100 == 0:
print(
f'Batch {idx}/{loader.__len__()} \nLoss (Generator) {g_loss.detach().cpu()}\nLoss (Discriminator) {d_loss.detach().cpu()}'
)
pred = fake_img[0].detach().cpu()
save(generator, discriminator, save_path)
show_progress([item.detach().cpu()[0], pred, target.detach().cpu()[0]], save=True, show=False)
gen_optimizer.step()
sum_d_loss += d_loss.detach().cpu()
sum_g_loss += g_loss.detach().cpu()
print(f'Time per epoch = {start - datetime.datetime.now()}')
g_losses.append(sum_g_loss / loader.__len__())
d_losses.append(sum_d_loss / loader.__len__())
print(f'D_loss {sum_d_loss}')
print(f'G_loss {sum_g_loss}') | [
"utils.save",
"os.listdir",
"model.Discriminator",
"torchvision.datasets.ImageFolder",
"datetime.datetime.now",
"torch.cuda.is_available",
"torchvision.transforms.transforms.Resize",
"os.mkdir",
"gc.collect",
"torch.utils.data.DataLoader",
"torchvision.transforms.transforms.ToTensor",
"torchvi... | [((1034, 1079), 'torchvision.datasets.ImageFolder', 'ImageFolder', (['rootpath'], {'transform': 'transform_hr'}), '(rootpath, transform=transform_hr)\n', (1045, 1079), False, 'from torchvision.datasets import ImageFolder\n'), ((1459, 1504), 'torchvision.datasets.ImageFolder', 'ImageFolder', (['rootpath'], {'transform': 'transform_lr'}), '(rootpath, transform=transform_lr)\n', (1470, 1504), False, 'from torchvision.datasets import ImageFolder\n'), ((1559, 1617), 'torch.utils.data.DataLoader', 'DataLoader', (['full_data', 'BATCH_SIZE'], {'num_workers': 'num_workers'}), '(full_data, BATCH_SIZE, num_workers=num_workers)\n', (1569, 1617), False, 'from torch.utils.data import DataLoader\n'), ((625, 646), 'torch.cuda.is_available', 'T.cuda.is_available', ([], {}), '()\n', (644, 646), True, 'import torch as T\n'), ((1957, 1969), 'os.listdir', 'os.listdir', ([], {}), '()\n', (1967, 1969), False, 'import os\n'), ((1975, 1993), 'os.mkdir', 'os.mkdir', (['"""models"""'], {}), "('models')\n", (1983, 1993), False, 'import os\n'), ((2212, 2224), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2222, 2224), False, 'import gc\n'), ((2229, 2249), 'torch.cuda.empty_cache', 'T.cuda.empty_cache', ([], {}), '()\n', (2247, 2249), True, 'import torch as T\n'), ((2262, 2285), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2283, 2285), False, 'import datetime\n'), ((807, 844), 'torchvision.transforms.transforms.Resize', 'transforms.Resize', (['(SIZE_HR, SIZE_HR)'], {}), '((SIZE_HR, SIZE_HR))\n', (824, 844), False, 'from torchvision.transforms import transforms\n'), ((878, 899), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (897, 899), False, 'from torchvision.transforms import transforms\n'), ((933, 987), 'torchvision.transforms.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (953, 987), False, 'from torchvision.transforms import 
transforms\n'), ((1151, 1188), 'torchvision.transforms.transforms.Resize', 'transforms.Resize', (['(SIZE_LR, SIZE_LR)'], {}), '((SIZE_LR, SIZE_LR))\n', (1168, 1188), False, 'from torchvision.transforms import transforms\n'), ((1224, 1245), 'torchvision.transforms.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1243, 1245), False, 'from torchvision.transforms import transforms\n'), ((1281, 1320), 'torchvision.transforms.transforms.GaussianBlur', 'transforms.GaussianBlur', ([], {'kernel_size': '(25)'}), '(kernel_size=25)\n', (1304, 1320), False, 'from torchvision.transforms import transforms\n'), ((1356, 1410), 'torchvision.transforms.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (1376, 1410), False, 'from torchvision.transforms import transforms\n'), ((1631, 1647), 'model.Generator', 'Generator', (['(3)', '(64)'], {}), '(3, 64)\n', (1640, 1647), False, 'from model import Generator, Discriminator\n'), ((1675, 1695), 'model.Discriminator', 'Discriminator', (['(3)', '(64)'], {}), '(3, 64)\n', (1688, 1695), False, 'from model import Generator, Discriminator\n'), ((1866, 1881), 'loss_fn.GeneratorLoss', 'GeneratorLoss', ([], {}), '()\n', (1879, 1881), False, 'from loss_fn import GeneratorLoss, TVLoss\n'), ((3131, 3172), 'utils.save', 'save', (['generator', 'discriminator', 'save_path'], {}), '(generator, discriminator, save_path)\n', (3135, 3172), False, 'from utils import show_progress, save\n'), ((3437, 3460), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (3458, 3460), False, 'import datetime\n')] |
"""This algorithm implements the Wang Generalization algorithm with constraint checking.

This algorithm simplifies lines. It detects the bends of each line, analyzes them and
removes the bends whose diameter is below a given threshold. The points and lines that do
not need to be simplified are still used to enforce topological integrity between the
features that do need to be simplified.

Limits and constraints:
    Always works better when the lines to process meet the OGC simple-line specification.
"""
import math, sys
from shapely.geometry import Point, LineString, LinearRing, Polygon
from shapely.prepared import prep
from shapely import affinity
from lib_geosim import GenUtil, PointSc, LineStringSc, SpatialContainer, GeoSimException
# Internal constants ===> Modify with care...
_AREA_CMP_INDEX = .75 # Compactness index factor applied to the adjusted area
# Internal keyword constants
_BURNED = "Burned"
_DIAMETER = "diameter"
_SIMPLIFIED = 'Simplified'  # Bend status: the bend has been simplified
_NOT_SIMPLIFIED = 'NotSimplified'  # Bend status: the bend has not been simplified yet
_UNSIMPLIFIABLE = 'Unsimplifiable'  # Bend status: the bend must not be simplified
class LineStringSb(LineStringSc):
    """A class to represent a LineString used by the SherBend algorithm

    Attributes
    ----------
    coords : List
        A list of coordinates (x,y)
    original_type: str
        The original type of the feature
    min_adj_area : float
        The minimal adjusted area below which the bends are deleted
    properties : dict
        The dictionary of the properties (attributes of the features)
    fast_access : Boolean
        A flag to indicate if we keep a copy of the coordinates in order to accelerate access because
        access to the C function is slow
    """

    def __init__(self, coords, original_type, min_adj_area, layer_name, properties, fast_access=True):
        super().__init__(coords)
        self.sb_original_type = original_type
        self.sb_layer_name = layer_name
        self.sb_properties = properties
        self.sb_min_adj_area = min_adj_area
        self._sb_fast_access = fast_access
        if self._sb_fast_access:
            # Keep a Python-level copy of the coordinates to avoid repeated slow C calls
            self.__lst_coords = list(super().coords)

        # Declaration of the instance variables
        self.sb_geom_type = self.geom_type  # cached to avoid slower C calls to geom_type
        self.sb_is_simplest = False  # The line is not yet known to be at its simplest form
        self.sb_bends = []  # Holder for the bends of the line

    # Is the line string closed
    @property
    def sb_is_closed(self):
        """This method tests if a line is closed (first/last coordinates are the same)

        The result is computed once and cached in self._sb_is_closed.

        Parameters
        ----------
        None

        Returns
        -------
        bool
            True: the line is closed or False the line is open
        """
        try:
            return self._sb_is_closed
        except AttributeError:
            # A closed line needs at least 4 vertices to be valid
            if len(self.coords) >= 4 and GenUtil.distance(self.coords[0], self.coords[-1]) <= GenUtil.ZERO:
                self._sb_is_closed = True
            else:
                self._sb_is_closed = False
            return self._sb_is_closed

    @property
    def coords(self):
        """This method keeps a copy of the coordinates in a list.

        This method allows a faster access than always accessing the coordinates from the C call
        of shapely; the drawback is more memory space.

        Parameters
        ----------
        None

        Returns
        -------
        list
            Coordinates of the LineString
        """
        if self._sb_fast_access:
            return self.__lst_coords
        else:
            return super().coords

    @coords.setter
    def coords(self, coords):
        """Set the coordinates of a LineString

        Also refreshes the fast-access copy and invalidates the cached
        vertex orientation which depends on the coordinates.

        Parameters
        ----------
        coords : list
            List of x,y coordinates

        Returns
        -------
        None
        """
        # Access the coords attribute in the parent class
        super(LineStringSb, self.__class__).coords.fset(self, coords)  # Odd writing but it's needed...
        if self._sb_fast_access:
            self.__lst_coords = list(super().coords)
        # Delete variables that are now outdated, so they will be recomputed the next time they are accessed
        try:
            del self._vertex_orientation
        except AttributeError:
            pass

    @property
    def vertex_orientation(self):
        """This method calculates the orientation of the vertices

        List containing the orientation at each vertex of the line.
        -1: anti clockwise, +1 Clockwise; 0 Straight line
        For a closed line the first and last vertices bear the same value
        For an open line the first and last values are None

        The result is cached in self._vertex_orientation (invalidated by the coords setter).

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        try:
            return self._vertex_orientation
        except AttributeError:
            self._vertex_orientation = []
            for i in range(1, len(self.coords) - 1):  # '1' and 'cnt-1' to 'forget' first and last vertices
                orient = GenUtil.orientation(self.coords[i-1], self.coords[i], self.coords[i+1])
                self._vertex_orientation.append(orient)
            if self.is_closed:
                # Case of a closed line or polygon; we do not copy the first and last even if they are the same
                orient = GenUtil.orientation(self.coords[-2], self.coords[0], self.coords[1])
                self._vertex_orientation = [orient] + self._vertex_orientation
            else:
                # Case of an open line; the first and last are None
                orient = None
                self._vertex_orientation = [orient] + self._vertex_orientation + [orient]
            return self._vertex_orientation

    def _remove_colinear_vertex(self):
        """This method removes the colinear vertices in the line string. Also handles closed lines

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        if len(self.coords) <= 2:
            # Nothing to do with a line with 2 points
            pass
        else:
            # Detect the position of the colinear vertices (orientation == 0)
            vertex_to_del = [i for i, orient in (enumerate(self.vertex_orientation)) if orient == 0]
            if len(vertex_to_del) >= 1:
                # Delete the colinear vertices (in reverse order so indices stay valid)
                lst_coords = list(self.coords)
                for i in reversed(vertex_to_del):
                    del(lst_coords[i])
                if vertex_to_del[0] == 0:
                    # When we delete the first vertex we need to recopy the "new first" to the last vertex
                    lst_coords = lst_coords + [lst_coords[0]]
                self.coords = lst_coords

    def _rotate_start_bend(self):
        """Rotate a closed line string so the start of the line is also the start of a clockwise bend

        To be done on closed lines only

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        rotate = None
        max_v = len(self.vertex_orientation)
        for i in range(max_v):
            j = (i+1) % max_v
            # Look for a clockwise -> anti-clockwise transition: the start of a bend
            if self.vertex_orientation[i] == GenUtil.CLOCKWISE and \
               self.vertex_orientation[j] == GenUtil.ANTI_CLOCKWISE:
                rotate = i
                break

        # Rotate the first/last vertex to the position of the start of a bend
        if rotate is None:
            # All the vertices are clockwise. Nothing to do
            pass
        elif rotate == 0:
            # The line string does not need to be rotated
            pass
        else:
            lst_coord = self.coords[rotate:] + self.coords[1:rotate+1]
            self.coords = lst_coord  # Update the LineString coordinates

    def _extract_coords(self, i, j):
        """Extract the coordinates between index [i,j]

        If j is lower than i, act like a circular array and avoid duplication of the first/last vertex

        Parameters
        ----------
        i,j : int
            Index used to extract a sub list

        Returns
        -------
        List
            list of (x,y) coordinates
        """
        if i <= j:
            lst_coords = self.coords[i:j+1]
        else:
            # Wrap around the end of the closed line
            lst_coords = self.coords[i:] + self.coords[0:j+1]

        return lst_coords

    def _change_inflexion(self, i):
        """Flag if there is an inflexion between the specified vertices.

        There is an inflexion when a change of orientation occurs from clockwise to anti-clockwise or vice versa

        Parameters
        ----------
        i : int
            Index of the vertex orientation

        Returns
        -------
        bool
            Flag indicating if an inflexion occurs or not
        """
        max_v = len(self.vertex_orientation)
        if (self.vertex_orientation[i] == GenUtil.ANTI_CLOCKWISE and
            self.vertex_orientation[(i+1) % max_v] == GenUtil.CLOCKWISE) or \
           (self.vertex_orientation[i] == GenUtil.CLOCKWISE and
            self.vertex_orientation[(i+1) % max_v] == GenUtil.ANTI_CLOCKWISE):
            inflexion = True
        else:
            inflexion = False

        return inflexion

    def _add_bends(self, inflexions):
        """Add Bends to the line from the inflexion list

        Each bend spans from the start of one inflexion to the end of the next one.

        Parameters
        ----------
        inflexions : List
            List of the inflexions in the line

        Returns
        -------
        None
        """
        for k in range(len(inflexions) - 1):
            i = inflexions[k][0]
            j = inflexions[k + 1][1]
            self.sb_bends.append(Bend(i, j, self._extract_coords(i, j)))

    def _create_bends(self):
        """Create the bends in the line

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        # Delete any actual bend information
        self.sb_bends = []

        # Remove the colinear vertices in order to facilitate bend detection (moreover colinear vertices are useless)
        self._remove_colinear_vertex()

        inflexions = []
        max = len(self.vertex_orientation)  # NOTE: shadows the builtin max() within this method
        if self.is_closed:
            # Rotate the line to position the start of the line at the start of a bend
            self._rotate_start_bend()
            # The vertex_orientation list is considered a circular list
            for i in range(max):
                j = (i + 1) % max
                if self._change_inflexion(i):
                    inflexions.append((i, j))
            # Create the bends from the inflexion points
            if inflexions:
                if len(inflexions) >= 3:
                    # If there are 3 or more inflexions we add another circular inflexion
                    i = inflexions[-1][0]
                    j = inflexions[0][1]
                    inflexions.append((i, j))
                # Transform the inflexions into bends
                self._add_bends(inflexions)
        else:
            # The vertex_orientation list is not considered a circular list
            if max == 3:
                # Special case: there is only one bend to simplify
                j = len(self.coords)-1
                self.sb_bends.append(Bend(0, j, self._extract_coords(0, j)))
            elif max >= 4:
                for i in range(1, max-2):
                    if self._change_inflexion(i):
                        inflexions.append((i, i+1))
                # Add inflexions to create the first and last bend
                inflexions = [(0, None)] + inflexions + [(None, max-1)]
                # Transform inflexions into bends
                self._add_bends(inflexions)

        return

    def _sort_bends(self):
        """Sort the bends by order of ascending adjusted area

        Only the bends whose adjusted area is below the minimum are kept.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        lst_bends = []
        for i, bend in enumerate(self.sb_bends):
            if bend.adj_area <= self.sb_min_adj_area:
                # Only select the bends below the minimum adjusted area
                lst_bends.append((i, bend.adj_area))

        # Sort based on the adj_area from smallest to biggest
        lst_bends.sort(key=lambda tup: tup[1])  # sorts in place

        return lst_bends

    def _offset_bend_ij(self, i, j):
        """Offset the i,j values of the different bends because one or more vertices of the line were removed

        Handles the circular list when j < i

        Parameters
        ----------
        i,j : int
            Index in the line where the vertices were removed

        Returns
        -------
        None
        """
        if i < j:
            offset = j-i-1
        else:
            offset = j
        for bend in self.sb_bends:
            if bend.status == _NOT_SIMPLIFIED:
                if bend.i < bend.j:
                    if bend.i >= j:
                        bend.i -= offset
                        bend.j -= offset
                else:
                    # Circular case: only the start index needs the offset
                    if bend.i >= j:
                        bend.i -= offset

    def _make_line_ccw(self):
        """Make sure the line is counter clockwise.

        Only applies to closed lines

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        if self.sb_is_closed:
            tmp_ring = LinearRing(self.coords)
            if not tmp_ring.is_ccw:
                # The linear ring is clockwise. Reverse the coordinates to make it ccw
                self.coords = list(reversed(self.coords))

    def simplify(self, diameter, s_constraints=None):
        """Simplify the line by reducing each bend

        Parameters
        ----------
        diameter : float
            Diameter of the minimum adjusted area (currently unused here; the
            threshold was pre-computed into self.sb_min_adj_area)
        s_constraints : SpatialConstraints or None
            Spatial constraints validator; when None no constraint is checked

        Returns
        -------
        int
            Number of bends simplified during this pass
        """
        nbr_bend_simplified = 0
        # Make sure the line is counter clockwise
        #
        self._make_line_ccw()

        # Create the bends in the line
        self._create_bends()
        max_bends = len(self.sb_bends)
        sorted_bends = self._sort_bends()
        if len(sorted_bends) == 0:
            # No more bends to simplify. Line is at its simplest form
            self.sb_is_simplest = True
        elif len(sorted_bends) >= 2:
            # Make the biggest bend (last one) unsimplifiable
            ind_last = sorted_bends[-1][0]
            self.sb_bends[ind_last].status = _UNSIMPLIFIABLE

        # Loop over each bend to simplify it
        for sorted_bend in sorted_bends:
            ind = sorted_bend[0]
            if self.sb_bends[ind].status == _NOT_SIMPLIFIED:
                ind_before = None
                ind_after = None
                # Find the neighbouring bends (circular neighbours for a closed line)
                if self.sb_is_closed:
                    if max_bends >= 2:
                        ind_before = (ind-1) % max_bends
                        ind_after = (ind+1) % max_bends
                else:
                    if ind > 0:
                        ind_before = ind-1
                    if ind < max_bends-1:
                        ind_after = ind+1

                # Validate the spatial constraints
                i = self.sb_bends[ind].i
                j = self.sb_bends[ind].j

                # Build the candidate coordinates with the bend replaced by its base
                if i < j:
                    lst_coords = self.coords[0:i+1] + self.coords[j:]
                else:
                    # Manage circular list
                    lst_coords = self.coords[j:i+1] + self.coords[j:j+1]

                if self.is_closed:
                    if len(lst_coords) >= 4:
                        if s_constraints is not None:
                            in_conflict = s_constraints.check_constraints(self, self.sb_bends[ind])
                        else:
                            in_conflict = False
                    else:
                        # A closed line cannot have less than 4 vertices
                        in_conflict = True
                else:
                    if len(lst_coords) >= 2:
                        if s_constraints is not None:
                            in_conflict = s_constraints.check_constraints(self, self.sb_bends[ind])
                        else:
                            in_conflict = False
                    else:
                        # An open line cannot have less than 3 vertices
                        in_conflict = True

                if not in_conflict:
                    # Update the coordinates
                    self.coords = lst_coords

                    # Bends before and after must not be simplified in this pass, maybe in a next pass
                    if ind_before is not None:
                        self.sb_bends[ind_before].status = _UNSIMPLIFIABLE
                    if ind_after is not None:
                        self.sb_bends[ind_after].status = _UNSIMPLIFIABLE

                    self.sb_bends[ind].status = _SIMPLIFIED
                    nbr_bend_simplified += 1

                    # Re-align the indices of the remaining bends after the vertex removal
                    self._offset_bend_ij(i, j)

        return nbr_bend_simplified
class PointSb(PointSc):
    """Point feature used by the SherBend algorithm.

    A point is never simplified, so it is flagged as being at its simplest
    form from the start.  When fast_access is True a Python-level copy of the
    coordinates is cached because reaching into the shapely C layer is slow.

    Attributes
    ----------
    coords : tuple
        A tuple (x,y) representing one coordinate
    properties : dict
        The dictionary of the properties (attributes of the feature)
    fast_access : Boolean
        Keep a cached copy of the coordinates for faster access
    """

    def __init__(self, coords, layer_name, properties, fast_access=True):
        super().__init__(coords)
        self.sb_is_simplest = True  # a point is always at its simplest form
        self.sb_layer_name = layer_name
        self.sb_properties = properties
        self.sb_original_type = GenUtil.POINT
        self.sb_geom_type = GenUtil.POINT  # cached to avoid the slower C geom_type call
        self._sb_fast_access = fast_access
        if fast_access:
            self.__coords_cache = list(super().coords)

    @property
    def coords(self):
        # Serve the cached copy when fast access is enabled
        return self.__coords_cache if self._sb_fast_access else super().coords

    @coords.setter
    def coords(self, coords):
        # Write through to the shapely Point descriptor, then refresh the cache
        Point.coords.__set__(self, coords)
        if self._sb_fast_access:
            self.__coords_cache = list(super().coords)
class SpatialConstraints(object):
    """Validation of the spatial constraints for the Sherbend algorithm.

    Attributes
    ----------
    simplicity : bool
        Flag indicating if the simplicity constraint (self crossing) is validated
    crossing : bool
        Flag indicating if the crossing constraint (intersection between features) is validated
    sidedness : bool
        Flag indicating if the sidedness constraint (relative adjacency) is validated
    s_container : SpatialContainer
        Object containing all the features
    """

    def __init__(self, simplicity=True, crossing=True, sidedness=True, s_container=None):
        """Constructor for the SpatialConstraints class"""
        self.simplicity = simplicity
        self.crossing = crossing
        self.sidedness = sidedness
        self.s_container = s_container
        # Error counters, incremented once per detected conflict
        self.nbr_err_simplicity = 0
        self.nbr_err_crossing = 0
        self.nbr_err_sidedness = 0

    def check_constraints(self, line, bend):
        """Validate the different spatial constraints for one bend of a line.

        The checks short-circuit: as soon as one constraint is violated the
        remaining checks are skipped.

        Parameters
        ----------
        line : LineStringSb
            LineString to validate for spatial constraints
        bend : Bend
            Bend to validate for spatial constraints

        Returns
        -------
        bool
            True when at least one constraint is violated, False otherwise
        """
        return (self._check_simplicity(line, bend.replacement_line) or
                self._check_crossing(line, bend.replacement_line) or
                self._check_sidedness(line, bend.polygon))

    def _check_simplicity(self, line, new_sub_line):
        """Check if the new sub line creates a self intersection in the line.

        Parameter
        ---------
        line : LineStringSb
            LineString to validate for self intersection
        new_sub_line : LineString
            New LineString to validate for self intersection

        Returns
        -------
        Boolean
            Flag indicating if the line self-intersects or not
        """
        # Shrink the replacement line by a hair so it no longer touches the
        # original line at its own start/end points (also faster to test)
        shrunk_line = affinity.scale(new_sub_line, xfact=1. - GenUtil.ZERO, yfact=1. - GenUtil.ZERO)
        conflict = prep(shrunk_line).intersects(line)
        if conflict:
            self.nbr_err_simplicity += 1
        return conflict

    def _check_crossing(self, line, new_sub_line):
        """Check if the new sub line intersects any other feature.

        Parameter
        ---------
        line : LineStringSb
            LineString to validate for intersection with other features
        new_sub_line : LineString
            New LineString to validate for intersection with other features

        Returns
        -------
        Boolean
            Flag indicating if the line intersects another feature or not
        """
        neighbours = self.s_container.get_features(new_sub_line.bounds, remove_features=(line,))
        # Prepared geometry: repeated intersects() tests against it are much faster
        intersects = prep(new_sub_line).intersects
        for neighbour in neighbours:
            if intersects(neighbour):
                self.nbr_err_crossing += 1
                return True
        return False

    def _check_sidedness(self, line, pol):
        """Validate the line for the adjacency (sidedness) constraint.

        Parameter
        ---------
        line : LineStringSb
            LineString to validate for adjacency
        pol : Polygon
            Polygon of the bend to check for swallowed features

        Returns
        -------
        Boolean
            Flag indicating if the simplification creates an adjacency problem
        """
        neighbours = self.s_container.get_features(pol.bounds, remove_features=(line,))
        # A feature fully contained in the bend polygon would change side
        contains = prep(pol).contains
        for neighbour in neighbours:
            if contains(neighbour):
                self.nbr_err_sidedness += 1
                return True
        return False
class Bend(object):
    """Class defining the attributes and operations for bend manipulation

    All the geometric properties (polygon, area, base, perimeter, cmp_index,
    adj_area, replacement_line) are computed lazily on first access and cached
    on the instance (try/except AttributeError pattern).

    Attributes: None
    """

    def __init__(self, i, j, bend_coords):
        """Constructor of the class

        Parameters
        ----------
        i : int
            Index of the start of the bend in the list of coordinates
        j : int
            Index of the end of the bend in the list of coordinates
        bend_coords : list
            List of x,y coordinates of the bend

        Returns
        -------
        None
        """
        self.i = i  # Index of the start of the bend coordinate
        self.j = j  # Index of the end of the bend coordinate
        self.status = _NOT_SIMPLIFIED  # Simplification status; not simplified by default
        self.bend_coords = bend_coords  # List of the coordinates forming the bend

    @property
    def polygon(self):  # Polygon formed by the bend
        """Creates a polygon from the coordinates forming the bend

        Parameters
        ----------
        None

        Returns
        -------
        Polygon
            polygon formed by the coordinates
        """
        try:
            return self._polygon
        except AttributeError:
            self._polygon = Polygon(self.bend_coords)
            return self._polygon

    @property
    def area(self):
        """Area of the bend polygon (never smaller than GenUtil.ZERO)

        Parameters
        ----------
        None

        Returns
        -------
        float
            Area of the polygon
        """
        try:
            return self._area
        except AttributeError:
            self._area = self.polygon.area
            if self._area <= GenUtil.ZERO:
                self._area = GenUtil.ZERO  # In case of area=0 we assume an almost 0 area instead
            return self._area

    @property
    def base(self):
        """Length of the base of the bend. Distance between the first and last coordinates

        Parameters
        ----------
        None

        Returns
        -------
        Float
            Length of the base of the bend polygon
        """
        try:
            return self._base
        except AttributeError:
            self._base = GenUtil.distance(self.bend_coords[0], self.bend_coords[-1])
            if self._base <= GenUtil.ZERO:
                self._base = GenUtil.ZERO  # Avoids a case of division by zero
            return self._base

    @property
    def perimeter(self):
        """Length of the perimeter of the bend (polygon)

        Parameters
        ----------
        None

        Returns
        -------
        float
            Length of the perimeter
        """
        try:
            return self._perimeter
        except AttributeError:
            self._perimeter = self.polygon.length
            return self._perimeter

    @property
    def cmp_index(self):
        """Calculates the value of the compactness index

        Parameters
        ----------
        None

        Returns
        -------
        float
            Value of the compactness index
        """
        try:
            return self._cmp_index
        except AttributeError:
            self._cmp_index = GenUtil.calculate_compactness_index(self.area, self.perimeter)
            return self._cmp_index

    @property
    def adj_area(self):
        """Calculates the adjusted area of the bend polygon

        Parameters
        ----------
        None

        Returns
        -------
        float
            Value of the adjusted area
        """
        try:
            return self._adj_area
        except AttributeError:
            self._adj_area = GenUtil.calculate_adjusted_area(self.area, self.cmp_index)
            return self._adj_area

    @property
    def replacement_line(self):
        """Calculates the replacement line of the bend (the base joining its two ends)

        Parameters
        ----------
        None

        Returns
        -------
        LineString
            Replacement line for the bend
        """
        try:
            return self._replacement_line
        except AttributeError:
            self._replacement_line = LineString((self.bend_coords[0], self.bend_coords[-1]))
            return self._replacement_line
def create_replacement_line (lst_coords, bend, diameter):
    """Calculate the replacement line for a bend

    NOTE(review): this function appears to be unfinished/dead code. It
    references several undefined names (flagged inline below), never uses the
    `diameter` parameter, and never returns a value. Calling it will raise at
    runtime. Confirm whether it should be removed or completed.
    """
    # Extract the sub line containing the bend with one extra vertice on each side
    sub_line = LineStringSb(lst_coords[bend.i-1:bend.j+1])
    bend_i = 1
    bend_j = len(bend.j)-1  # NOTE(review): bend.j is an int index; len() on an int raises TypeError

    # Translate to sub line so that the bend starts at 0,0
    xoff, yoff = lst_coords[bend.i][0], lst_coords[bend.i][1]
    line_translate = affinity.affine_transform(sub_line, [1, 0, 0, 1, -xoff, -yoff])

    # Extract the angle between the base of the bend (bendi, bendj) and the x axis
    lst_coord = list(line_translate.coords)
    p0 = (lst_coord[bend_j][0], lst_coord[bend_j][1])
    p1 = (lst_coord[bend_i][0], lst_coord[bend_i][1])
    p2 = (abs(p0[0])+1., 0)
    angle = GenUtil.angle_vecor(p0, p1, p2)
    # p0_x = line1_coord[bend_j][0]
    # p0_y = line1_coord[bend_j][1]
    # p1_x = abs(p0_x) + 1.  # In case x == 0
    # p1_y = 0.
    # dot = p0_x * p1_x + p0_y * p1_y
    # len_a = (p0_x ** 2 + p0_y ** 2) ** .5
    # len_b = (p1_x ** 2 + p1_y ** 2) ** .5
    # NOTE(review): dot, len_a, len_b are only defined in the commented-out block above -> NameError here
    angle = math.acos(dot / (len_a * len_b))
    angle = (angle * 180 / math.pi)
    if p0[1] >= 0.:
        angle = -angle  # Clockwise rotation
    # if p0_y >= 0.:
    #     angle = -angle

    # Rotate the bend so it's on the x axis
    # NOTE(review): a, b, d, e are computed but never used; math.cos/sin expect
    # radians while `angle` is in degrees at this point — verify the intent
    a = math.cos(angle)
    b = -math.sin(angle)
    d = math.sin(angle)
    e = math.cos(angle)
    line_rotate = affinity.rotate(line_translate, angle, origin=(0, 0))
    lst_coords = list(line_rotate.coords)
    # line_i = LineString(lst_coords[0:3])
    # line_j = LineString(lst_coords[-2:])
    # Calculate the angle between the base of the bend of segment before and after the bend
    # NOTE(review): lib_geobato is not imported anywhere in this file -> NameError
    theta_i = lib_geobato.GenUtil.compute_angle(lst_coords[0], lst_coords[1], lst_coords[bend_j])
    theta_j = lib_geobato.GenUtil.compute_angle(lst_coords[bend_j], lst_coords[-2], lst_coords[-1])
    # Determine on which side of the base the bend lies
    bend_line = LineString(lst_coord[bend_i:bend_j+1])
    (minx, miny, maxx, maxy) = bend_line.bounds
    y_dynamic = (abs(miny) + abs(maxy)) * 10.
    x_middle = (lst_coords[bend_i][0] + lst_coords[bend_j][0]) / 2.
    line_y_positive = LineString(((x_middle, 0), (x_middle, y_dynamic)))
    line_y_negative = LineString(((x_middle, 0), (x_middle, -y_dynamic)))
    # NOTE(review): line4 is undefined (probably meant bend_line) -> NameError;
    # bend_side may also be unbound if neither branch matches
    if line4.crosses(line_y_positive):
        bend_side = +1
    else:
        if line4.crosses(line_y_negative):
            bend_side = -1

    if lst_coords[0][1] >= 0.:
        start_line_side = 1
    else:
        start_line_side = -1

    if lst_coords[-1][1] >= 0.:
        end_line_side = 1
    else:
        end_line_side = -1

    if (start_line_side * end_line_side == -1):
        print("Nothing to do....")
        line5 = LineString(lst_coords[0:bend_i + 1] + lst_coords[bend_j:])
    else:
        # Both lines are on the same side
        if start_line_side == 1 and end_line_side == 1:
            if bend_side == -1:
                angle_bias = 2.
                y_offset = -1
            else:
                angle_bias = 3.
                y_offset = 1
        if start_line_side == -1 and end_line_side == -1:
            if bend_side == 1:
                angle_bias = 2.
                y_offset = 1
            else:
                angle_bias = 3.
                y_offset = 1

        theta_i = (180. - theta_i) / angle_bias
        if theta_i >= 5.:
            hypothenus = x_middle / math.cos(theta_i * math.pi / 180.)
            y_height = math.sqrt(hypothenus ** 2 - x_middle ** 2)
            if bend_side == -1:
                y_height *= y_offset
            new_coord = (x_middle, y_height)
            line5 = LineString(lst_coords[0:bend_i + 1] + [new_coord] + lst_coords[bend_j:])
        else:
            print("Nothing to do....")
            line5 = LineString(lst_coords[0:bend_i + 1] + lst_coords[bend_j:])
class AlgoSherbend(object):
    """Main class for the Sherbend algorithm

    Attributes:
        - None
    """

    def __init__(self, command, geo_content):
        """Constructor of the class

        Parameters
        ----------
        command : DataClass
            Contains all the commands for the Sherbend line simplification algorithm
        geo_content: DataClass
            Contains the geo information needed for the sherbend line reduction algorithm

        Returns
        -------
        None
        """
        self.command = command
        self.geo_content = geo_content
        self.nbr_bend_simplified = 0

    def calculate_min_adj_area(self, diameter):
        """Calculates the minimum adjusted area of a bend

        Parameters
        ----------
        diameter : float
            diameter used to calculate the minimum adjusted area

        Returns
        -------
        float
            Minimum adjusted area
        """
        # Adjusted area of a circle of the given diameter, scaled by the compactness factor
        return (_AREA_CMP_INDEX * math.pi * (diameter/2.0)**2.0)

    def _calculate_adj_area(self, coords):
        """Calculates the adjusted area of a polygon

        Parameters
        ----------
        coords : list
            List of x,y coordinates defining a polygon

        Returns
        -------
        float
            Adjusted area of the polygon
        """
        pol = Polygon(coords)
        cmp_index = GenUtil.calculate_compactness_index(pol.area, pol.length)
        adj_area = GenUtil.calculate_adjusted_area(pol.area, cmp_index)

        return adj_area

    def load_features(self, geo_content, command):
        """Load the points, line strings and polygons in the spatial container.

        The Polygons are deconstructed into a list of LineString with clockwise orientation and extra information
        needed for the reconstruction of the original Polygon

        Parameters
        ----------
        geo_content : DataClass
            Contains all the input/output geo spatial information
        command : ParserArgument
            Contains the parameters of the command line interface

        Returns
        -------
        None
        """
        features = []  # List of features to pass to the spatial container

        # Load all the features in the spatial container
        for feature in geo_content.in_features:
            diameter = command.dlayer_dict[feature.sb_layer_name]
            min_adj_area = self.calculate_min_adj_area(diameter)
            if feature.geom_type == GenUtil.POINT:
                out_feature = PointSb(feature.coords, feature.sb_layer_name, feature.sb_properties)
                # Add the feature
                features.append(out_feature)
            elif feature.geom_type == GenUtil.LINE_STRING:
                # Fix: the original duplicated the assignment ("out_feature = out_feature = ...")
                out_feature = LineStringSb(feature.coords, GenUtil.LINE_STRING, min_adj_area, feature.sb_layer_name,
                                            feature.sb_properties)
                # Add the feature
                features.append(out_feature)
            elif feature.geom_type == GenUtil.POLYGON:
                adj_area = self._calculate_adj_area(feature.exterior.coords)
                # Only keep the polygons over the minimum adjusted area
                if not command.exclude_polygon or adj_area > min_adj_area:
                    # Deconstruct the Polygon into a list of LineString with supplementary information
                    # needed to reconstruct the original Polygon
                    ext_feature = LineStringSb(feature.exterior.coords, GenUtil.POLYGON_EXTERIOR, min_adj_area,
                                               feature.sb_layer_name, feature.sb_properties)
                    interiors = feature.interiors
                    int_features = []
                    # Extract the interiors as LineString
                    for interior in interiors:
                        adj_area = self._calculate_adj_area(interior.coords)
                        # Only keep the interiors (holes) over the minimal adjusted area
                        if not command.exclude_hole or adj_area > min_adj_area:
                            interior = LineStringSb(interior.coords, GenUtil.POLYGON_INTERIOR, min_adj_area, None, None)
                            int_features.append(interior)
                        else:
                            geo_content.nbr_del_holes += len(feature.interiors)

                    # Add interior features needed for Polygon reconstruction
                    ext_feature.sb_interiors = int_features

                    # Add the exterior and the interiors independently
                    features.append(ext_feature)  # Add the exterior
                    features += int_features  # Add the interiors
                else:
                    # Do not add the feature (exterior and interiors) in the spatial container
                    # Update some stats
                    geo_content.nbr_del_polygons += 1
                    geo_content.nbr_del_holes += len(feature.interiors)
            else:
                # Fix: shapely geometries expose geom_type, not .geometry
                # (the original "feature.geometry" raised AttributeError instead of GeoSimException)
                raise GeoSimException("Invalid geometry type: {}".format(feature.geom_type))

        # Create the spatial container that will receive all the spatial features
        self.s_container = SpatialContainer()
        self.s_container.add_features(features)  # Load all the features

        return

    def _manage_lines_simplification(self, s_constraints):
        """Main routine to simplify the lines

        For each line to simplify
            For each valid bend to simplify
                check the constraints; if the constraints are violated check alternative bends (only if the
                number of bends to simplify is one).

        One of the costly operations, specially for very long line strings (like contours), is to rewrite the
        coordinates into the Shapely structure. This is why we update the shapely structure at the end
        when the last bend of the line is processed.

        Parameters
        ----------
        s_constraints : SpatialConstraints
            Spatial constraints to validate

        Returns
        -------
        int
            Total number of bends simplified
        """
        iter_nbr = 0
        total_nbr_bend_simplified = 0
        # Iterate until all the lines are simplified or no more lines have to be simplified
        while (True):
            iter_nbr_bend_simplified = 0
            print('Iteration # {}'.format(iter_nbr))
            # Build line iterator over the lines that are not yet at their simplest form
            lines = (feature for feature in self.s_container.get_features()
                     if (not feature.sb_is_simplest and feature.sb_geom_type == GenUtil.LINE_STRING))
            for line in lines:
                nbr_bend_simplified = line.simplify(self.command.diameter, s_constraints)
                iter_nbr_bend_simplified += nbr_bend_simplified
                total_nbr_bend_simplified += nbr_bend_simplified
            print('Number of bend simplified {}'.format(iter_nbr_bend_simplified))
            print('----------')
            iter_nbr += 1
            if iter_nbr_bend_simplified == 0:
                # No bend was simplified in this pass: the process has converged
                break

        print('Total number of bend simplified: {}'.format(total_nbr_bend_simplified))
        print('Total number of simplicity error: {}'.format(s_constraints.nbr_err_simplicity))
        print('Total number of crossing error: {}'.format(s_constraints.nbr_err_crossing))
        print('Total number of sidedness error: {}'.format(s_constraints.nbr_err_sidedness))

        return total_nbr_bend_simplified

    def process(self):
        """Main routine for the Sherbend algorithm

        The algorithm will simplify the lines using the Sherbend algorithm.
        It will iterate over the lines until there are no more bends to simplify.

        Parameters
        ----------
        None

        Returns
        -------
        None
            The results are appended to self.geo_content.out_features
        """
        # Load the features into the spatial container
        self.load_features(self.geo_content, self.command)

        s_constraints = SpatialConstraints(s_container=self.s_container)

        self._manage_lines_simplification(s_constraints)

        for feature in self.s_container.get_features():
            if feature.sb_geom_type == GenUtil.POINT:
                self.geo_content.out_features.append(feature)
            elif feature.sb_geom_type == GenUtil.LINE_STRING:
                if feature.sb_original_type == GenUtil.LINE_STRING:
                    self.geo_content.out_features.append(feature)
                else:
                    if feature.sb_original_type == GenUtil.POLYGON_EXTERIOR:
                        # The LineString was an exterior Polygon so reconstruct the original Polygon
                        interiors = [list(interior.coords) for interior in feature.sb_interiors]
                        polygon = Polygon(feature.coords, interiors)
                        polygon.sb_layer_name = feature.sb_layer_name
                        polygon.sb_properties = feature.sb_properties
                        self.geo_content.out_features.append(polygon)
                    else:
                        pass  # Nothing to do with the holes here

        return
| [
"shapely.geometry.LinearRing",
"shapely.affinity.scale",
"math.acos",
"lib_geosim.GenUtil.calculate_adjusted_area",
"math.sqrt",
"lib_geosim.GenUtil.angle_vecor",
"shapely.geometry.Point.coords.__set__",
"math.cos",
"shapely.geometry.Polygon",
"shapely.geometry.LineString",
"lib_geosim.GenUtil.o... | [((18132, 18166), 'shapely.geometry.Point.coords.__set__', 'Point.coords.__set__', (['self', 'coords'], {}), '(self, coords)\n', (18152, 18166), False, 'from shapely.geometry import Point, LineString, LinearRing, Polygon\n'), ((20591, 20676), 'shapely.affinity.scale', 'affinity.scale', (['new_sub_line'], {'xfact': '(1.0 - GenUtil.ZERO)', 'yfact': '(1.0 - GenUtil.ZERO)'}), '(new_sub_line, xfact=1.0 - GenUtil.ZERO, yfact=1.0 - GenUtil.ZERO\n )\n', (20605, 20676), False, 'from shapely import affinity\n'), ((20735, 20757), 'shapely.prepared.prep', 'prep', (['smaller_sub_line'], {}), '(smaller_sub_line)\n', (20739, 20757), False, 'from shapely.prepared import prep\n'), ((21664, 21682), 'shapely.prepared.prep', 'prep', (['new_sub_line'], {}), '(new_sub_line)\n', (21668, 21682), False, 'from shapely.prepared import prep\n'), ((22572, 22581), 'shapely.prepared.prep', 'prep', (['pol'], {}), '(pol)\n', (22576, 22581), False, 'from shapely.prepared import prep\n'), ((27488, 27551), 'shapely.affinity.affine_transform', 'affinity.affine_transform', (['sub_line', '[1, 0, 0, 1, -xoff, -yoff]'], {}), '(sub_line, [1, 0, 0, 1, -xoff, -yoff])\n', (27513, 27551), False, 'from shapely import affinity\n'), ((27852, 27883), 'lib_geosim.GenUtil.angle_vecor', 'GenUtil.angle_vecor', (['p0', 'p1', 'p2'], {}), '(p0, p1, p2)\n', (27871, 27883), False, 'from lib_geosim import GenUtil, PointSc, LineStringSc, SpatialContainer, GeoSimException\n'), ((28183, 28215), 'math.acos', 'math.acos', (['(dot / (len_a * len_b))'], {}), '(dot / (len_a * len_b))\n', (28192, 28215), False, 'import math, sys\n'), ((28443, 28458), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (28451, 28458), False, 'import math, sys\n'), ((28500, 28515), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (28508, 28515), False, 'import math, sys\n'), ((28528, 28543), 'math.cos', 'math.cos', (['angle'], {}), '(angle)\n', (28536, 28543), False, 'import math, sys\n'), ((28566, 28619), 
'shapely.affinity.rotate', 'affinity.rotate', (['line_translate', 'angle'], {'origin': '(0, 0)'}), '(line_translate, angle, origin=(0, 0))\n', (28581, 28619), False, 'from shapely import affinity\n'), ((29110, 29150), 'shapely.geometry.LineString', 'LineString', (['lst_coord[bend_i:bend_j + 1]'], {}), '(lst_coord[bend_i:bend_j + 1])\n', (29120, 29150), False, 'from shapely.geometry import Point, LineString, LinearRing, Polygon\n'), ((29349, 29399), 'shapely.geometry.LineString', 'LineString', (['((x_middle, 0), (x_middle, y_dynamic))'], {}), '(((x_middle, 0), (x_middle, y_dynamic)))\n', (29359, 29399), False, 'from shapely.geometry import Point, LineString, LinearRing, Polygon\n'), ((29426, 29477), 'shapely.geometry.LineString', 'LineString', (['((x_middle, 0), (x_middle, -y_dynamic))'], {}), '(((x_middle, 0), (x_middle, -y_dynamic)))\n', (29436, 29477), False, 'from shapely.geometry import Point, LineString, LinearRing, Polygon\n'), ((32586, 32601), 'shapely.geometry.Polygon', 'Polygon', (['coords'], {}), '(coords)\n', (32593, 32601), False, 'from shapely.geometry import Point, LineString, LinearRing, Polygon\n'), ((32622, 32679), 'lib_geosim.GenUtil.calculate_compactness_index', 'GenUtil.calculate_compactness_index', (['pol.area', 'pol.length'], {}), '(pol.area, pol.length)\n', (32657, 32679), False, 'from lib_geosim import GenUtil, PointSc, LineStringSc, SpatialContainer, GeoSimException\n'), ((32699, 32751), 'lib_geosim.GenUtil.calculate_adjusted_area', 'GenUtil.calculate_adjusted_area', (['pol.area', 'cmp_index'], {}), '(pol.area, cmp_index)\n', (32730, 32751), False, 'from lib_geosim import GenUtil, PointSc, LineStringSc, SpatialContainer, GeoSimException\n'), ((13374, 13397), 'shapely.geometry.LinearRing', 'LinearRing', (['self.coords'], {}), '(self.coords)\n', (13384, 13397), False, 'from shapely.geometry import Point, LineString, LinearRing, Polygon\n'), ((28472, 28487), 'math.sin', 'math.sin', (['angle'], {}), '(angle)\n', (28480, 28487), False, 'import 
math, sys\n'), ((29979, 30037), 'shapely.geometry.LineString', 'LineString', (['(lst_coords[0:bend_i + 1] + lst_coords[bend_j:])'], {}), '(lst_coords[0:bend_i + 1] + lst_coords[bend_j:])\n', (29989, 30037), False, 'from shapely.geometry import Point, LineString, LinearRing, Polygon\n'), ((36514, 36532), 'lib_geosim.SpatialContainer', 'SpatialContainer', ([], {}), '()\n', (36530, 36532), False, 'from lib_geosim import GenUtil, PointSc, LineStringSc, SpatialContainer, GeoSimException\n'), ((24060, 24085), 'shapely.geometry.Polygon', 'Polygon', (['self.bend_coords'], {}), '(self.bend_coords)\n', (24067, 24085), False, 'from shapely.geometry import Point, LineString, LinearRing, Polygon\n'), ((24988, 25047), 'lib_geosim.GenUtil.distance', 'GenUtil.distance', (['self.bend_coords[0]', 'self.bend_coords[-1]'], {}), '(self.bend_coords[0], self.bend_coords[-1])\n', (25004, 25047), False, 'from lib_geosim import GenUtil, PointSc, LineStringSc, SpatialContainer, GeoSimException\n'), ((25967, 26029), 'lib_geosim.GenUtil.calculate_compactness_index', 'GenUtil.calculate_compactness_index', (['self.area', 'self.perimeter'], {}), '(self.area, self.perimeter)\n', (26002, 26029), False, 'from lib_geosim import GenUtil, PointSc, LineStringSc, SpatialContainer, GeoSimException\n'), ((26438, 26496), 'lib_geosim.GenUtil.calculate_adjusted_area', 'GenUtil.calculate_adjusted_area', (['self.area', 'self.cmp_index'], {}), '(self.area, self.cmp_index)\n', (26469, 26496), False, 'from lib_geosim import GenUtil, PointSc, LineStringSc, SpatialContainer, GeoSimException\n'), ((26915, 26970), 'shapely.geometry.LineString', 'LineString', (['(self.bend_coords[0], self.bend_coords[-1])'], {}), '((self.bend_coords[0], self.bend_coords[-1]))\n', (26925, 26970), False, 'from shapely.geometry import Point, LineString, LinearRing, Polygon\n'), ((30796, 30838), 'math.sqrt', 'math.sqrt', (['(hypothenus ** 2 - x_middle ** 2)'], {}), '(hypothenus ** 2 - x_middle ** 2)\n', (30805, 30838), False, 'import math, 
sys\n'), ((30989, 31061), 'shapely.geometry.LineString', 'LineString', (['(lst_coords[0:bend_i + 1] + [new_coord] + lst_coords[bend_j:])'], {}), '(lst_coords[0:bend_i + 1] + [new_coord] + lst_coords[bend_j:])\n', (30999, 31061), False, 'from shapely.geometry import Point, LineString, LinearRing, Polygon\n'), ((31147, 31205), 'shapely.geometry.LineString', 'LineString', (['(lst_coords[0:bend_i + 1] + lst_coords[bend_j:])'], {}), '(lst_coords[0:bend_i + 1] + lst_coords[bend_j:])\n', (31157, 31205), False, 'from shapely.geometry import Point, LineString, LinearRing, Polygon\n'), ((5108, 5183), 'lib_geosim.GenUtil.orientation', 'GenUtil.orientation', (['self.coords[i - 1]', 'self.coords[i]', 'self.coords[i + 1]'], {}), '(self.coords[i - 1], self.coords[i], self.coords[i + 1])\n', (5127, 5183), False, 'from lib_geosim import GenUtil, PointSc, LineStringSc, SpatialContainer, GeoSimException\n'), ((5403, 5471), 'lib_geosim.GenUtil.orientation', 'GenUtil.orientation', (['self.coords[-2]', 'self.coords[0]', 'self.coords[1]'], {}), '(self.coords[-2], self.coords[0], self.coords[1])\n', (5422, 5471), False, 'from lib_geosim import GenUtil, PointSc, LineStringSc, SpatialContainer, GeoSimException\n'), ((30734, 30769), 'math.cos', 'math.cos', (['(theta_i * math.pi / 180.0)'], {}), '(theta_i * math.pi / 180.0)\n', (30742, 30769), False, 'import math, sys\n'), ((2934, 2983), 'lib_geosim.GenUtil.distance', 'GenUtil.distance', (['self.coords[0]', 'self.coords[-1]'], {}), '(self.coords[0], self.coords[-1])\n', (2950, 2983), False, 'from lib_geosim import GenUtil, PointSc, LineStringSc, SpatialContainer, GeoSimException\n'), ((40233, 40267), 'shapely.geometry.Polygon', 'Polygon', (['feature.coords', 'interiors'], {}), '(feature.coords, interiors)\n', (40240, 40267), False, 'from shapely.geometry import Point, LineString, LinearRing, Polygon\n')] |
#!/usr/bin/env python3
"""Minimal one-shot TCP server: accept a single client, greet it, and exit."""
import socket

port = 12345
target_address = '127.0.0.1'

# Context managers guarantee both sockets are closed even on error
# (the original never closed the listening socket at all).
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    # Allow quick restarts without "Address already in use" errors.
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind((target_address, port))
    s.listen(2)
    conn, addr = s.accept()
    # conn: socket is the client socket.
    with conn:
        print(addr, "Now Connected")
        text = "Thank you for connecting from TCP Server."
        data = text.encode('ascii')
        # sendall() retries until the whole payload is written; send() may not.
        conn.sendall(data)
| [
"socket.socket"
] | [((103, 152), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (116, 152), False, 'import socket\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 30 20:11:19 2016
@author: stephen
"""
from __future__ import print_function
from keras.models import Model
from keras.utils import np_utils
import numpy as np
import os
from keras.callbacks import ModelCheckpoint
import pandas as pd
import sys
import keras
from keras.callbacks import ReduceLROnPlateau
def readucr(filename):
    """Load a UCR-format CSV: the first column is the class label, the
    remaining columns are the time-series values.

    Returns (X, Y) where X holds the series and Y the labels.
    """
    table = np.loadtxt(filename, delimiter=',')
    labels = table[:, 0]
    series = table[:, 1:]
    return series, labels
# ---------------------------------------------------------------------------
# Train a fully convolutional network (FCN) on one UCR time-series dataset,
# selected by name via the first command-line argument.
# ---------------------------------------------------------------------------
nb_epochs = 300
# Full list of UCR datasets this script has been run on (kept for reference):
#flist = ['Adiac', 'Beef', 'CBF', 'ChlorineConcentration', 'CinC_ECG_torso', 'Coffee', 'Cricket_X', 'Cricket_Y', 'Cricket_Z',
#'DiatomSizeReduction', 'ECGFiveDays', 'FaceAll', 'FaceFour', 'FacesUCR', '50words', 'FISH', 'Gun_Point', 'Haptics',
#'InlineSkate', 'ItalyPowerDemand', 'Lighting2', 'Lighting7', 'MALLAT', 'MedicalImages', 'MoteStrain', 'NonInvasiveFatalECG_Thorax1',
#'NonInvasiveFatalECG_Thorax2', 'OliveOil', 'OSULeaf', 'SonyAIBORobotSurface', 'SonyAIBORobotSurfaceII', 'StarLightCurves', 'SwedishLeaf', 'Symbols',
#'synthetic_control', 'Trace', 'TwoLeadECG', 'Two_Patterns', 'uWaveGestureLibrary_X', 'uWaveGestureLibrary_Y', 'uWaveGestureLibrary_Z', 'wafer', 'WordsSynonyms', 'yoga']
flist = [ sys.argv[1] ]  # dataset name, e.g. "Adiac"
for each in flist:
    fname = each
    # Load the train/test splits; each row is [label, t1, t2, ...].
    x_train, y_train = readucr(fname+'/'+fname+'_TRAIN')
    x_test, y_test = readucr(fname+'/'+fname+'_TEST')
    nb_classes = len(np.unique(y_test))
    # Small datasets get proportionally smaller batches (at most 16).
    batch_size = int(min(x_train.shape[0]/10, 16))
    # Remap the (possibly arbitrary) label values onto 0 .. nb_classes-1
    # so they can be one-hot encoded below.
    y_train = (y_train - y_train.min())/(y_train.max()-y_train.min())*(nb_classes-1)
    y_test = (y_test - y_test.min())/(y_test.max()-y_test.min())*(nb_classes-1)
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    # Standardize the series using the *training* statistics only,
    # so no test-set information leaks into preprocessing.
    x_train_mean = x_train.mean()
    x_train_std = x_train.std()
    x_train = (x_train - x_train_mean)/(x_train_std)
    x_test = (x_test - x_train_mean)/(x_train_std)
    # Add a trailing channel axis: Conv1D expects (samples, steps, channels).
    x_train = x_train.reshape(x_train.shape + (1,))
    x_test = x_test.reshape(x_test.shape + (1,))
    print ("class:"+each+", number of classes: "+str(nb_classes))
    # FCN architecture: three Conv1D + BatchNorm + ReLU stages,
    # then global average pooling and a softmax classifier.
    x = keras.layers.Input(x_train.shape[1:])
    # drop_out = Dropout(0.2)(x)
    conv1 = keras.layers.Conv1D(filters=32, kernel_size=8, strides=1, activation='relu', input_shape=(32,1))(x)
    conv1 = keras.layers.normalization.BatchNormalization()(conv1)
    conv1 = keras.layers.Activation('relu')(conv1)
    # drop_out = Dropout(0.2)(conv1)
    conv2 = keras.layers.Conv1D(filters=64, kernel_size=5, border_mode='same')(conv1)
    conv2 = keras.layers.normalization.BatchNormalization()(conv2)
    conv2 = keras.layers.Activation('relu')(conv2)
    # drop_out = Dropout(0.2)(conv2)
    conv3 = keras.layers.Conv1D(filters=32, kernel_size=3, border_mode='same')(conv2)
    conv3 = keras.layers.normalization.BatchNormalization()(conv3)
    conv3 = keras.layers.Activation('relu')(conv3)
    full = keras.layers.pooling.GlobalAveragePooling1D()(conv3)
    out = keras.layers.Dense(nb_classes, activation='softmax')(full)
    model = Model(input=x, output=out)
    optimizer = keras.optimizers.Adam()
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    # Halve the learning rate whenever training loss plateaus for 50 epochs.
    reduce_lr = ReduceLROnPlateau(monitor = 'loss', factor=0.5,
                      patience=50, min_lr=0.0001)
    # if os.path.isfile(fname+"_best.hdf5"):
    #     model.load_weights(fname+'_best.hdf5')
    # model.load_weights(fname+'_shapelet_best.hdf5')
    # Keep only the weights with the best validation accuracy on disk.
    checkpointer = ModelCheckpoint(filepath=fname+"_best.hdf5",
                       monitor = 'val_accuracy',
                       verbose=2,
                       save_best_only=True)
    # hist = model.fit(x_train, Y_train, batch_size=batch_size, epochs=nb_epochs,
    #           verbose=1, callbacks=[reduce_lr], validation_data=(x_test, Y_test))
    hist = model.fit(x_train, Y_train, batch_size=batch_size, epochs=nb_epochs,
              verbose=1, callbacks=[checkpointer,reduce_lr], validation_data=(x_test, Y_test))
    # Print the test-set metrics from the epoch with the lowest training loss.
    log = pd.DataFrame(hist.history)
    print (log.loc[log['loss'].idxmin]['loss'], log.loc[log['loss'].idxmin])
| [
"keras.optimizers.Adam",
"keras.layers.pooling.GlobalAveragePooling1D",
"numpy.unique",
"keras.callbacks.ModelCheckpoint",
"keras.layers.normalization.BatchNormalization",
"keras.callbacks.ReduceLROnPlateau",
"keras.layers.Dense",
"keras.layers.Input",
"keras.utils.np_utils.to_categorical",
"keras... | [((420, 455), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'delimiter': '""","""'}), "(filename, delimiter=',')\n", (430, 455), True, 'import numpy as np\n'), ((1683, 1727), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_train', 'nb_classes'], {}), '(y_train, nb_classes)\n', (1706, 1727), False, 'from keras.utils import np_utils\n'), ((1741, 1784), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['y_test', 'nb_classes'], {}), '(y_test, nb_classes)\n', (1764, 1784), False, 'from keras.utils import np_utils\n'), ((2141, 2178), 'keras.layers.Input', 'keras.layers.Input', (['x_train.shape[1:]'], {}), '(x_train.shape[1:])\n', (2159, 2178), False, 'import keras\n'), ((3096, 3122), 'keras.models.Model', 'Model', ([], {'input': 'x', 'output': 'out'}), '(input=x, output=out)\n', (3101, 3122), False, 'from keras.models import Model\n'), ((3145, 3168), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {}), '()\n', (3166, 3168), False, 'import keras\n'), ((3321, 3394), 'keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""loss"""', 'factor': '(0.5)', 'patience': '(50)', 'min_lr': '(0.0001)'}), "(monitor='loss', factor=0.5, patience=50, min_lr=0.0001)\n", (3338, 3394), False, 'from keras.callbacks import ReduceLROnPlateau\n'), ((3584, 3690), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': "(fname + '_best.hdf5')", 'monitor': '"""val_accuracy"""', 'verbose': '(2)', 'save_best_only': '(True)'}), "(filepath=fname + '_best.hdf5', monitor='val_accuracy',\n verbose=2, save_best_only=True)\n", (3599, 3690), False, 'from keras.callbacks import ModelCheckpoint\n'), ((4137, 4163), 'pandas.DataFrame', 'pd.DataFrame', (['hist.history'], {}), '(hist.history)\n', (4149, 4163), True, 'import pandas as pd\n'), ((1428, 1445), 'numpy.unique', 'np.unique', (['y_test'], {}), '(y_test)\n', (1437, 1445), True, 'import numpy as np\n'), ((2224, 2325), 'keras.layers.Conv1D', 
'keras.layers.Conv1D', ([], {'filters': '(32)', 'kernel_size': '(8)', 'strides': '(1)', 'activation': '"""relu"""', 'input_shape': '(32, 1)'}), "(filters=32, kernel_size=8, strides=1, activation='relu',\n input_shape=(32, 1))\n", (2243, 2325), False, 'import keras\n'), ((2336, 2383), 'keras.layers.normalization.BatchNormalization', 'keras.layers.normalization.BatchNormalization', ([], {}), '()\n', (2381, 2383), False, 'import keras\n'), ((2403, 2434), 'keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2426, 2434), False, 'import keras\n'), ((2495, 2561), 'keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(64)', 'kernel_size': '(5)', 'border_mode': '"""same"""'}), "(filters=64, kernel_size=5, border_mode='same')\n", (2514, 2561), False, 'import keras\n'), ((2581, 2628), 'keras.layers.normalization.BatchNormalization', 'keras.layers.normalization.BatchNormalization', ([], {}), '()\n', (2626, 2628), False, 'import keras\n'), ((2648, 2679), 'keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2671, 2679), False, 'import keras\n'), ((2740, 2806), 'keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'border_mode': '"""same"""'}), "(filters=32, kernel_size=3, border_mode='same')\n", (2759, 2806), False, 'import keras\n'), ((2826, 2873), 'keras.layers.normalization.BatchNormalization', 'keras.layers.normalization.BatchNormalization', ([], {}), '()\n', (2871, 2873), False, 'import keras\n'), ((2893, 2924), 'keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {}), "('relu')\n", (2916, 2924), False, 'import keras\n'), ((2948, 2993), 'keras.layers.pooling.GlobalAveragePooling1D', 'keras.layers.pooling.GlobalAveragePooling1D', ([], {}), '()\n', (2991, 2993), False, 'import keras\n'), ((3015, 3067), 'keras.layers.Dense', 'keras.layers.Dense', (['nb_classes'], {'activation': '"""softmax"""'}), "(nb_classes, activation='softmax')\n", 
(3033, 3067), False, 'import keras\n')] |
import os
indentSize = 1  # number of spaces added per indentation level


class calcs():
    """Accumulates lines of text with a managed indentation level."""

    def __init__(self):
        self.indent = 0  # current indentation (in spaces)
        self.txt = []    # accumulated lines, one entry per line

    def clear(self):
        """Discard all accumulated text and reset the indentation."""
        self.txt.clear()
        self.indent = 0

    def addCalcs(self, calc):
        """Append every line of another calcs object, shifted by our indent."""
        s = [' ' * self.indent + t for t in calc.txt]
        self.txt += s

    def addText(self, txt):
        """Append a single line, prefixed with the current indentation."""
        txt = ' ' * self.indent + txt
        self.txt.append(txt)

    def show(self):
        """Return the accumulated text as one OS-native multi-line string."""
        return os.linesep.join(self.txt)

    def inDent(self):
        """Increase the indentation by one level."""
        self.indent += indentSize

    def outDent(self):
        """Decrease the indentation by one level, never going below zero."""
        # BUG FIX: the original compared with '> 0', so the indentation could
        # never return to column 0 -- a single inDent() was impossible to undo.
        if self.indent - indentSize >= 0:
            self.indent -= indentSize
"os.linesep.join"
] | [((447, 472), 'os.linesep.join', 'os.linesep.join', (['self.txt'], {}), '(self.txt)\n', (462, 472), False, 'import os\n')] |
#!/usr/bin/env python
from __future__ import division
"""MODULE_DESCRIPTION"""
__author__ = "<NAME>"
__copyright__ = "Copyright 2015, Cohrint"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
import logging
from copy import deepcopy
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
class Probability(object):
    """Abstract base class for probability representations (grid, particle, etc).

    Subclasses are expected to implement ``pdf()``, ``find_MAP()`` and
    ``_discretize()``; this base class only uses the attributes those
    methods populate (``pos``, ``prob``, ``X``, ``Y``).

    Parameters
    ----------
    bounds : Array-like
        Bounding coordinates for the probability map, flattened as
        [min_1, ..., min_d, max_1, ..., max_d].
    res : float
        Resolution used for discretization of the probability map.
    """

    def __init__(self, bounds, res):
        self.bounds = bounds
        # One (min, max) pair per dimension, so the dimensionality is half
        # the number of bound values.
        self.ndims = int(len(bounds) / 2)
        self.res = res

    def entropy(self):
        """Return the differential entropy of the discretized distribution.

        Lazily discretizes the map and evaluates the pdf if that has not
        already happened.
        """
        # <>TODO: figure this out. Look at papers!
        # http://www-personal.acfr.usyd.edu.au/tbailey/papers/mfi08_huber.pdf
        if not hasattr(self, 'pos'):
            self._discretize()
        if not hasattr(self, 'prob'):
            self.pdf()
        p_i = self.prob  # TODO: change to 4 dims.
        # Sum of elementwise entropy values; nansum skips the NaNs produced
        # by p * log(p) where p == 0.
        H = -np.nansum(p_i * np.log(p_i)) * self.res ** self.ndims
        return H

    def compute_kld(self, other_gm):
        """Compute the KLD of self from another GM.

        Use a truth GM as ``other_gm``.  Both distributions must have been
        evaluated on grids of identical shape and resolution.
        """
        q_i = self.prob
        p_i = other_gm.prob
        kld = np.nansum(p_i * np.log(p_i / q_i)) * self.res ** self.ndims
        return kld

    # NOTE: the large commented-out _discretize() reference implementation
    # that used to live here was removed as dead code; concrete subclasses
    # must provide _discretize() themselves.

    def plot(self, title=None, alpha=1.0, show_colorbar=True, **kwargs):
        """Draw the distribution as filled contours on the current axes.

        Creates the figure/axes on first use (or when 'ax' is passed in
        kwargs), optionally adds a colorbar, and -- if ``show_ellipses``
        was requested at setup time -- overlays the distribution's
        ellipses.  Returns the contour set.
        """
        if not hasattr(self, 'ax') or 'ax' in kwargs:
            self.plot_setup(**kwargs)
        if title is None:
            title = self.__str__()
        self.contourf = self.ax.contourf(self.X, self.Y,
                                         self.prob,
                                         levels=self.levels,
                                         # cmap=plt.get_cmap('jet'),
                                         alpha=alpha,
                                         interpolation='none',
                                         antialiased=False
                                         )
        if show_colorbar and not hasattr(self, 'cbar'):
            divider = make_axes_locatable(self.ax)
            cax = divider.append_axes("right", size="5%", pad=0.1)
            cbar = plt.colorbar(self.contourf, cax)
            cbar.ax.tick_params(labelsize=20)
            self.cbar = cbar
        self.ax.set_title(title, fontsize=20)
        if self.show_ellipses:
            if hasattr(self.distribution, 'camera_viewcone'):
                poly = self.distribution.camera_viewcone
            else:
                poly = None
            # BUG FIX: the original referenced the bare (undefined) name
            # `distribution` here, raising NameError whenever ellipses
            # were enabled.
            self.ellipse_patches = self.distribution.plot_ellipses(ax=self.ax,
                                                                   poly=poly)
        return self.contourf

    def plot_setup(self, fig=None, ax=None, bounds=None, levels=None,
                   num_levels=50, resolution=0.1, show_ellipses=False):
        """Prepare figure, axes, contour levels and axis limits for plot().

        Falls back to the current figure/axes when none are given, derives
        the contour levels from the MAP value when not supplied, and scales
        the axes to the distribution's bounds.
        """
        self.show_ellipses = show_ellipses
        if fig is None:
            self.fig = plt.gcf()
        else:
            self.fig = fig
        if ax is None:
            self.ax = plt.gca()
        else:
            self.ax = ax
        if bounds is None:
            bounds = self.bounds
        if not hasattr(self, 'pos'):
            self._discretize(bounds=bounds)
        # Set levels
        if levels is None:
            _, max_prob = self.find_MAP()
            # Leave 20% headroom above the MAP probability.
            self.levels = np.linspace(0, max_prob * 1.2, num_levels)
        else:
            self.levels = levels
        # Set bounds
        plt.axis('scaled')
        self.ax.set_xlim([bounds[0], bounds[2]])
        self.ax.set_ylim([bounds[1], bounds[3]])

    def plot_remove(self):
        """Removes all plotted elements related to this distribution."""
        if hasattr(self, 'contourf'):
            for collection in self.contourf.collections:
                collection.remove()
            del self.contourf
        if hasattr(self, 'ellipse_patches'):
            for patch in self.ellipse_patches:
                patch.remove()
            del self.ellipse_patches

    def update_plot(self, i=0, **kwargs):
        """Redraw the distribution (e.g. one animation frame)."""
        logging.debug('Probability update {}'.format(i))
        self.plot_remove()
        self.plot(**kwargs)

    def copy(self):
        """Return a deep copy of this distribution."""
        return deepcopy(self)
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.colorbar",
"numpy.log",
"numpy.linspace",
"mpl_toolkits.axes_grid1.make_axes_locatable",
"copy.deepcopy",
"matplotlib.pyplot.axis"
] | [((6354, 6372), 'matplotlib.pyplot.axis', 'plt.axis', (['"""scaled"""'], {}), "('scaled')\n", (6362, 6372), True, 'import matplotlib.pyplot as plt\n'), ((7099, 7113), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (7107, 7113), False, 'from copy import deepcopy\n'), ((4944, 4972), 'mpl_toolkits.axes_grid1.make_axes_locatable', 'make_axes_locatable', (['self.ax'], {}), '(self.ax)\n', (4963, 4972), False, 'from mpl_toolkits.axes_grid1 import make_axes_locatable\n'), ((5059, 5091), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['self.contourf', 'cax'], {}), '(self.contourf, cax)\n', (5071, 5091), True, 'import matplotlib.pyplot as plt\n'), ((5821, 5830), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (5828, 5830), True, 'import matplotlib.pyplot as plt\n'), ((5918, 5927), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (5925, 5927), True, 'import matplotlib.pyplot as plt\n'), ((6226, 6268), 'numpy.linspace', 'np.linspace', (['(0)', '(max_prob * 1.2)', 'num_levels'], {}), '(0, max_prob * 1.2, num_levels)\n', (6237, 6268), True, 'import numpy as np\n'), ((1629, 1646), 'numpy.log', 'np.log', (['(p_i / q_i)'], {}), '(p_i / q_i)\n', (1635, 1646), True, 'import numpy as np\n'), ((1314, 1325), 'numpy.log', 'np.log', (['p_i'], {}), '(p_i)\n', (1320, 1325), True, 'import numpy as np\n')] |
from __future__ import print_function
import numpy as np
import pandas as pd
from sklearn import metrics
class Options(object):
    """Options used by the model (hyper-parameters and paths)."""

    def __init__(self):
        # Default model options, applied as instance attributes.
        defaults = {
            'embedding_size': 32,    # embedding dimension
            'learning_rate': 1.,     # initial LR; decays linearly to zero
            'epochs_to_train': 100,  # training stops after this many epochs
            'batch_size': 128,       # examples per training step
            'log_path': './ctr.log',
        }
        for name, value in defaults.items():
            setattr(self, name, value)
def read_file(path, infinite=True):
    """Yield one sample per line of *path* as a list of ints.

    Each line is expected to hold space-separated integers.  When
    *infinite* is true the file is re-read from the start forever;
    otherwise a single pass is made and a final ``None`` sentinel is
    yielded so consumers can detect exhaustion.

    :param path: path of the data file to read
    :param infinite: loop over the file endlessly when True
    """
    while True:
        # BUG FIX: the original opened the file without ever closing it,
        # leaking one handle per pass; the context manager closes it.
        with open(path, 'r') as fi:
            for line in fi:
                # BUG FIX: yield a real list -- callers index and slice each
                # sample, which a lazy map() object (Python 3) cannot do.
                yield [int(tok) for tok in line.replace('\n', '').split(' ')]
        if not infinite:
            break
    yield None
def ctr_batch_generator(opts, train=True):
    """Yield (batch, labels) pairs of CTR samples read from disk.

    ``batch`` is an ndarray of shape (batch_size, sequence_length) holding
    the zero-padded category-id sequences; ``labels`` holds the targets
    taken from the first field of each line.

    NOTE(review): Python 2 only as written (xrange, file_reader.next()).
    """
    if train:
        # Training data is cycled endlessly; test data is read just once.
        file_reader = read_file(opts.train_path, True)
    else:
        file_reader = read_file(opts.test_path, False)
    while True:
        batch = np.ndarray(shape=(opts.batch_size, opts.sequence_length))
        labels = np.ndarray(shape=(opts.batch_size))
        for i in xrange(opts.batch_size):
            single_sample = file_reader.next()
            if single_sample is None:
                # The (finite) reader is exhausted mid-batch.
                break
            target = single_sample[0]  # first field is the label
            temp = single_sample[1:opts.sequence_length]
            if len(temp) < opts.sequence_length:
                # Right-pad short sequences with the 0 category.
                gap = opts.sequence_length - len(temp)
                temp = np.array(temp + [0] * gap)
            assert len(temp) == opts.sequence_length
            batch[i] = temp
            labels[i] = target
        # NOTE(review): labels is preallocated, so len(labels) always equals
        # opts.batch_size -- only the single_sample check is effective here,
        # and a final partial batch is silently discarded.  Confirm intended.
        if len(labels) == opts.batch_size and single_sample is not None:
            yield np.array(batch), labels
        else:
            break
def get_substitute_cate(sample, target_index, opts):
    """Draw a replacement category id for ``sample[target_index]``.

    The replacement is sampled from the same field as the original
    category (presumably opts.fields_index maps field -> list of category
    ids and fields_index_inverse the reverse -- TODO confirm).  Fields
    with a single category fall back to a uniform draw over the whole
    vocabulary.
    """
    # Which field does the category being replaced belong to?
    field_i = opts.fields_index_inverse.get(sample[target_index])
    if field_i is None:
        # Unknown category: pick a random field instead.
        # NOTE(review): dict.keys() is not a sequence on Python 3, so
        # np.random.choice would raise here; confirm Python 2 usage.
        field_i = np.random.choice(opts.fields_index.keys(),1)[0]
    field_cates = opts.fields_index[field_i]
    rst = np.random.choice(field_cates,1)[0]
    if len(field_cates) == 1:
        # A one-category field could only return the original value, which
        # would not be a real substitution -- sample the full vocabulary.
        rst = np.random.randint(opts.vocabulary_size)
    return rst
def generate_fake_sample(temp, opts):
    """Corrupt a real sample by substituting ``opts.substitute_num`` categories.

    The sequence is truncated/zero-padded to ``opts.sequence_length``, then
    opts.substitute_num distinct positions get their category replaced by a
    different category drawn via ``get_substitute_cate``.  Returns the
    corrupted, fixed-length sequence.
    """
    temp_sequence_length = len(temp)
    temp = temp[0:opts.sequence_length]
    if len(temp) < opts.sequence_length:
        # Right-pad with the 0 category up to the fixed length.
        gap = opts.sequence_length - len(temp)
        temp = np.array(temp + [0] * gap)
    else:
        temp_sequence_length = opts.sequence_length
    assert len(temp) == opts.sequence_length
    # Never substitute into the same position twice, and avoid inserting a
    # value that already occurs in the sample.
    targets_to_avoid = set(temp)
    indices_to_avoid = set()
    # Only real (non-padding) positions are eligible for substitution.
    substitute_index = np.random.randint(temp_sequence_length)
    substitute_target = get_substitute_cate(temp, substitute_index, opts)
    for _ in range(opts.substitute_num):
        while substitute_index in indices_to_avoid:
            substitute_index = np.random.randint(temp_sequence_length)
        indices_to_avoid.add(substitute_index)
        count = 0
        # Retry a few times to find an unused category; give up after 5
        # attempts and accept a duplicate.
        while substitute_target in targets_to_avoid:
            if count > 5:
                break
            substitute_target = get_substitute_cate(temp, substitute_index, opts)
            count += 1
        targets_to_avoid.add(substitute_target)
        temp[substitute_index] = substitute_target
    return temp
def generate_discriminant_batch(opts, is_train=True, rate=0.5):
    """Yield (batch, labels) pairs for a real-vs-fake discriminator.

    With probability (1 - rate) a sample is taken verbatim from the data
    file (label 1.); otherwise it is corrupted via generate_fake_sample
    (label 0.).

    NOTE(review): Python 2 only as written (xrange, file_reader.next()).
    """
    data_index = 0  # NOTE(review): unused -- candidate for removal
    if is_train:
        file_reader = read_file(opts.train_path)
    else:
        file_reader = read_file(opts.test_path)
    while True:
        batch = np.ndarray(shape=(opts.batch_size, opts.sequence_length))
        labels = []
        for i in xrange(opts.batch_size):
            if np.random.random() > rate:
                # Real sample: pad to the fixed length, label as genuine.
                single_sample = file_reader.next()
                temp = single_sample[1:opts.sequence_length]
                if len(temp) < opts.sequence_length:
                    gap = opts.sequence_length - len(temp)
                    temp = np.array(temp + [0] * gap)
                assert len(temp) == opts.sequence_length
                batch[i] = temp
                labels.append(1.)
            else:
                # Fake sample: corrupt some categories, label as fake.
                single_sample = file_reader.next()
                temp = single_sample[1:opts.sequence_length]
                batch[i] = generate_fake_sample(temp, opts)
                labels.append(0.)
        yield batch, np.array(labels)
def read_feat_index(opts):
    """Load the feature-index mapping from a tab-separated file.

    Each row of ``opts.featindex`` is "feature<TAB>index".

    :return: (reverse_dictionary, dictionary, vocabulary_size) where
        ``dictionary`` maps feature -> int index, ``reverse_dictionary``
        is the inverse mapping, and ``vocabulary_size`` is the number of
        distinct features.
    """
    reverse_dictionary_raw = np.array(pd.read_csv(opts.featindex, sep='\t', header=None))
    reverse_dictionary = {}
    dictionary = {}
    for item in reverse_dictionary_raw:
        reverse_dictionary[int(item[1])] = item[0]
        dictionary[item[0]] = int(item[1])
    # The original also tracked the running max index inside the loop, but
    # that value was unconditionally overwritten below -- dead code removed.
    vocabulary_size = len(dictionary)
    print('vocabulary_size: ', vocabulary_size)
    return reverse_dictionary, dictionary, vocabulary_size
def eval_auc(model, opts, target=None, get_prob=None):
    """Evaluate the model on the test set and report AUC and log loss.

    Predictions come from ``model.predict_proba`` unless a ``get_prob``
    callable is supplied, in which case that is used instead.  The
    resulting metrics are stored on ``opts`` (opts.auc, opts.loss) and
    appended, together with all options, to ``opts.log_path``.
    """
    testing_batch_generator = ctr_batch_generator(opts,train=False)
    batch_num = 0
    y = []
    pred = []
    for batch, labels in testing_batch_generator:
        if target is None or get_prob is None:
            probs = model.predict_proba(batch, batch_size=opts.batch_size, verbose=0)
        else:
            # get_prob is expected to take [batch] and return [probs]
            # -- presumably a Keras backend function; confirm with callers.
            probs = get_prob([batch])[0]
        y.extend(labels)
        pred.extend([p[0] for p in probs])
        batch_num += 1
    fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=1)
    auc = metrics.auc(fpr, tpr)
    loss = metrics.log_loss(y, pred)
    print("Total testing sample: ", len(y), " Positive sample: ", sum(y))
    # Persist the metrics (and the full option set) for later inspection.
    opts.auc = auc
    opts.loss = loss
    with open(opts.log_path, 'a') as f:
        f.write(str(opts.__dict__)+'\r')
    print("AUC:", auc, ', log loss: ', loss)
"pandas.read_csv",
"numpy.random.choice",
"sklearn.metrics.auc",
"numpy.random.random",
"numpy.array",
"numpy.random.randint",
"sklearn.metrics.log_loss",
"sklearn.metrics.roc_curve",
"numpy.ndarray"
] | [((2763, 2802), 'numpy.random.randint', 'np.random.randint', (['temp_sequence_length'], {}), '(temp_sequence_length)\n', (2780, 2802), True, 'import numpy as np\n'), ((5627, 5666), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['y', 'pred'], {'pos_label': '(1)'}), '(y, pred, pos_label=1)\n', (5644, 5666), False, 'from sklearn import metrics\n'), ((5677, 5698), 'sklearn.metrics.auc', 'metrics.auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (5688, 5698), False, 'from sklearn import metrics\n'), ((5710, 5735), 'sklearn.metrics.log_loss', 'metrics.log_loss', (['y', 'pred'], {}), '(y, pred)\n', (5726, 5735), False, 'from sklearn import metrics\n'), ((1101, 1158), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(opts.batch_size, opts.sequence_length)'}), '(shape=(opts.batch_size, opts.sequence_length))\n', (1111, 1158), True, 'import numpy as np\n'), ((1176, 1209), 'numpy.ndarray', 'np.ndarray', ([], {'shape': 'opts.batch_size'}), '(shape=opts.batch_size)\n', (1186, 1209), True, 'import numpy as np\n'), ((2147, 2179), 'numpy.random.choice', 'np.random.choice', (['field_cates', '(1)'], {}), '(field_cates, 1)\n', (2163, 2179), True, 'import numpy as np\n'), ((2226, 2265), 'numpy.random.randint', 'np.random.randint', (['opts.vocabulary_size'], {}), '(opts.vocabulary_size)\n', (2243, 2265), True, 'import numpy as np\n'), ((2520, 2546), 'numpy.array', 'np.array', (['(temp + [0] * gap)'], {}), '(temp + [0] * gap)\n', (2528, 2546), True, 'import numpy as np\n'), ((3725, 3782), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(opts.batch_size, opts.sequence_length)'}), '(shape=(opts.batch_size, opts.sequence_length))\n', (3735, 3782), True, 'import numpy as np\n'), ((4650, 4700), 'pandas.read_csv', 'pd.read_csv', (['opts.featindex'], {'sep': '"""\t"""', 'header': 'None'}), "(opts.featindex, sep='\\t', header=None)\n", (4661, 4700), True, 'import pandas as pd\n'), ((3017, 3056), 'numpy.random.randint', 'np.random.randint', (['temp_sequence_length'], {}), 
'(temp_sequence_length)\n', (3034, 3056), True, 'import numpy as np\n'), ((1584, 1610), 'numpy.array', 'np.array', (['(temp + [0] * gap)'], {}), '(temp + [0] * gap)\n', (1592, 1610), True, 'import numpy as np\n'), ((3860, 3878), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (3876, 3878), True, 'import numpy as np\n'), ((4535, 4551), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (4543, 4551), True, 'import numpy as np\n'), ((1814, 1829), 'numpy.array', 'np.array', (['batch'], {}), '(batch)\n', (1822, 1829), True, 'import numpy as np\n'), ((4139, 4165), 'numpy.array', 'np.array', (['(temp + [0] * gap)'], {}), '(temp + [0] * gap)\n', (4147, 4165), True, 'import numpy as np\n')] |
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
import versioneer
def read(path):
    """Return the full text contents of the file at *path*."""
    handle = open(path)
    try:
        return handle.read()
    finally:
        handle.close()
# Package metadata for the `crochet` distribution.
setup(
    # Trove classifiers describing supported Python versions and license.
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
    ],
    name='crochet',
    # Version and build commands are derived from git tags by versioneer.
    version=versioneer.get_version(),
    cmdclass=versioneer.get_cmdclass(),
    description="Use Twisted anywhere!",
    install_requires=[
        "Twisted>=15.0",
        "wrapt",
    ],
    keywords="twisted threading",
    license="MIT",
    packages=["crochet", "crochet.tests"],
    url="https://github.com/itamarst/crochet",
    maintainer='<NAME>',
    maintainer_email='<EMAIL>',
    # Long description shown on PyPI: the README plus the changelog.
    long_description=read('README.rst') + '\n' + read('docs/news.rst'),
)
| [
"versioneer.get_cmdclass",
"versioneer.get_version"
] | [((813, 837), 'versioneer.get_version', 'versioneer.get_version', ([], {}), '()\n', (835, 837), False, 'import versioneer\n'), ((852, 877), 'versioneer.get_cmdclass', 'versioneer.get_cmdclass', ([], {}), '()\n', (875, 877), False, 'import versioneer\n')] |
from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError
from tkinter.messagebox import showerror, showwarning
from client import Client
from threading import Thread
from socket import error as socket_error
destroy = False
def on_closing():
    """Shut the client down: flag the receive loop to stop, notify the
    server, and destroy the chat window.

    Every step is best-effort because the connection and/or the widgets
    may already be gone.
    """
    global destroy
    destroy = True
    try:
        # NOTE(review): only TclError is caught here; a socket error from
        # send_server would propagate -- confirm that is intended.
        client.send_server("quit")
    except TclError:
        pass
    finally:
        try:
            tchat.destroy()
        except TclError:
            pass
def start():
    """Connect to the chat server using the host/port from the login form."""
    if not (host.get() and port.get()):
        return  # incomplete form: keep the login window open
    global client
    try:
        client = Client(host.get(), port.get())
    except (socket_error, ConnectionError):
        # Connection failed: report it and let the user retry.
        showerror("Error", "Can't connect to server !")
    else:
        # Connected: the login window is no longer needed.
        login.destroy()
def receive():
    """Background loop: forward every server message to the chat window.

    Stops (and shows an offline banner) as soon as the connection drops,
    the server sends "quit", or an empty message arrives.
    """
    offline_banner = """}------------------------------{
/!\\ [Receive system offline] /!\\
Press Enter to exit
}------------------------------{"""
    while True:
        try:
            msg = client.receive_server()
            if not msg or msg.lower() == "quit":
                raise ConnectionError("Client quit")
        except (socket_error, ConnectionError, AttributeError):
            show_message(offline_banner)
            break
        show_message(msg)
def send(event=None):
    """Send the typed message; warn the user and shut down on failure."""
    try:
        text = message.get()
        client.send_server(text)
        # Typing "quit" (or a dead receive loop) means we are done.
        if not receive_thread.is_alive() or text.lower() == "quit":
            raise ConnectionError("Client quit")
    except (socket_error, ConnectionError):
        showwarning("Disconnected", "Disconnected from server")
        on_closing()
    else:
        # Successful send: clear the input field for the next message.
        message.set("")
def show_message(msg):
    """Append *msg* to the read-only chat log, ensuring a trailing newline."""
    if not msg.endswith("\n"):
        msg += "\n"
    if destroy:
        return  # window is going away; drop the message
    # The Text widget is kept disabled so the user cannot type into the
    # log; unlock it just long enough to insert the new line.
    chat_message.configure(state="normal")
    chat_message.insert("end", msg)
    chat_message.configure(state="disable")
# --- Login window: ask for the server host and port ------------------------
login = Tk()
login.title("Login")
host = StringVar()  # server address typed by the user
port = IntVar()    # server port typed by the user
Label(login, text="Host & port:").pack()
login_f = Frame(login)
login_f.pack()
Entry(login_f, textvariable=host, width=14).grid(row=0, column=0)
Entry(login_f, textvariable=port, width=4).grid(row=0, column=1)
# start() connects to the server and destroys the login window on success.
Button(login, text="Submit", command=start).pack()
login.mainloop()
# --- Chat window: scrollable read-only log plus an input field -------------
tchat = Tk()
tchat.title("PyTchat")
# Closing the window must also notify the server (see on_closing).
tchat.protocol("WM_DELETE_WINDOW", on_closing)
chat = Frame(tchat)
chat.pack()
scrollbar = Scrollbar(chat)
scrollbar.pack(side="right", fill="y")
# The log is kept disabled; show_message() unlocks it briefly to append.
chat_message = Text(chat, height=15, width=50, yscrollcommand=scrollbar.set, state="disable")
chat_message.pack(side="left", fill="both")
# Background thread pulling messages from the server into the log.
receive_thread = Thread(target=receive)
receive_thread.start()
entry = Frame(tchat)
entry.pack()
message = StringVar()  # text currently typed in the input field
field = Entry(entry, textvariable=message)
field.bind("<Return>", send)  # the Enter key also sends
field.grid(row=0, column=0)
Button(entry, text="Send", command=send).grid(row=0, column=1)
tchat.mainloop()
| [
"tkinter.messagebox.showwarning",
"tkinter.IntVar",
"tkinter.messagebox.showerror",
"tkinter.Entry",
"tkinter.Button",
"tkinter.StringVar",
"tkinter.Tk",
"tkinter.Scrollbar",
"tkinter.Label",
"threading.Thread",
"tkinter.Text",
"tkinter.Frame"
] | [((1855, 1859), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (1857, 1859), False, 'from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError\n'), ((1888, 1899), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (1897, 1899), False, 'from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError\n'), ((1907, 1915), 'tkinter.IntVar', 'IntVar', ([], {}), '()\n', (1913, 1915), False, 'from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError\n'), ((1967, 1979), 'tkinter.Frame', 'Frame', (['login'], {}), '(login)\n', (1972, 1979), False, 'from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError\n'), ((2203, 2207), 'tkinter.Tk', 'Tk', ([], {}), '()\n', (2205, 2207), False, 'from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError\n'), ((2285, 2297), 'tkinter.Frame', 'Frame', (['tchat'], {}), '(tchat)\n', (2290, 2297), False, 'from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError\n'), ((2322, 2337), 'tkinter.Scrollbar', 'Scrollbar', (['chat'], {}), '(chat)\n', (2331, 2337), False, 'from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError\n'), ((2392, 2470), 'tkinter.Text', 'Text', (['chat'], {'height': '(15)', 'width': '(50)', 'yscrollcommand': 'scrollbar.set', 'state': '"""disable"""'}), "(chat, height=15, width=50, yscrollcommand=scrollbar.set, state='disable')\n", (2396, 2470), False, 'from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError\n'), ((2532, 2554), 'threading.Thread', 'Thread', ([], {'target': 'receive'}), '(target=receive)\n', (2538, 2554), False, 'from threading import Thread\n'), ((2587, 2599), 'tkinter.Frame', 'Frame', (['tchat'], {}), '(tchat)\n', (2592, 2599), False, 'from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, 
Entry, StringVar, IntVar, TclError\n'), ((2623, 2634), 'tkinter.StringVar', 'StringVar', ([], {}), '()\n', (2632, 2634), False, 'from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError\n'), ((2643, 2677), 'tkinter.Entry', 'Entry', (['entry'], {'textvariable': 'message'}), '(entry, textvariable=message)\n', (2648, 2677), False, 'from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError\n'), ((1916, 1949), 'tkinter.Label', 'Label', (['login'], {'text': '"""Host & port:"""'}), "(login, text='Host & port:')\n", (1921, 1949), False, 'from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError\n'), ((1995, 2038), 'tkinter.Entry', 'Entry', (['login_f'], {'textvariable': 'host', 'width': '(14)'}), '(login_f, textvariable=host, width=14)\n', (2000, 2038), False, 'from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError\n'), ((2061, 2103), 'tkinter.Entry', 'Entry', (['login_f'], {'textvariable': 'port', 'width': '(4)'}), '(login_f, textvariable=port, width=4)\n', (2066, 2103), False, 'from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError\n'), ((2126, 2169), 'tkinter.Button', 'Button', (['login'], {'text': '"""Submit"""', 'command': 'start'}), "(login, text='Submit', command=start)\n", (2132, 2169), False, 'from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError\n'), ((2735, 2775), 'tkinter.Button', 'Button', (['entry'], {'text': '"""Send"""', 'command': 'send'}), "(entry, text='Send', command=send)\n", (2741, 2775), False, 'from tkinter import Tk, Frame, Scrollbar, Label, Text, Button, Entry, StringVar, IntVar, TclError\n'), ((1509, 1564), 'tkinter.messagebox.showwarning', 'showwarning', (['"""Disconnected"""', '"""Disconnected from server"""'], {}), "('Disconnected', 'Disconnected from server')\n", (1520, 1564), False, 'from 
tkinter.messagebox import showerror, showwarning\n'), ((697, 744), 'tkinter.messagebox.showerror', 'showerror', (['"""Error"""', '"""Can\'t connect to server !"""'], {}), '(\'Error\', "Can\'t connect to server !")\n', (706, 744), False, 'from tkinter.messagebox import showerror, showwarning\n')] |
import inspect
import threading
import time
from six.moves import urllib
from ..errors import ConfigurationError
from ..util import get_dependency
from .base import Storage
class MemcachedStorage(Storage):
    """
    Rate limit storage with memcached as backend.

    Depends on the `pymemcache` library.
    """
    # number of compare-and-set attempts before `incr` gives up
    MAX_CAS_RETRIES = 10
    STORAGE_SCHEME = ["memcached"]

    def __init__(self, uri, **options):
        """
        :param str uri: memcached location of the form
         `memcached://host:port,host:port`, `memcached:///var/tmp/path/to/sock`
        :param options: all remaining keyword arguments are passed
         directly to the constructor of :class:`pymemcache.client.base.Client`
        :raise ConfigurationError: when `pymemcache` is not available
        """
        parsed = urllib.parse.urlparse(uri)
        self.hosts = []
        for loc in parsed.netloc.strip().split(","):
            if not loc:
                continue
            host, port = loc.split(":")
            self.hosts.append((host, int(port)))
        else:
            # filesystem path to a unix domain socket
            if parsed.path and not parsed.netloc and not parsed.port:
                self.hosts = [parsed.path]

        self.library = options.pop('library', 'pymemcache.client')
        # BUG FIX: this previously popped 'library' a second time, but the
        # line above had already consumed that key, so the cluster client
        # library was always the default.  A dedicated 'cluster_library'
        # option makes it configurable without changing existing behavior.
        self.cluster_library = options.pop(
            'cluster_library', 'pymemcache.client.hash'
        )
        self.client_getter = options.pop('client_getter', self.get_client)
        self.options = options

        if not get_dependency(self.library):
            raise ConfigurationError(
                "memcached prerequisite not available."
                " please install %s" % self.library
            )  # pragma: no cover
        # one client per thread; created lazily by the `storage` property
        self.local_storage = threading.local()
        self.local_storage.storage = None

    def get_client(self, module, hosts, **kwargs):
        """
        returns a memcached client.

        :param module: the memcached module
        :param hosts: list of memcached hosts
        :return: a ``HashClient`` when multiple hosts are configured,
         otherwise a plain ``Client``
        """
        return (
            module.HashClient(hosts, **kwargs)
            if len(hosts) > 1 else module.Client(*hosts, **kwargs)
        )

    def call_memcached_func(self, func, *args, **kwargs):
        """
        Call `func`, dropping the `noreply` keyword argument when the
        target client method does not accept it.
        """
        if 'noreply' in kwargs:
            # BUG FIX: inspect.getargspec was deprecated and removed in
            # Python 3.11; getfullargspec is the drop-in replacement
            # (`varkw` supersedes the old `keywords` attribute).
            argspec = inspect.getfullargspec(func)
            if not ('noreply' in argspec.args or argspec.varkw):
                kwargs.pop('noreply')  # noqa
        return func(*args, **kwargs)

    @property
    def storage(self):
        """
        lazily creates a memcached client instance using a thread local
        """
        if not (
            hasattr(self.local_storage, "storage")
            and self.local_storage.storage
        ):
            self.local_storage.storage = self.client_getter(
                get_dependency(
                    self.cluster_library if len(self.hosts) > 1
                    else self.library
                ),
                self.hosts, **self.options
            )
        return self.local_storage.storage

    def get(self, key):
        """
        :param str key: the key to get the counter value for
        """
        return int(self.storage.get(key) or 0)

    def clear(self, key):
        """
        :param str key: the key to clear rate async_limits for
        """
        self.storage.delete(key)

    def incr(self, key, expiry, elastic_expiry=False):
        """
        increments the counter for a given rate limit key

        :param str key: the key to increment
        :param int expiry: amount in seconds for the key to expire in
        :param bool elastic_expiry: whether to keep extending the rate limit
         window every hit.
        """
        if not self.call_memcached_func(
            self.storage.add, key, 1, expiry, noreply=False
        ):
            if elastic_expiry:
                value, cas = self.storage.gets(key)
                retry = 0
                # compare-and-set loop: retry while another client wins the race
                while (
                    not self.call_memcached_func(
                        self.storage.cas, key,
                        int(value or 0) + 1, cas, expiry
                    ) and retry < self.MAX_CAS_RETRIES
                ):
                    value, cas = self.storage.gets(key)
                    retry += 1
                # record the window end so get_expiry can report it
                self.call_memcached_func(
                    self.storage.set,
                    key + "/expires",
                    expiry + time.time(),
                    expire=expiry,
                    noreply=False
                )
                return int(value or 0) + 1
            else:
                return self.storage.incr(key, 1)
        self.call_memcached_func(
            self.storage.set,
            key + "/expires",
            expiry + time.time(),
            expire=expiry,
            noreply=False
        )
        return 1

    def get_expiry(self, key):
        """
        :param str key: the key to get the expiry for
        """
        return int(float(self.storage.get(key + "/expires") or time.time()))

    def check(self):
        """
        check if storage is healthy
        """
        try:
            self.call_memcached_func(self.storage.get, 'limiter-check')
            return True
        except:  # noqa
            return False
| [
"six.moves.urllib.parse.urlparse",
"time.time",
"threading.local",
"inspect.getargspec"
] | [((811, 837), 'six.moves.urllib.parse.urlparse', 'urllib.parse.urlparse', (['uri'], {}), '(uri)\n', (832, 837), False, 'from six.moves import urllib\n'), ((1726, 1743), 'threading.local', 'threading.local', ([], {}), '()\n', (1741, 1743), False, 'import threading\n'), ((2259, 2283), 'inspect.getargspec', 'inspect.getargspec', (['func'], {}), '(func)\n', (2277, 2283), False, 'import inspect\n'), ((4707, 4718), 'time.time', 'time.time', ([], {}), '()\n', (4716, 4718), False, 'import time\n'), ((4973, 4984), 'time.time', 'time.time', ([], {}), '()\n', (4982, 4984), False, 'import time\n'), ((4382, 4393), 'time.time', 'time.time', ([], {}), '()\n', (4391, 4393), False, 'import time\n')] |
from __future__ import division
import pandas as pd
import numpy as np
import calendar
import os.path as op
import sys
from datetime import datetime
from dateutil.relativedelta import relativedelta
from scipy.stats import percentileofscore
from scipy.stats import scoreatpercentile, pearsonr
from math import *
import time
from BCSD_stats_functions import *
import xarray as xr
import os, errno
def CALC_BCSD(OBS_CLIM_ALL, FCST_CLIM_ALL, LEAD_FINAL, TARGET_FCST_VAL_ARR, TARGET_FCST_SYR, TARGET_FCST_EYR, FCST_SYR, ENS_NUM, MON, MONTH_NAME, count_grid, BC_VAR, TINY):
    """Quantile-map forecast values onto the observed climatology for one grid cell.

    For every lead time, the target forecast value is converted to a quantile of
    the forecast climatology (via `lookup(..., 'QUAN', ...)`) and then mapped back
    to a value at the same quantile of the observed climatology
    (`lookup(..., 'DATA', ...)`).

    :param OBS_CLIM_ALL: observed climatology; row 0 is the quantile axis,
     rows 1..12 hold the per-month climatology time series
    :param FCST_CLIM_ALL: forecast climatology; row 0 is the quantile axis,
     rows 1..LEAD_FINAL hold the per-lead climatology time series
    :param LEAD_FINAL: number of lead months to process
    :param TARGET_FCST_VAL_ARR: raw forecast values [year, lead, ensemble]
    :param TARGET_FCST_SYR/TARGET_FCST_EYR: first/last target forecast year
    :param FCST_SYR: first year of the forecast record (year-index origin)
    :param ENS_NUM: number of ensemble members
    :param MON: initialization month (1-12); MONTH_NAME: its name, used in logging
    :param count_grid: running grid-cell counter (log only on the first cell)
    :param BC_VAR: variable being corrected ('PRCP' enables a negative-value check)
    :param TINY: numerical floor passed through to Calc_Stats/lookup
    :return: bias-corrected array [year, lead, ensemble]; -999 marks untouched cells
    """
    # -999 is the missing/fill value for cells that never get corrected.
    CORRECT_FCST_COARSE = np.full((TARGET_FCST_EYR - TARGET_FCST_SYR + 1, LEAD_FINAL, ENS_NUM), -999.0)

    for LEAD_NUM in range(0, LEAD_FINAL):  # loop from lead = 0 to final lead
        TARGET_MONTH = MON + LEAD_NUM  # the target forecast month
        # Handle targets that fall in the next calendar year
        # (e.g. a February 1983 forecast initialized in December 1982).
        if TARGET_MONTH > 12:
            TARGET_MONTH -= 12  # 13 wraps to January, and so on

        # Sanity log for the lead/target-month combination,
        # printed for the first grid cell only.
        if count_grid == 0:
            print ("Initial forecast month is {} Lead is {} and Target month is {}".format(MONTH_NAME, LEAD_NUM, calendar.month_name[TARGET_MONTH]))

        # Observed and forecast time series for the target month / lead.
        # Column 0 of each climatology array is the quantile time series.
        OBS_QUANT_TS, OBS_CLIM_TS = OBS_CLIM_ALL[0, :], OBS_CLIM_ALL[TARGET_MONTH, :]
        FCST_QUANT_TS, FCST_CLIM_TS = FCST_CLIM_ALL[0, :], FCST_CLIM_ALL[LEAD_NUM + 1, :]

        # Mean, standard deviation and skew of both time series.
        obs_mean, obs_sd, obs_skew = Calc_Stats(OBS_CLIM_TS, TINY)
        fcst_mean, fcst_sd, fcst_skew = Calc_Stats(FCST_CLIM_TS, TINY)

        # Bias correction proper: done separately for each ensemble
        # member of every target year.
        for fcst_yr in range(TARGET_FCST_SYR - FCST_SYR, TARGET_FCST_EYR - FCST_SYR + 1):
            for ens_num in range(0, ENS_NUM):
                TARGET_FCST_VAL = TARGET_FCST_VAL_ARR[fcst_yr, LEAD_NUM, ens_num]
                # 1) quantile of the raw value within the forecast climatology
                #    ('QUAN' = value -> quantile; 'DATA' = quantile -> value)
                TARGET_FCST_QUANT = lookup(TARGET_FCST_VAL, FCST_CLIM_TS, FCST_QUANT_TS, len(FCST_CLIM_TS), BC_VAR, 'QUAN', fcst_mean, fcst_sd, fcst_skew, TINY)
                # 2) value at that quantile in the observed climatology
                BIAS_CORRECTED_VALUE = lookup(TARGET_FCST_QUANT, OBS_QUANT_TS, OBS_CLIM_TS, len(OBS_CLIM_TS), BC_VAR, 'DATA', obs_mean, obs_sd, obs_skew, TINY)

                # Guard: precipitation must never come out negative.
                if (BC_VAR == 'PRCP') and (BIAS_CORRECTED_VALUE < 0):
                    print (TARGET_FCST_VAL, TARGET_FCST_QUANT, fcst_yr, LEAD_NUM, ens_num)

                CORRECT_FCST_COARSE[fcst_yr, LEAD_NUM, ens_num] = BIAS_CORRECTED_VALUE

    return CORRECT_FCST_COARSE
def latlon_calculations(ilat_min, ilat_max, ilon_min, ilon_max, nlats, nlons,
                        np_OBS_CLIM_ARRAY, np_FCST_CLIM_ARRAY,
                        LEAD_FINAL, TARGET_FCST_EYR, TARGET_FCST_SYR, FCST_SYR, ENS_NUM, MON,
                        MONTH_NAME, BC_VAR, TINY, FCST_COARSE):
    """Run CALC_BCSD for every grid cell in the given lat/lon sub-window.

    The climatology arrays are indexed relative to the sub-window
    (ilat/ilon offsets), while FCST_COARSE and the output use absolute
    grid indices (lat_num/lon_num).

    :param ilat_min..ilon_max: inclusive index bounds of the sub-window
    :param nlats, nlons: full-grid dimensions of the output array
    :param np_OBS_CLIM_ARRAY, np_FCST_CLIM_ARRAY: climatologies
     [quantile/month-or-lead, time, lat, lon] for the sub-window
    :param FCST_COARSE: raw forecasts [year, lead, ensemble, lat, lon]
    :return: bias-corrected array [year, lead, ensemble, lat, lon];
     cells outside the sub-window keep the -999 fill value
    """
    # -999 is the missing/fill value for cells outside the processed window.
    CORRECT_FCST_COARSE = np.full(
        (TARGET_FCST_EYR - TARGET_FCST_SYR + 1, LEAD_FINAL, ENS_NUM, nlats, nlons),
        -999.0
    )

    num_lats = ilat_max - ilat_min + 1
    num_lons = ilon_max - ilon_min + 1
    print("num_lats = ", num_lats, np_OBS_CLIM_ARRAY.shape)
    print("num_lons = ", num_lons, FCST_COARSE.shape)

    for ilat in range(num_lats):
        lat_num = ilat_min + ilat  # absolute latitude index
        for ilon in range(num_lons):
            lon_num = ilon_min + ilon  # absolute longitude index
            # running cell counter; CALC_BCSD logs only when it is 0
            count_grid = ilon + ilat * num_lons
            OBS_CLIM_ALL = np_OBS_CLIM_ARRAY[:, :, ilat, ilon]
            FCST_CLIM_ALL = np_FCST_CLIM_ARRAY[:, :, ilat, ilon]
            TARGET_FCST_VAL_ARR = FCST_COARSE[:, :, :, lat_num, lon_num]
            CORRECT_FCST_COARSE[:, :, :, lat_num, lon_num] = CALC_BCSD(
                OBS_CLIM_ALL, FCST_CLIM_ALL, LEAD_FINAL,
                TARGET_FCST_VAL_ARR, TARGET_FCST_SYR,
                TARGET_FCST_EYR, FCST_SYR, ENS_NUM, MON,
                MONTH_NAME, count_grid, BC_VAR, TINY)

    return CORRECT_FCST_COARSE
| [
"numpy.ones"
] | [((598, 667), 'numpy.ones', 'np.ones', (['(TARGET_FCST_EYR - TARGET_FCST_SYR + 1, LEAD_FINAL, ENS_NUM)'], {}), '((TARGET_FCST_EYR - TARGET_FCST_SYR + 1, LEAD_FINAL, ENS_NUM))\n', (605, 667), True, 'import numpy as np\n'), ((4583, 4670), 'numpy.ones', 'np.ones', (['(TARGET_FCST_EYR - TARGET_FCST_SYR + 1, LEAD_FINAL, ENS_NUM, nlats, nlons)'], {}), '((TARGET_FCST_EYR - TARGET_FCST_SYR + 1, LEAD_FINAL, ENS_NUM, nlats,\n nlons))\n', (4590, 4670), True, 'import numpy as np\n')] |
###################################
# Created on 22:20, Nov. 16th, 2020
# Author: fassial
# Filename: utils.py
###################################
# dep
import os
import pandas as pd
import scanpy as sp
from collections import defaultdict
# local dep
# macro
# def get_data_lm func
def get_data_lm(sce_fname, sparse=False):
    """Load a loom file with scanpy and return it as a pandas DataFrame."""
    annotated = sp.read_loom(sce_fname, sparse=sparse)
    return annotated.to_df()
# def get_data_csv func
def get_data_csv(sce_fname):
    """Read a comma-separated expression table.

    The first row is treated as the header and the first column as the
    row index.
    """
    return pd.read_csv(sce_fname, sep=',', header=0, index_col=0)
# Dispatch table mapping a file extension to its loader function.
# Unknown extensions fall back to the CSV reader via the defaultdict factory.
UTILS_GET_DATA_FUNC = defaultdict(lambda : get_data_csv, {
    ".loom": get_data_lm,
    ".csv": get_data_csv
})
# Generic entry point: choose a loader by file extension.
def get_data(sce_fname):
    """Load an expression matrix, selecting the reader from the extension."""
    _, extension = os.path.splitext(sce_fname)
    loader = UTILS_GET_DATA_FUNC[extension]
    return loader(sce_fname=sce_fname)
| [
"os.path.splitext",
"collections.defaultdict",
"pandas.read_csv",
"scanpy.read_loom"
] | [((677, 762), 'collections.defaultdict', 'defaultdict', (['(lambda : get_data_csv)', "{'.loom': get_data_lm, '.csv': get_data_csv}"], {}), "(lambda : get_data_csv, {'.loom': get_data_lm, '.csv': get_data_csv}\n )\n", (688, 762), False, 'from collections import defaultdict\n'), ((353, 391), 'scanpy.read_loom', 'sp.read_loom', (['sce_fname'], {'sparse': 'sparse'}), '(sce_fname, sparse=sparse)\n', (365, 391), True, 'import scanpy as sp\n'), ((518, 572), 'pandas.read_csv', 'pd.read_csv', (['sce_fname'], {'sep': '""","""', 'header': '(0)', 'index_col': '(0)'}), "(sce_fname, sep=',', header=0, index_col=0)\n", (529, 572), True, 'import pandas as pd\n'), ((844, 871), 'os.path.splitext', 'os.path.splitext', (['sce_fname'], {}), '(sce_fname)\n', (860, 871), False, 'import os\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) 2016 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import base64
import bs4
import cairosvg
import contextlib
import dotenv
import flask
import functools
import imghdr
import io
import json
import os
import pickle
import PIL.Image
import random
import re
import requests
import traceback
import tweepy
import unicodedata
import urllib.parse
import xml.etree.ElementTree as ET
# Pull configuration (REDISCLOUD_URL, Twitter API keys, ...) from a .env file.
dotenv.load_dotenv()
# Generic icon served whenever a real one cannot be fetched or resized.
FALLBACK_PNG = open("letter-icons/x.png", "rb").read()
# <link rel="..."> values that may point to a usable site icon.
LINK_REL_PATTERNS = [
    re.compile("^apple-touch-icon$"),
    re.compile("^apple-touch-icon-precomposed$"),
    re.compile("^icon$"),
    re.compile("^shortcut icon$"),
]
app = flask.Flask(__name__)
# URLs found to be too large to download; skipped on later requests.
blacklist = set()
if app.config["ENV"] == "production":
    import redis
    cache = redis.from_url(os.environ["REDISCLOUD_URL"])
else:
    # redislite runs an embedded redis for local development.
    import redislite
    cache = redislite.Redis()
# Cache HTTP connections for better performance.
# https://urllib3.readthedocs.io/en/latest/advanced-usage.html#customizing-pool-behavior
adapter = requests.adapters.HTTPAdapter(pool_connections=10,
                                        pool_maxsize=100,
                                        max_retries=0,
                                        pool_block=False)
rs = requests.Session()
rs.headers = {"User-Agent": "Mozilla/5.0"}
rs.mount("http://", adapter)
rs.mount("https://", adapter)
@app.route("/facebook-icon")
def facebook_icon():
"""Return a downscaled Facebook profile image."""
user = flask.request.args["user"]
size = int(flask.request.args["size"])
format = flask.request.args.get("format", "png")
key = "facebook-icon:{}:{:d}".format(user, size)
if cache.exists(key):
print("Found in cache: {}".format(key))
image, ttl = get_from_cache(key)
return make_response(image, format, ttl)
url = "https://graph.facebook.com/{user}/picture?type=large"
url = url.format(user=urllib.parse.quote(user))
try:
print("Requesting {}".format(url))
image = request_image(url, max_size=5)
image = resize_image(image, size)
if imghdr.what(None, image) != "png":
raise ValueError("Non-PNG data received")
cache.set(key, image, ex=rex(3, 5))
return make_response(image, format)
except Exception as error:
print("Error requesting {}: {}".format(
flask.request.full_path, str(error)))
image = resize_image(FALLBACK_PNG, size)
cache.set(key, image, ex=7200)
return make_response(image, format, 7200)
@app.route("/favicon")
def favicon():
"""Return a 16x16 favicon for website."""
domain = flask.request.args["url"]
domain = re.sub("/.*$", "", re.sub("^.*?://", "", domain))
format = flask.request.args.get("format", "png")
key = "favicon:{}".format(domain)
if cache.exists(key):
print("Found in cache: {}".format(key))
image, ttl = get_from_cache(key)
return make_response(image, format, ttl)
url = "https://www.google.com/s2/favicons?domain={domain}"
url = url.format(domain=urllib.parse.quote(domain))
try:
print("Requesting {}".format(url))
image = request_image(url, max_size=1)
if imghdr.what(None, image) != "png":
raise ValueError("Non-PNG data received")
cache.set(key, image, ex=rex(3, 5))
return make_response(image, format)
except Exception as error:
print("Error requesting {}: {}".format(
flask.request.full_path, str(error)))
image = resize_image(FALLBACK_PNG, 16)
cache.set(key, image, ex=7200)
return make_response(image, format, 7200)
def find_icons(url):
    """Yield icon entries declared in the HTML head of `url`, then root fallbacks."""
    resolved_url, markup = get_page(url)
    document = bs4.BeautifulSoup(markup, "html.parser")
    for rel_pattern in LINK_REL_PATTERNS:
        for link in document.find_all("link", dict(rel=rel_pattern)):
            icon_url = urllib.parse.urljoin(resolved_url, link.attrs["href"])
            declared = link.attrs.get("sizes", "0x0")
            if declared == "any":
                declared = "1000x1000"
            width = int(declared.split("x")[0])
            yield dict(url=icon_url, size=width)
    # Fall back on looking for icons at the server root.
    for name in ("/apple-touch-icon.png", "/apple-touch-icon-precomposed.png"):
        yield dict(url=urllib.parse.urljoin(resolved_url, name), fallback=True)
def get_cache_control(max_age):
    """Build a Cache-Control header value allowing caching for `max_age` seconds."""
    header = "public, max-age={:d}"
    return header.format(max_age)
def get_from_cache(key):
    """Fetch `key`'s cached payload together with its remaining TTL."""
    payload = cache.get(key)
    remaining = cache.ttl(key)
    return payload, remaining
def get_letter(url):
    """Return a single lowercase letter representing `url`'s domain, or "x"."""
    if "://" not in url:
        url = "http://" + url
    host = urllib.parse.urlparse(url).netloc
    labels = host.split(".")
    # prefer the second-level label ("google" in www.google.com)
    if len(labels) > 1:
        name = labels[-2]
    else:
        name = labels[0]
    if not name:
        return "x"
    return name[0].lower()
@functools.lru_cache(256)
def get_letter_icon(letter):
    """Return PNG bytes of the icon for `letter`, or the generic fallback.

    Looks for "letter-icons/<letter>.png" first, then for a file named
    after the character's Unicode name (e.g. "latin-small-letter-a.png").
    """
    path = "letter-icons/{}.png".format(letter)
    if not os.path.isfile(path):
        # try the Unicode-name variant; unicodedata.name may raise
        # ValueError for unnamed characters, which propagates as before
        unicode_name = unicodedata.name(letter).lower().replace(" ", "-")
        path = "letter-icons/{}.png".format(unicode_name)
        if not os.path.isfile(path):
            return FALLBACK_PNG
    with open(path, "rb") as icon_file:
        return icon_file.read()
def get_page(url, timeout=15):
    """Return the final (redirect-resolved) URL and the page body as text.

    If `url` has no scheme, "https://" then "http://" are tried in turn,
    errors being swallowed until one variant succeeds.

    :param url: absolute URL, or a bare host/path without scheme
    :param timeout: per-request timeout in seconds
    :raises Exception: when no scheme variant could be fetched
    """
    if "://" in url:
        response = rs.get(url, timeout=timeout)
        response.raise_for_status()
        return response.url, response.text
    for scheme in ("https", "http"):
        with silent(Exception):
            # BUG FIX: propagate `timeout` — the recursive call previously
            # fell back to the default and ignored the caller's value.
            return get_page("{}://{}".format(scheme, url), timeout=timeout)
    raise Exception("Failed to get page")
@functools.lru_cache(1)
def get_twitter_api():
    """Return a cached, app-authenticated Twitter API client."""
    consumer_key = os.environ["TWITTER_API_KEY"]
    consumer_secret = os.environ["TWITTER_API_SECRET"]
    handler = tweepy.AppAuthHandler(consumer_key, consumer_secret)
    return tweepy.API(handler)
@app.route("/google-search-suggestions")
def google_search_suggestions():
"""Return a JSON array of Google search suggestions for query."""
query = flask.request.args["query"]
lang = flask.request.args.get("lang", "en")
key = "google-search-suggestions:{}:{}".format(query, lang)
if cache.exists(key):
print("Found in cache: {}".format(key))
data, ttl = get_from_cache(key)
return make_response(pickle.loads(data), "json", ttl)
url = "https://suggestqueries.google.com/complete/search?output=toolbar&q={query}&hl={lang}"
url = url.format(query=urllib.parse.quote_plus(query), lang=lang)
try:
print("Requesting {}".format(url))
response = rs.get(url, timeout=5)
response.raise_for_status()
root = ET.fromstring(response.text)
suggestions = [x.get("data") for x in root.iter("suggestion")]
cache.set(key, pickle.dumps(suggestions), ex=3600)
return make_response(suggestions, "json")
except Exception as error:
print("Error requesting {}: {}".format(
flask.request.full_path, str(error)))
cache.set(key, pickle.dumps([]), ex=3600)
return make_response([], "json", 3600)
@app.route("/icon")
def icon():
"""Return apple-touch-icon or favicon for website."""
url = flask.request.args["url"]
size = int(flask.request.args["size"])
format = flask.request.args.get("format", "png")
key = "icon:{}:{:d}".format(url, size)
if cache.exists(key):
print("Found in cache: {}".format(key))
image, ttl = get_from_cache(key)
return make_response(image, format, ttl)
try:
print("Parsing {}".format(url))
icons = list(find_icons(url))
icons.sort(key=lambda x: x.get("size", 0) or 1000)
except Exception as error:
print("Error parsing {}: {}".format(
flask.request.full_path, str(error)))
icons = []
for icon in icons:
# Ignore icons with a known size less than requested.
icon.setdefault("size", 0)
if 0 < icon["size"] < size: continue
try:
print("Requesting {}".format(icon["url"]))
image = request_image(icon["url"])
if not is_svg(image):
with PIL.Image.open(io.BytesIO(image)) as pi:
if min(pi.width, pi.height) < size: continue
image = resize_image(image, size)
if imghdr.what(None, image) != "png":
raise ValueError("Non-PNG data received")
cache.set(key, image, ex=rex(3, 5))
return make_response(image, format)
except Exception as error:
print("Error requesting {}: {}".format(
icon["url"], str(error)))
# Fall back on letter icons for domain.
image = get_letter_icon(get_letter(url))
image = resize_image(image, size)
cache.set(key, image, ex=rex(3, 5))
return make_response(image, format)
@app.route("/icons")
def icons():
"""Return JSON listing of icons for website."""
url = flask.request.args["url"]
key = "icons:{}".format(url)
if cache.exists(key):
print("Found in cache: {}".format(key))
data, ttl = get_from_cache(key)
return make_response(pickle.loads(data), "json", ttl)
try:
print("Parsing {}".format(url))
icons = list(find_icons(url))
except Exception as error:
print("Error parsing {}: {}".format(
flask.request.full_path, str(error)))
icons = []
for i in list(range(len(icons) - 1, -1, -1)):
if icons[i].get("size", 1) < 1: del icons[i]["size"]
if icons[i].get("fallback", False): del icons[i]
data = dict(icons=icons)
cache.set(key, pickle.dumps(data), ex=300)
return make_response(data, "json", 300)
@app.route("/image")
def image():
"""Return a downscaled image read from URL."""
url = flask.request.args["url"]
size = int(flask.request.args["size"])
format = flask.request.args.get("format", "png")
key = "image:{}:{:d}".format(url, size)
if cache.exists(key):
print("Found in cache: {}".format(key))
image, ttl = get_from_cache(key)
return make_response(image, format, ttl)
try:
print("Requesting {}".format(url))
image = request_image(url, max_size=1)
image = resize_image(image, size)
if imghdr.what(None, image) != "png":
raise ValueError("Non-PNG data received")
cache.set(key, image, ex=rex(3, 5))
return make_response(image, format)
except Exception as error:
print("Error requesting {}: {}".format(
flask.request.full_path, str(error)))
image = resize_image(FALLBACK_PNG, size)
cache.set(key, image, ex=7200)
return make_response(image, format, 7200)
def is_svg(image):
    """Return True when `image` is SVG markup (a str starting with "<svg")."""
    if not isinstance(image, str):
        return False
    return image.lstrip().startswith("<svg")
def make_response(data, format, max_age=None):
    """Return response 200 for `data` as `format`.

    :param data: payload — bytes for "base64"/"png", any JSON-serializable
     object for "json"
    :param format: one of "base64", "json" or "png"
    :param max_age: Cache-Control max-age in seconds; when omitted, images
     get a random 1-3 day value and JSON gets one hour

    NOTE(review): an unrecognized `format` silently yields None (HTTP 500
    from Flask) — callers only pass the three formats above.
    """
    if format == "base64":
        text = base64.b64encode(data)
        max_age = max_age or random.randint(1, 3) * 86400
        return flask.Response(text, 200, {
            "Access-Control-Allow-Origin": "*",
            "Content-Type": "text/plain",
            "Content-Encoding": "UTF-8",
            "Content-Length": str(len(text)),
            "Cache-Control": get_cache_control(max_age),
        })
    if format == "json":
        text = json.dumps(data, ensure_ascii=False)
        max_age = max_age or 3600
        return flask.Response(text, 200, {
            "Access-Control-Allow-Origin": "*",
            "Content-Type": "application/json",
            "Content-Encoding": "UTF-8",
            "Content-Length": str(len(text)),
            "Cache-Control": get_cache_control(max_age),
        })
    if format == "png":
        max_age = max_age or random.randint(1, 3) * 86400
        return flask.Response(data, 200, {
            "Access-Control-Allow-Origin": "*",
            "Content-Type": "image/png",
            "Content-Length": str(len(data)),
            "Cache-Control": get_cache_control(max_age),
        })
def request_image(url, max_size=1, timeout=15):
    """Request and return image at `url` at most `max_size` MB.

    Returns SVG as a str, raster data as bytes.  URLs that turn out to be
    too large are added to the module-level `blacklist` and rejected on
    subsequent calls.

    :raises ValueError: when the URL is blacklisted or the payload exceeds
     `max_size` MB
    """
    # Avoid getting caught reading insanely large files.
    # http://docs.python-requests.org/en/master/user/advanced/#body-content-workflow
    if url in blacklist:
        raise ValueError("URL blacklisted")
    max_size = max_size * 1024 * 1024
    # stream=True defers the body download until we have checked the headers
    with contextlib.closing(rs.get(
        url, timeout=timeout, stream=True)) as response:
        response.raise_for_status()
        # reject early when the declared length already exceeds the limit
        if ("content-length" in response.headers and
            response.headers["content-length"].isdigit() and
            int(response.headers["content-length"]) > max_size):
            raise ValueError("Too large")
        content_type = response.headers.get("content-type", "").lower()
        if url.endswith(".svg") or content_type == "image/svg+xml":
            # SVG, return as string.
            image = response.text
            if len(image) > max_size:
                blacklist.add(url)
                raise ValueError("Too large")
            return image
        # Raster, return as bytes.
        # read one byte past the limit so oversize bodies are detectable
        image = response.raw.read(max_size+1, decode_content=True)
        if len(image) > max_size:
            blacklist.add(url)
            raise ValueError("Too large")
        return image
def resize_image(image, size):
    """Resize `image` to `size` and return PNG bytes.

    Accepts either SVG markup (str, rasterized via cairosvg) or raster
    bytes readable by Pillow.  Non-square images are padded with
    transparent margins to a `size` x `size` square.

    NOTE(review): PIL.Image.BICUBIC is a deprecated alias in newer Pillow
    releases (PIL.Image.Resampling.BICUBIC) — confirm the pinned version.
    """
    if is_svg(image):
        image = cairosvg.svg2png(bytestring=image.encode("utf-8"),
                                 output_width=size,
                                 output_height=size)
    with PIL.Image.open(io.BytesIO(image)) as pi:
        if pi.mode not in ("RGB", "RGBA"):
            # palette/grayscale images need RGBA for transparent padding
            pi = pi.convert("RGBA")
        pi.thumbnail((size, size), PIL.Image.BICUBIC)
        if pi.width != pi.height:
            # Add transparent margins to make a square image.
            bg = PIL.Image.new("RGBA", (size, size), (255, 255, 255, 0))
            bg.paste(pi, ((size - pi.width) // 2, (size - pi.height) // 2))
            pi = bg
        out = io.BytesIO()
        pi.save(out, "PNG")
        return out.getvalue()
def rex(a, b):
    """Return a random number of seconds between `a` and `b` days."""
    low = int(a * 86400)
    high = int(b * 86400)
    return random.randint(low, high)
@contextlib.contextmanager
def silent(*exceptions, tb=False):
    """Context manager that swallows the given exception types.

    With ``tb=True`` a traceback is printed whenever an exception is
    suppressed; other exception types propagate unchanged.
    """
    try:
        yield
    except exceptions:
        if tb:
            traceback.print_exc()
@app.route("/twitter-icon")
def twitter_icon():
"""Return a downscaled Twitter profile image."""
user = flask.request.args["user"]
size = int(flask.request.args["size"])
format = flask.request.args.get("format", "png")
key = "twitter-icon:{}:{:d}".format(user, size)
if cache.exists(key):
print("Found in cache: {}".format(key))
image, ttl = get_from_cache(key)
return make_response(image, format, ttl)
try:
api = get_twitter_api()
user_object = api.get_user(user)
url = user_object.profile_image_url_https
# Remove size variant to get the full "original" image.
# https://developer.twitter.com/en/docs/accounts-and-users/user-profile-images-and-banners
url = re.sub(r"_([^/_.]+)(\.\w+)$", r"\2", url)
print("Found profile image URL {}".format(url))
image = request_image(url, max_size=5)
image = resize_image(image, size)
if imghdr.what(None, image) != "png":
raise ValueError("Non-PNG data received")
cache.set(key, image, ex=rex(3, 5))
return make_response(image, format)
except Exception as error:
print("Error requesting {}: {}".format(
flask.request.full_path, str(error)))
image = resize_image(FALLBACK_PNG, size)
cache.set(key, image, ex=7200)
return make_response(image, format, 7200)
| [
"flask.request.args.get",
"requests.Session",
"flask.Flask",
"re.compile",
"pickle.dumps",
"base64.b64encode",
"io.BytesIO",
"pickle.loads",
"tweepy.AppAuthHandler",
"redis.from_url",
"json.dumps",
"dotenv.load_dotenv",
"tweepy.API",
"xml.etree.ElementTree.fromstring",
"traceback.print_e... | [((1437, 1457), 'dotenv.load_dotenv', 'dotenv.load_dotenv', ([], {}), '()\n', (1455, 1457), False, 'import dotenv\n'), ((1695, 1716), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (1706, 1716), False, 'import flask\n'), ((2054, 2159), 'requests.adapters.HTTPAdapter', 'requests.adapters.HTTPAdapter', ([], {'pool_connections': '(10)', 'pool_maxsize': '(100)', 'max_retries': '(0)', 'pool_block': '(False)'}), '(pool_connections=10, pool_maxsize=100,\n max_retries=0, pool_block=False)\n', (2083, 2159), False, 'import requests\n'), ((2282, 2300), 'requests.Session', 'requests.Session', ([], {}), '()\n', (2298, 2300), False, 'import requests\n'), ((5987, 6011), 'functools.lru_cache', 'functools.lru_cache', (['(256)'], {}), '(256)\n', (6006, 6011), False, 'import functools\n'), ((6884, 6906), 'functools.lru_cache', 'functools.lru_cache', (['(1)'], {}), '(1)\n', (6903, 6906), False, 'import functools\n'), ((1541, 1573), 're.compile', 're.compile', (['"""^apple-touch-icon$"""'], {}), "('^apple-touch-icon$')\n", (1551, 1573), False, 'import re\n'), ((1579, 1623), 're.compile', 're.compile', (['"""^apple-touch-icon-precomposed$"""'], {}), "('^apple-touch-icon-precomposed$')\n", (1589, 1623), False, 'import re\n'), ((1629, 1649), 're.compile', 're.compile', (['"""^icon$"""'], {}), "('^icon$')\n", (1639, 1649), False, 'import re\n'), ((1655, 1684), 're.compile', 're.compile', (['"""^shortcut icon$"""'], {}), "('^shortcut icon$')\n", (1665, 1684), False, 'import re\n'), ((1803, 1847), 'redis.from_url', 'redis.from_url', (["os.environ['REDISCLOUD_URL']"], {}), "(os.environ['REDISCLOUD_URL'])\n", (1817, 1847), False, 'import redis\n'), ((1887, 1904), 'redislite.Redis', 'redislite.Redis', ([], {}), '()\n', (1902, 1904), False, 'import redislite\n'), ((2603, 2642), 'flask.request.args.get', 'flask.request.args.get', (['"""format"""', '"""png"""'], {}), "('format', 'png')\n", (2625, 2642), False, 'import flask\n'), ((3773, 3812), 
'flask.request.args.get', 'flask.request.args.get', (['"""format"""', '"""png"""'], {}), "('format', 'png')\n", (3795, 3812), False, 'import flask\n'), ((4815, 4853), 'bs4.BeautifulSoup', 'bs4.BeautifulSoup', (['page', '"""html.parser"""'], {}), "(page, 'html.parser')\n", (4832, 4853), False, 'import bs4\n'), ((6147, 6168), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (6161, 6168), False, 'import os\n'), ((6246, 6270), 'unicodedata.name', 'unicodedata.name', (['letter'], {}), '(letter)\n', (6262, 6270), False, 'import unicodedata\n'), ((6367, 6388), 'os.path.isfile', 'os.path.isfile', (['fname'], {}), '(fname)\n', (6381, 6388), False, 'import os\n'), ((7064, 7098), 'tweepy.AppAuthHandler', 'tweepy.AppAuthHandler', (['key', 'secret'], {}), '(key, secret)\n', (7085, 7098), False, 'import tweepy\n'), ((7110, 7126), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (7120, 7126), False, 'import tweepy\n'), ((7323, 7359), 'flask.request.args.get', 'flask.request.args.get', (['"""lang"""', '"""en"""'], {}), "('lang', 'en')\n", (7345, 7359), False, 'import flask\n'), ((8530, 8569), 'flask.request.args.get', 'flask.request.args.get', (['"""format"""', '"""png"""'], {}), "('format', 'png')\n", (8552, 8569), False, 'import flask\n'), ((11125, 11164), 'flask.request.args.get', 'flask.request.args.get', (['"""format"""', '"""png"""'], {}), "('format', 'png')\n", (11147, 11164), False, 'import flask\n'), ((15974, 16013), 'flask.request.args.get', 'flask.request.args.get', (['"""format"""', '"""png"""'], {}), "('format', 'png')\n", (15996, 16013), False, 'import flask\n'), ((3729, 3758), 're.sub', 're.sub', (['"""^.*?://"""', '""""""', 'domain'], {}), "('^.*?://', '', domain)\n", (3735, 3758), False, 'import re\n'), ((7912, 7940), 'xml.etree.ElementTree.fromstring', 'ET.fromstring', (['response.text'], {}), '(response.text)\n', (7925, 7940), True, 'import xml.etree.ElementTree as ET\n'), ((10875, 10893), 'pickle.dumps', 'pickle.dumps', (['data'], {}), 
'(data)\n', (10887, 10893), False, 'import pickle\n'), ((12219, 12241), 'base64.b64encode', 'base64.b64encode', (['data'], {}), '(data)\n', (12235, 12241), False, 'import base64\n'), ((12628, 12664), 'json.dumps', 'json.dumps', (['data'], {'ensure_ascii': '(False)'}), '(data, ensure_ascii=False)\n', (12638, 12664), False, 'import json\n'), ((15371, 15383), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (15381, 15383), False, 'import io\n'), ((16539, 16581), 're.sub', 're.sub', (['"""_([^/_.]+)(\\\\.\\\\w+)$"""', '"""\\\\2"""', 'url'], {}), "('_([^/_.]+)(\\\\.\\\\w+)$', '\\\\2', url)\n", (16545, 16581), False, 'import re\n'), ((3129, 3153), 'imghdr.what', 'imghdr.what', (['None', 'image'], {}), '(None, image)\n', (3140, 3153), False, 'import imghdr\n'), ((4244, 4268), 'imghdr.what', 'imghdr.what', (['None', 'image'], {}), '(None, image)\n', (4255, 4268), False, 'import imghdr\n'), ((7567, 7585), 'pickle.loads', 'pickle.loads', (['data'], {}), '(data)\n', (7579, 7585), False, 'import pickle\n'), ((8035, 8060), 'pickle.dumps', 'pickle.dumps', (['suggestions'], {}), '(suggestions)\n', (8047, 8060), False, 'import pickle\n'), ((10394, 10412), 'pickle.loads', 'pickle.loads', (['data'], {}), '(data)\n', (10406, 10412), False, 'import pickle\n'), ((11525, 11549), 'imghdr.what', 'imghdr.what', (['None', 'image'], {}), '(None, image)\n', (11536, 11549), False, 'import imghdr\n'), ((14933, 14950), 'io.BytesIO', 'io.BytesIO', (['image'], {}), '(image)\n', (14943, 14950), False, 'import io\n'), ((16737, 16761), 'imghdr.what', 'imghdr.what', (['None', 'image'], {}), '(None, image)\n', (16748, 16761), False, 'import imghdr\n'), ((8273, 8289), 'pickle.dumps', 'pickle.dumps', (['[]'], {}), '([])\n', (8285, 8289), False, 'import pickle\n'), ((9570, 9594), 'imghdr.what', 'imghdr.what', (['None', 'image'], {}), '(None, image)\n', (9581, 9594), False, 'import imghdr\n'), ((12271, 12291), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (12285, 12291), False, 'import 
random\n'), ((13046, 13066), 'random.randint', 'random.randint', (['(1)', '(3)'], {}), '(1, 3)\n', (13060, 13066), False, 'import random\n'), ((15756, 15777), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (15775, 15777), False, 'import traceback\n'), ((9418, 9435), 'io.BytesIO', 'io.BytesIO', (['image'], {}), '(image)\n', (9428, 9435), False, 'import io\n')] |
# Generated by Django 1.10.6 on 2017-03-13 04:46
# Modified by <NAME> on 2019-06-22 16:48
import django.db.models.deletion
import django.utils.timezone
from django.conf import settings
from django.db import migrations, models
import apps.core.models
class Migration(migrations.Migration):
initial = True
dependencies = [
("contenttypes", "0002_remove_content_type_name"),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name="Comment",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
(
"date_created",
apps.core.models.DateTimeCreatedField(
blank=True, default=django.utils.timezone.now, editable=False
),
),
(
"date_modified",
apps.core.models.DateTimeModifiedField(
blank=True, default=django.utils.timezone.now, editable=False
),
),
("object_id", models.PositiveIntegerField()),
("comment", models.TextField()),
(
"content_type",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="contenttypes.ContentType",
),
),
(
"user",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="users",
to=settings.AUTH_USER_MODEL,
),
),
],
options={"ordering": ("date_created",)},
)
]
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.PositiveIntegerField",
"django.db.migrations.swappable_dependency"
] | [((402, 459), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (433, 459), False, 'from django.db import migrations, models\n'), ((632, 725), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (648, 725), False, 'from django.db import migrations, models\n'), ((1375, 1404), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (1402, 1404), False, 'from django.db import migrations, models\n'), ((1435, 1453), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1451, 1453), False, 'from django.db import migrations, models\n'), ((1530, 1628), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""contenttypes.ContentType"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'contenttypes.ContentType')\n", (1547, 1628), False, 'from django.db import migrations, models\n'), ((1781, 1899), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""users"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='users', to=settings.AUTH_USER_MODEL)\n", (1798, 1899), False, 'from django.db import migrations, models\n')] |
# -*- coding: utf-8 -*-
"""
obspy.io.nied.knet - K-NET/KiK-net read support for ObsPy
=========================================================
Reading of the K-NET and KiK-net ASCII format as defined on
http://www.kyoshin.bosai.go.jp.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA @UnusedWildImport
import re
import numpy as np
from obspy import UTCDateTime, Stream, Trace
from obspy.core.trace import Stats
class KNETException(Exception):
pass
def _buffer_proxy(filename_or_buf, function, reset_fp=True,
file_mode="rb", *args, **kwargs):
"""
Calls a function with an open file or file-like object as the first
argument. If the file originally was a filename, the file will be
opened, otherwise it will just be passed to the underlying function.
:param filename_or_buf: File to pass.
:type filename_or_buf: str, open file, or file-like object.
:param function: The function to call.
:param reset_fp: If True, the file pointer will be set to the initial
position after the function has been called.
:type reset_fp: bool
:param file_mode: Mode to open file in if necessary.
"""
try:
position = filename_or_buf.tell()
is_buffer = True
except AttributeError:
is_buffer = False
if is_buffer is True:
ret_val = function(filename_or_buf, *args, **kwargs)
if reset_fp:
filename_or_buf.seek(position, 0)
return ret_val
else:
with open(filename_or_buf, file_mode) as fh:
return function(fh, *args, **kwargs)
def _is_knet_ascii(filename_or_buf):
"""
Checks if the file is a valid K-NET/KiK-net ASCII file.
:param filename_or_buf: File to test.
:type filename_or_buf: str or file-like object.
"""
try:
return _buffer_proxy(filename_or_buf, _internal_is_knet_ascii,
reset_fp=True)
# Happens for example when passing the data as a string which would be
# interpreted as a filename.
except (OSError, UnicodeDecodeError):
return False
def _internal_is_knet_ascii(buf):
"""
Checks if the file is a valid K-NET/KiK-net ASCII file.
:param buf: File to read.
:type buf: Open file or open file like object.
"""
first_string = buf.read(11).decode()
# File has less than 11 characters
if len(first_string) != 11:
return False
if first_string == 'Origin Time':
return True
return False
def _prep_hdr_line(name, line):
"""
Helper function to check the contents of a header line and split it.
:param name: String that the line should start with.
:type name: str
:param line: Line to check and split.
:type line: str
"""
if not line.startswith(name):
raise KNETException("Expected line to start with %s but got %s "
% (name, line))
else:
return line.split()
def _read_knet_hdr(hdrlines, convert_stnm=False, **kwargs):
"""
Read the header values into a dictionary.
:param hdrlines: List of the header lines of a a K-NET/KiK-net ASCII file
:type hdrlines: list
:param convert_stnm: For station names with 6 letters write the last two
letters of the station code to the 'location' field
:type convert_stnm: bool
"""
hdrdict = {'knet': {}}
hdrnames = ['Origin Time', 'Lat.', 'Long.', 'Depth. (km)', 'Mag.',
'Station Code', 'Station Lat.', 'Station Long.',
'Station Height(m)', 'Record Time', 'Sampling Freq(Hz)',
'Duration Time(s)', 'Dir.', 'Scale Factor', 'Max. Acc. (gal)',
'Last Correction', 'Memo.']
_i = 0
# Event information
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
dt = flds[2] + ' ' + flds[3]
dt = UTCDateTime.strptime(dt, '%Y/%m/%d %H:%M:%S')
# All times are in Japanese standard time which is 9 hours ahead of UTC
dt -= 9 * 3600.
hdrdict['knet']['evot'] = dt
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
lat = float(flds[1])
hdrdict['knet']['evla'] = lat
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
lon = float(flds[1])
hdrdict['knet']['evlo'] = lon
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
dp = float(flds[2])
hdrdict['knet']['evdp'] = dp
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
mag = float(flds[1])
hdrdict['knet']['mag'] = mag
# Station information
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
# K-NET and KiK-Net station names can be more than 5 characters long
# which will cause the station name to be truncated when writing the
# the trace as miniSEED; if convert_stnm is enabled, the last two
# letters of the station code are written to the 'location' field
stnm = flds[2]
location = ''
if convert_stnm and len(stnm) > 5:
location = stnm[-2:]
stnm = stnm[:-2]
if len(stnm) > 7:
raise KNETException(
"Station name can't be more than 7 characters long!")
hdrdict['station'] = stnm
hdrdict['location'] = location
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
hdrdict['knet']['stla'] = float(flds[2])
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
hdrdict['knet']['stlo'] = float(flds[2])
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
hdrdict['knet']['stel'] = float(flds[2])
# Data information
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
dt = flds[2] + ' ' + flds[3]
# A 15 s delay is added to the record time by the
# the K-NET and KiK-Net data logger
dt = UTCDateTime.strptime(dt, '%Y/%m/%d %H:%M:%S') - 15.0
# All times are in Japanese standard time which is 9 hours ahead of UTC
dt -= 9 * 3600.
hdrdict['starttime'] = dt
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
freqstr = flds[2]
m = re.search('[0-9]*', freqstr)
freq = int(m.group())
hdrdict['sampling_rate'] = freq
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
hdrdict['knet']['duration'] = float(flds[2])
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
channel = flds[1].replace('-', '')
kiknetcomps = {'1': 'NS1', '2': 'EW1', '3': 'UD1',
'4': 'NS2', '5': 'EW2', '6': 'UD2'}
if channel.strip() in kiknetcomps.keys(): # kiknet directions are 1-6
channel = kiknetcomps[channel.strip()]
hdrdict['channel'] = channel
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
eqn = flds[2]
num, denom = eqn.split('/')
num = float(re.search('[0-9]*', num).group())
denom = float(denom)
# convert the calibration from gal to m/s^2
hdrdict['calib'] = 0.01 * num / denom
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
acc = float(flds[3])
hdrdict['knet']['accmax'] = acc
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
dt = flds[2] + ' ' + flds[3]
dt = UTCDateTime.strptime(dt, '%Y/%m/%d %H:%M:%S')
# All times are in Japanese standard time which is 9 hours ahead of UTC
dt -= 9 * 3600.
hdrdict['knet']['last correction'] = dt
# The comment ('Memo') field is optional
_i += 1
flds = _prep_hdr_line(hdrnames[_i], hdrlines[_i])
if len(flds) > 1:
hdrdict['knet']['comment'] = ' '.join(flds[1:])
if len(hdrlines) != _i + 1:
raise KNETException("Expected %d header lines but got %d"
% (_i + 1, len(hdrlines)))
return hdrdict
def _read_knet_ascii(filename_or_buf, **kwargs):
"""
Reads a K-NET/KiK-net ASCII file and returns an ObsPy Stream object.
.. warning::
This function should NOT be called directly, it registers via the
ObsPy :func:`~obspy.core.stream.read` function, call this instead.
:param filename: K-NET/KiK-net ASCII file to be read.
:type filename: str or file-like object.
"""
return _buffer_proxy(filename_or_buf, _internal_read_knet_ascii, **kwargs)
def _internal_read_knet_ascii(buf, **kwargs):
"""
Reads a K-NET/KiK-net ASCII file and returns an ObsPy Stream object.
.. warning::
This function should NOT be called directly, it registers via the
ObsPy :func:`~obspy.core.stream.read` function, call this instead.
:param buf: File to read.
:type buf: Open file or open file like object.
"""
data = []
hdrdict = {}
cur_pos = buf.tell()
buf.seek(0, 2)
size = buf.tell()
buf.seek(cur_pos, 0)
# First read the headerlines
headerlines = []
while buf.tell() < size:
line = buf.readline().decode()
headerlines.append(line)
if line.startswith('Memo'):
hdrdict = _read_knet_hdr(headerlines, **kwargs)
break
while buf.tell() < size:
line = buf.readline()
parts = line.strip().split()
data += [float(p) for p in parts]
hdrdict['npts'] = len(data)
# The FDSN network code for the National Research Institute for Earth
# Science and Disaster Prevention (NEID JAPAN) is BO (Bosai-Ken Network)
hdrdict['network'] = 'BO'
data = np.array(data)
stats = Stats(hdrdict)
trace = Trace(data, header=stats)
return Stream([trace])
if __name__ == '__main__':
import doctest
doctest.testmod(exclude_empty=True)
| [
"obspy.Stream",
"obspy.UTCDateTime.strptime",
"numpy.array",
"doctest.testmod",
"obspy.Trace",
"obspy.core.trace.Stats",
"re.search"
] | [((3927, 3972), 'obspy.UTCDateTime.strptime', 'UTCDateTime.strptime', (['dt', '"""%Y/%m/%d %H:%M:%S"""'], {}), "(dt, '%Y/%m/%d %H:%M:%S')\n", (3947, 3972), False, 'from obspy import UTCDateTime, Stream, Trace\n'), ((6132, 6160), 're.search', 're.search', (['"""[0-9]*"""', 'freqstr'], {}), "('[0-9]*', freqstr)\n", (6141, 6160), False, 'import re\n'), ((7229, 7274), 'obspy.UTCDateTime.strptime', 'UTCDateTime.strptime', (['dt', '"""%Y/%m/%d %H:%M:%S"""'], {}), "(dt, '%Y/%m/%d %H:%M:%S')\n", (7249, 7274), False, 'from obspy import UTCDateTime, Stream, Trace\n'), ((9412, 9426), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (9420, 9426), True, 'import numpy as np\n'), ((9439, 9453), 'obspy.core.trace.Stats', 'Stats', (['hdrdict'], {}), '(hdrdict)\n', (9444, 9453), False, 'from obspy.core.trace import Stats\n'), ((9466, 9491), 'obspy.Trace', 'Trace', (['data'], {'header': 'stats'}), '(data, header=stats)\n', (9471, 9491), False, 'from obspy import UTCDateTime, Stream, Trace\n'), ((9503, 9518), 'obspy.Stream', 'Stream', (['[trace]'], {}), '([trace])\n', (9509, 9518), False, 'from obspy import UTCDateTime, Stream, Trace\n'), ((9571, 9606), 'doctest.testmod', 'doctest.testmod', ([], {'exclude_empty': '(True)'}), '(exclude_empty=True)\n', (9586, 9606), False, 'import doctest\n'), ((5856, 5901), 'obspy.UTCDateTime.strptime', 'UTCDateTime.strptime', (['dt', '"""%Y/%m/%d %H:%M:%S"""'], {}), "(dt, '%Y/%m/%d %H:%M:%S')\n", (5876, 5901), False, 'from obspy import UTCDateTime, Stream, Trace\n'), ((6843, 6867), 're.search', 're.search', (['"""[0-9]*"""', 'num'], {}), "('[0-9]*', num)\n", (6852, 6867), False, 'import re\n')] |
from flask import Blueprint, jsonify, request, redirect, abort, url_for, render_template
main = Blueprint('main', __name__)
# routes
@main.route('/', methods = ['GET'])
def Abort():
return redirect(url_for('main.index'))
# abort(403)
@main.route('/default.tpl', methods = ['GET'])
def index():
title = 'DE App'
return render_template('dflt.html', title = title)
| [
"flask.render_template",
"flask.Blueprint",
"flask.url_for"
] | [((96, 123), 'flask.Blueprint', 'Blueprint', (['"""main"""', '__name__'], {}), "('main', __name__)\n", (105, 123), False, 'from flask import Blueprint, jsonify, request, redirect, abort, url_for, render_template\n'), ((335, 376), 'flask.render_template', 'render_template', (['"""dflt.html"""'], {'title': 'title'}), "('dflt.html', title=title)\n", (350, 376), False, 'from flask import Blueprint, jsonify, request, redirect, abort, url_for, render_template\n'), ((202, 223), 'flask.url_for', 'url_for', (['"""main.index"""'], {}), "('main.index')\n", (209, 223), False, 'from flask import Blueprint, jsonify, request, redirect, abort, url_for, render_template\n')] |
from Artist import Artist
class Artwork:
def __init__(self, title='None', year_created=0,\
artist=Artist()):
self.title = title
self.year_created = year_created
self.artist = artist
def print_info(self):
self.artist.print_info()
print('Title: %s, %d' % (self.title, self.year_created))
| [
"Artist.Artist"
] | [((108, 116), 'Artist.Artist', 'Artist', ([], {}), '()\n', (114, 116), False, 'from Artist import Artist\n')] |
import datetime
from django.contrib.auth.models import User, Group
from django.utils import timezone
from rest_framework.test import APITestCase
import fvh_courier.models.base
from fvh_courier import models
class FVHAPITestCase(APITestCase):
def assert_dict_contains(self, superset, subset, path=''):
for key, expected in subset.items():
full_path = path + key
received = superset.get(key, None)
if isinstance(expected, dict) and isinstance(received, dict):
self.assert_dict_contains(superset[key], expected, full_path + '.')
else:
assert received == expected, 'Value mismatch for key {}: {} != {}'.format(
full_path, expected, received
)
def create_courier(self):
courier = models.Courier.objects.create(
company=models.CourierCompany.objects.create(name='Couriers r us'),
user=User.objects.create(
username='courier', first_name='Coranne', last_name='Courier', email='<EMAIL>'),
phone_number='+358505436657')
courier.company.coordinator = courier
courier.company.save()
return courier
def create_and_login_courier(self):
courier = self.create_courier()
self.client.force_login(courier.user)
return courier
def create_package(self, sender, **kwargs):
now = timezone.now()
return models.Package.objects.create(
pickup_at=fvh_courier.models.base.Address.objects.create(
street_address='Paradisäppelvägen 123',
postal_code='00123',
city='Ankeborg',
country='Ankerige',
lat=64.04,
lon=80.65
),
deliver_to=fvh_courier.models.base.Address.objects.create(
street_address='Helvetesapelsinvägen 666',
postal_code='00321',
city='Ankeborg',
country='Ankerige',
lat=64.54,
lon=80.05
),
height=20, width=30, depth=20, weight=2,
sender=sender,
recipient='Reginald Receiver',
recipient_phone='+358505436657',
earliest_pickup_time=now,
latest_pickup_time=now + datetime.timedelta(hours=1),
earliest_delivery_time=now + datetime.timedelta(hours=1),
latest_delivery_time=now + datetime.timedelta(hours=2),
**kwargs
)
def create_sender(self, **kwargs):
return models.Sender.objects.create(
user=User.objects.create(username='sender', first_name='Cedrik', last_name='Sender'),
address=models.Address.objects.create(
street_address="Paradisäppelvägen 123",
postal_code="00123",
city="Ankeborg",
country="Ankerige"),
phone_number='+358505436657', **kwargs)
| [
"fvh_courier.models.Address.objects.create",
"datetime.timedelta",
"django.utils.timezone.now",
"django.contrib.auth.models.User.objects.create",
"fvh_courier.models.CourierCompany.objects.create"
] | [((1422, 1436), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (1434, 1436), False, 'from django.utils import timezone\n'), ((871, 929), 'fvh_courier.models.CourierCompany.objects.create', 'models.CourierCompany.objects.create', ([], {'name': '"""Couriers r us"""'}), "(name='Couriers r us')\n", (907, 929), False, 'from fvh_courier import models\n'), ((948, 1052), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ([], {'username': '"""courier"""', 'first_name': '"""Coranne"""', 'last_name': '"""Courier"""', 'email': '"""<EMAIL>"""'}), "(username='courier', first_name='Coranne', last_name=\n 'Courier', email='<EMAIL>')\n", (967, 1052), False, 'from django.contrib.auth.models import User, Group\n'), ((2634, 2713), 'django.contrib.auth.models.User.objects.create', 'User.objects.create', ([], {'username': '"""sender"""', 'first_name': '"""Cedrik"""', 'last_name': '"""Sender"""'}), "(username='sender', first_name='Cedrik', last_name='Sender')\n", (2653, 2713), False, 'from django.contrib.auth.models import User, Group\n'), ((2735, 2866), 'fvh_courier.models.Address.objects.create', 'models.Address.objects.create', ([], {'street_address': '"""Paradisäppelvägen 123"""', 'postal_code': '"""00123"""', 'city': '"""Ankeborg"""', 'country': '"""Ankerige"""'}), "(street_address='Paradisäppelvägen 123',\n postal_code='00123', city='Ankeborg', country='Ankerige')\n", (2764, 2866), False, 'from fvh_courier import models\n'), ((2333, 2360), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (2351, 2360), False, 'import datetime\n'), ((2404, 2431), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(1)'}), '(hours=1)\n', (2422, 2431), False, 'import datetime\n'), ((2472, 2499), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(2)'}), '(hours=2)\n', (2490, 2499), False, 'import datetime\n')] |
# -*- coding: utf-8 -*-
import glob
import os
import json
from collections import OrderedDict
import itertools
import re
from datetime import datetime
import six
from six import iteritems
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy import or_
from .. import field_names, localization
from ..models import AccidentMarker, Involved, Vehicle
from .. import models
from ..utilities import ItmToWGS84, init_flask, CsvReader, time_delta, decode_hebrew,ImporterUI,truncate_tables
from functools import partial
import logging
failed_dirs = OrderedDict()
CONTENT_ENCODING = 'cp1255'
ACCIDENT_TYPE_REGEX = re.compile(r"Accidents Type (?P<type>\d)")
ACCIDENTS = 'accidents'
CITIES = 'cities'
STREETS = 'streets'
ROADS = "roads"
URBAN_INTERSECTION = 'urban_intersection'
NON_URBAN_INTERSECTION = 'non_urban_intersection'
DICTIONARY = "dictionary"
INVOLVED = "involved"
VEHICLES = "vehicles"
cbs_files = {
ACCIDENTS: "AccData.csv",
URBAN_INTERSECTION: "IntersectUrban.csv",
NON_URBAN_INTERSECTION: "IntersectNonUrban.csv",
STREETS: "DicStreets.csv",
DICTIONARY: "Dictionary.csv",
INVOLVED: "InvData.csv",
VEHICLES: "VehData.csv"
}
coordinates_converter = ItmToWGS84()
app = init_flask()
db = SQLAlchemy(app)
json_dumps = partial(json.dumps, encoding=models.db_encoding) if six.PY2 else json.dumps
def get_street(settlement_sign, street_sign, streets):
"""
extracts the street name using the settlement id and street id
"""
if settlement_sign not in streets:
# Changed to return blank string instead of None for correct presentation (Omer)
return u""
street_name = [decode_hebrew(x[field_names.street_name]) for x in streets[settlement_sign] if
x[field_names.street_sign] == street_sign]
# there should be only one street name, or none if it wasn't found.
return street_name[0] if len(street_name) == 1 else u""
def get_address(accident, streets):
"""
extracts the address of the main street.
tries to build the full address: <street_name> <street_number>, <settlement>,
but might return a partial one if unsuccessful.
"""
street = get_street(accident[field_names.settlement_sign], accident[field_names.street1], streets)
if not street:
return u""
# the home field is invalid if it's empty or if it contains 9999
home = accident[field_names.home] if accident[field_names.home] != 9999 else None
settlement = localization.get_city_name(accident[field_names.settlement_sign])
if not home and not settlement:
return street
if not home and settlement:
return u"{}, {}".format(street, settlement)
if home and not settlement:
return u"{} {}".format(street, home)
return u"{} {}, {}".format(street, home, settlement)
def get_streets(accident, streets):
"""
extracts the streets the accident occurred in.
every accident has a main street and a secondary street.
:return: a tuple containing both streets.
"""
main_street = get_address(accident, streets)
secondary_street = get_street(accident[field_names.settlement_sign], accident[field_names.street2], streets)
return main_street, secondary_street
def get_junction(accident, roads):
"""
extracts the junction from an accident
omerxx: added "km" parameter to the calculation to only show the right junction,
every non-urban accident shows nearest junction with distance and direction
:return: returns the junction or None if it wasn't found
"""
if accident["KM"] is not None and accident[field_names.non_urban_intersection] is None:
min_dist = 100000
key = (), ()
junc_km = 0
for option in roads:
if accident[field_names.road1] == option[0] and abs(accident["KM"]-option[2]) < min_dist:
min_dist = abs(accident["KM"]-option[2])
key = accident[field_names.road1], option[1], option[2]
junc_km = option[2]
junction = roads.get(key, None)
if junction:
if accident["KM"] - junc_km > 0:
direction = u"צפונית" if accident[field_names.road1] % 2 == 0 else u"מזרחית"
else:
direction = u"דרומית" if accident[field_names.road1] % 2 == 0 else u"מערבית"
if abs(float(accident["KM"] - junc_km)/10) >= 1:
string = str(abs(float(accident["KM"])-junc_km)/10) + u" ק״מ " + direction + u" ל" + \
decode_hebrew(junction)
elif 0 < abs(float(accident["KM"] - junc_km)/10) < 1:
string = str(int((abs(float(accident["KM"])-junc_km)/10)*1000)) + u" מטרים " + direction + u" ל" + \
decode_hebrew(junction)
else:
string = decode_hebrew(junction)
return string
else:
return u""
elif accident[field_names.non_urban_intersection] is not None:
key = accident[field_names.road1], accident[field_names.road2], accident["KM"]
junction = roads.get(key, None)
return decode_hebrew(junction) if junction else u""
else:
return u""
def parse_date(accident):
"""
parses an accident's date
"""
year = accident[field_names.accident_year]
month = accident[field_names.accident_month]
day = accident[field_names.accident_day]
'''
hours calculation explanation - The value of the hours is between 1 to 96.
These values represent 15 minutes each that start at 00:00:
1 equals 00:00, 2 equals 00:15, 3 equals 00:30 and so on.
'''
minutes = accident[field_names.accident_hour] * 15 - 15
hours = int(minutes // 60)
minutes %= 60
accident_date = datetime(year, month, day, hours, minutes, 0)
return accident_date
def load_extra_data(accident, streets, roads):
"""
loads more data about the accident
:return: a dictionary containing all the extra fields and their values
:rtype: dict
"""
extra_fields = {}
# if the accident occurred in an urban setting
if bool(accident[field_names.urban_intersection]):
main_street, secondary_street = get_streets(accident, streets)
if main_street:
extra_fields[field_names.street1] = main_street
if secondary_street:
extra_fields[field_names.street2] = secondary_street
# if the accident occurred in a non urban setting (highway, etc')
if bool(accident[field_names.non_urban_intersection]):
junction = get_junction(accident, roads)
if junction:
extra_fields[field_names.junction_name] = junction
# localize static accident values
for field in localization.get_supported_tables():
# if we have a localized field for that particular field, save the field value
# it will be fetched we deserialized
if accident[field] and localization.get_field(field, accident[field]):
extra_fields[field] = accident[field]
return extra_fields
def get_data_value(value):
"""
:returns: value for parameters which are not mandatory in an accident data
OR -1 if the parameter value does not exist
"""
return int(value) if value else -1
def import_accidents(provider_code, accidents, streets, roads, **kwargs):
logging.info("\tReading accident data from '%s'..." % os.path.basename(accidents.name()))
markers = []
for accident in accidents:
if field_names.x_coordinate not in accident or field_names.y_coordinate not in accident:
raise ValueError("Missing x and y coordinates")
if accident[field_names.x_coordinate] and accident[field_names.y_coordinate]:
lng, lat = coordinates_converter.convert(accident[field_names.x_coordinate],
accident[field_names.y_coordinate])
else:
lng, lat = None, None # Must insert everything to avoid foreign key failure
main_street, secondary_street = get_streets(accident, streets)
assert(int(provider_code) == int(accident[field_names.file_type]))
marker = {
"id": int(accident[field_names.id]),
"provider_code": int(provider_code),
"title": "Accident",
"description": json_dumps(load_extra_data(accident, streets, roads)),
"address": get_address(accident, streets),
"latitude": lat,
"longitude": lng,
"subtype": int(accident[field_names.accident_type]),
"severity": int(accident[field_names.accident_severity]),
"created": parse_date(accident),
"locationAccuracy": int(accident[field_names.igun]),
"roadType": int(accident[field_names.road_type]),
"roadShape": int(accident[field_names.road_shape]),
"dayType": int(accident[field_names.day_type]),
"unit": int(accident[field_names.unit]),
"mainStreet": main_street,
"secondaryStreet": secondary_street,
"junction": get_junction(accident, roads),
"one_lane": get_data_value(accident[field_names.one_lane]),
"multi_lane": get_data_value(accident[field_names.multi_lane]),
"speed_limit": get_data_value(accident[field_names.speed_limit]),
"intactness": get_data_value(accident[field_names.intactness]),
"road_width": get_data_value(accident[field_names.road_width]),
"road_sign": get_data_value(accident[field_names.road_sign]),
"road_light": get_data_value(accident[field_names.road_light]),
"road_control": get_data_value(accident[field_names.road_control]),
"weather": get_data_value(accident[field_names.weather]),
"road_surface": get_data_value(accident[field_names.road_surface]),
"road_object": get_data_value(accident[field_names.road_object]),
"object_distance": get_data_value(accident[field_names.object_distance]),
"didnt_cross": get_data_value(accident[field_names.didnt_cross]),
"cross_mode": get_data_value(accident[field_names.cross_mode]),
"cross_location": get_data_value(accident[field_names.cross_location]),
"cross_direction": get_data_value(accident[field_names.cross_direction]),
"road1": get_data_value(accident[field_names.road1]),
"road2": get_data_value(accident[field_names.road2]),
"km": float(accident[field_names.km]) if accident[field_names.km] else None,
"yishuv_symbol": get_data_value(accident[field_names.yishuv_symbol]),
"geo_area": get_data_value(accident[field_names.geo_area]),
"day_night": get_data_value(accident[field_names.day_night]),
"day_in_week": get_data_value(accident[field_names.day_in_week]),
"traffic_light": get_data_value(accident[field_names.traffic_light]),
"region": get_data_value(accident[field_names.region]),
"district": get_data_value(accident[field_names.district]),
"natural_area": get_data_value(accident[field_names.natural_area]),
"minizipali_status": get_data_value(accident[field_names.minizipali_status]),
"yishuv_shape": get_data_value(accident[field_names.yishuv_shape]),
}
markers.append(marker)
return markers
def import_involved(provider_code, involved, **kwargs):
logging.info("\tReading involved data from '%s'..." % os.path.basename(involved.name()))
involved_result = []
for involve in involved:
if not involve[field_names.id]: # skip lines with no accident id
continue
involved_result.append({
"accident_id": int(involve[field_names.id]),
"provider_code": int(provider_code),
"involved_type": int(involve[field_names.involved_type]),
"license_acquiring_date": int(involve[field_names.license_acquiring_date]),
"age_group": int(involve[field_names.age_group]),
"sex": get_data_value(involve[field_names.sex]),
"car_type": get_data_value(involve[field_names.car_type]),
"safety_measures": get_data_value(involve[field_names.safety_measures]),
"home_city": get_data_value(involve[field_names.home_city]),
"injury_severity": get_data_value(involve[field_names.injury_severity]),
"injured_type": get_data_value(involve[field_names.injured_type]),
"Injured_position": get_data_value(involve[field_names.injured_position]),
"population_type": get_data_value(involve[field_names.population_type]),
"home_district": get_data_value(involve[field_names.home_district]),
"home_nafa": get_data_value(involve[field_names.home_nafa]),
"home_area": get_data_value(involve[field_names.home_area]),
"home_municipal_status": get_data_value(involve[field_names.home_municipal_status]),
"home_residence_type": get_data_value(involve[field_names.home_residence_type]),
"hospital_time": get_data_value(involve[field_names.hospital_time]),
"medical_type": get_data_value(involve[field_names.medical_type]),
"release_dest": get_data_value(involve[field_names.release_dest]),
"safety_measures_use": get_data_value(involve[field_names.safety_measures_use]),
"late_deceased": get_data_value(involve[field_names.late_deceased]),
})
return involved_result
def import_vehicles(provider_code, vehicles, **kwargs):
logging.info("\tReading vehicles data from '%s'..." % os.path.basename(vehicles.name()))
vehicles_result = []
for vehicle in vehicles:
vehicles_result.append({
"accident_id": int(vehicle[field_names.id]),
"provider_code": int(provider_code),
"engine_volume": int(vehicle[field_names.engine_volume]),
"manufacturing_year": get_data_value(vehicle[field_names.manufacturing_year]),
"driving_directions": get_data_value(vehicle[field_names.driving_directions]),
"vehicle_status": get_data_value(vehicle[field_names.vehicle_status]),
"vehicle_attribution": get_data_value(vehicle[field_names.vehicle_attribution]),
"vehicle_type": get_data_value(vehicle[field_names.vehicle_type]),
"seats": get_data_value(vehicle[field_names.seats]),
"total_weight": get_data_value(vehicle[field_names.total_weight]),
})
return vehicles_result
def get_files(directory):
for name, filename in iteritems(cbs_files):
if name not in (STREETS, NON_URBAN_INTERSECTION, ACCIDENTS, INVOLVED, VEHICLES):
continue
files = [path for path in os.listdir(directory)
if filename.lower() in path.lower()]
amount = len(files)
if amount == 0:
raise ValueError("Not found: '%s'" % filename)
if amount > 1:
raise ValueError("Ambiguous: '%s'" % filename)
csv = CsvReader(os.path.join(directory, files[0]), encoding="cp1255")
if name == STREETS:
streets_map = {}
for settlement in itertools.groupby(csv, lambda street: street.get(field_names.settlement, "OTHER")):
key, val = tuple(settlement)
streets_map[key] = [{field_names.street_sign: x[field_names.street_sign],
field_names.street_name: x[field_names.street_name]} for x in val if
field_names.street_name in x and field_names.street_sign in x]
csv.close()
yield name, streets_map
elif name == NON_URBAN_INTERSECTION:
roads = {(x[field_names.road1], x[field_names.road2], x["KM"]): x[field_names.junction_name] for x in csv if
field_names.road1 in x and field_names.road2 in x}
csv.close()
yield ROADS, roads
elif name in (ACCIDENTS, INVOLVED, VEHICLES):
yield name, csv
def chunks(l, n, xrange):
    """Yield successive n-sized chunks from l.

    ``xrange`` is the range-producing callable to use (``range`` on Python 3,
    ``xrange`` on Python 2), supplied by the caller.
    """
    length = len(l)
    for start in xrange(0, length, n):
        yield l[start:start + n]
def import_to_datastore(directory, provider_code, batch_size):
    """
    goes through all the files in a given directory, parses and commits them

    Returns the number of new rows inserted, or 0 when the directory holds
    no recognised CBS files or a file lookup fails (the failure reason is
    recorded in ``failed_dirs``).
    """
    # Python 2/3 compatibility shim: Python 3 has no xrange builtin.
    try: xrange
    except NameError:
        xrange = range
    try:
        assert batch_size > 0
        files_from_cbs = dict(get_files(directory))
        if len(files_from_cbs) == 0:
            return 0
        logging.info("Importing '{}'".format(directory))
        started = datetime.now()
        new_items = 0
        # De-duplicate: skip accidents whose ids already exist in the DB.
        all_existing_accidents_ids = set(map(lambda x: x[0], db.session.query(AccidentMarker.id).all()))
        accidents = import_accidents(provider_code=provider_code, **files_from_cbs)
        accidents = [accident for accident in accidents if accident['id'] not in all_existing_accidents_ids]
        new_items += len(accidents)
        for accidents_chunk in chunks(accidents, batch_size, xrange):
            db.session.bulk_insert_mappings(AccidentMarker, accidents_chunk)
        # Same de-duplication strategy for the involved-persons rows.
        all_involved_accident_ids = set(map(lambda x: x[0], db.session.query(Involved.accident_id).all()))
        involved = import_involved(provider_code=provider_code, **files_from_cbs)
        involved = [x for x in involved if x['accident_id'] not in all_involved_accident_ids]
        for involved_chunk in chunks(involved, batch_size, xrange):
            db.session.bulk_insert_mappings(Involved, involved_chunk)
        new_items += len(involved)
        # ...and for the vehicles rows.
        all_vehicles_accident_ids = set(map(lambda x: x[0], db.session.query(Vehicle.accident_id).all()))
        vehicles = import_vehicles(provider_code=provider_code, **files_from_cbs)
        vehicles = [x for x in vehicles if x['accident_id'] not in all_vehicles_accident_ids]
        for vehicles_chunk in chunks(vehicles, batch_size, xrange):
            db.session.bulk_insert_mappings(Vehicle, vehicles_chunk)
        new_items += len(vehicles)
        logging.info("\t{0} items in {1}".format(new_items, time_delta(started)))
        return new_items
    except ValueError as e:
        # get_files raises ValueError for missing/ambiguous files; record the
        # reason and let the caller continue with other directories.
        failed_dirs[directory] = str(e)
        return 0
def delete_invalid_entries():
    """
    deletes all markers in the database with null latitude or longitude
    first deletes from tables Involved and Vehicle, then from table AccidentMarker
    """
    invalid_rows = db.session.query(AccidentMarker.id).filter(or_((AccidentMarker.longitude == None),
                                                                (AccidentMarker.latitude == None))).all()
    invalid_ids = [row[0] for row in invalid_rows]
    # Child tables (Involved, Vehicle) must be purged before the parent
    # AccidentMarker rows they reference.
    cleanup_targets = (
        ('Involved', db.session.query(Involved).filter(Involved.accident_id.in_(invalid_ids))),
        ('Vehicle', db.session.query(Vehicle).filter(Vehicle.accident_id.in_(invalid_ids))),
        ('AccidentMarker', db.session.query(AccidentMarker).filter(AccidentMarker.id.in_(invalid_ids))),
    )
    for table_name, query in cleanup_targets:
        if query.all():
            print('deleting invalid entries from ' + table_name)
            query.delete(synchronize_session='fetch')
    db.session.commit()
def get_provider_code(directory_name=None):
    """Return the provider code encoded in a directory name, or prompt for one.

    When ``directory_name`` matches the ``Accidents Type <d>`` pattern, the
    digit is returned; otherwise the user is asked until a numeric code is
    entered.
    """
    if directory_name:
        match = ACCIDENT_TYPE_REGEX.match(directory_name)
        if match:
            return int(match.groupdict()['type'])
    # No usable directory name: keep prompting until the input is numeric.
    while True:
        answer = six.moves.input("Directory provider code is invalid. Please enter a valid code: ")
        if answer.isdigit():
            return int(answer)
def main(specific_folder, delete_all, path, batch_size):
    """Entry point: import every CBS accident directory found under ``path``.

    Args:
        specific_folder: if truthy, import only the single directory picked via
            the ImporterUI instead of scanning two levels below ``path``.
        delete_all: passed to ImporterUI; controls offering a full table wipe.
        path: root directory holding the CBS data directories.
        batch_size: rows per bulk-insert chunk (must be positive).
    """
    import_ui = ImporterUI(path, specific_folder, delete_all)
    dir_name = import_ui.source_path()
    if specific_folder:
        dir_list = [dir_name]
    else:
        dir_list = glob.glob("{0}/*/*".format(dir_name))
    # wipe all the AccidentMarker and Vehicle and Involved data first
    if import_ui.is_delete_all():
        truncate_tables(db, (Vehicle, Involved, AccidentMarker))
    started = datetime.now()
    total = 0
    for directory in dir_list:
        # The provider code is encoded in the parent directory's name.
        parent_directory = os.path.basename(os.path.dirname(os.path.join(os.pardir, directory)))
        provider_code = get_provider_code(parent_directory)
        total += import_to_datastore(directory, provider_code, batch_size)
    # Drop rows that came through without coordinates.
    delete_invalid_entries()
    failed = ["\t'{0}' ({1})".format(directory, fail_reason) for directory, fail_reason in
              iteritems(failed_dirs)]
    logging.info("Finished processing all directories{0}{1}".format(", except:\n" if failed else "",
                                                                    "\n".join(failed)))
    logging.info("Total: {0} items in {1}".format(total, time_delta(started)))
| [
"datetime.datetime",
"collections.OrderedDict",
"os.listdir",
"six.moves.input",
"re.compile",
"os.path.join",
"datetime.datetime.now",
"flask.ext.sqlalchemy.SQLAlchemy",
"functools.partial",
"six.iteritems",
"sqlalchemy.or_"
] | [((551, 564), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (562, 564), False, 'from collections import OrderedDict\n'), ((616, 658), 're.compile', 're.compile', (['"""Accidents Type (?P<type>\\\\d)"""'], {}), "('Accidents Type (?P<type>\\\\d)')\n", (626, 658), False, 'import re\n'), ((1230, 1245), 'flask.ext.sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (1240, 1245), False, 'from flask.ext.sqlalchemy import SQLAlchemy\n'), ((1260, 1308), 'functools.partial', 'partial', (['json.dumps'], {'encoding': 'models.db_encoding'}), '(json.dumps, encoding=models.db_encoding)\n', (1267, 1308), False, 'from functools import partial\n'), ((5722, 5767), 'datetime.datetime', 'datetime', (['year', 'month', 'day', 'hours', 'minutes', '(0)'], {}), '(year, month, day, hours, minutes, 0)\n', (5730, 5767), False, 'from datetime import datetime\n'), ((14630, 14650), 'six.iteritems', 'iteritems', (['cbs_files'], {}), '(cbs_files)\n', (14639, 14650), False, 'from six import iteritems\n'), ((20301, 20315), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (20313, 20315), False, 'from datetime import datetime\n'), ((16675, 16689), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (16687, 16689), False, 'from datetime import datetime\n'), ((19697, 19784), 'six.moves.input', 'six.moves.input', (['"""Directory provider code is invalid. Please enter a valid code: """'], {}), "(\n 'Directory provider code is invalid. 
Please enter a valid code: ')\n", (19712, 19784), False, 'import six\n'), ((15092, 15125), 'os.path.join', 'os.path.join', (['directory', 'files[0]'], {}), '(directory, files[0])\n', (15104, 15125), False, 'import os\n'), ((20729, 20751), 'six.iteritems', 'iteritems', (['failed_dirs'], {}), '(failed_dirs)\n', (20738, 20751), False, 'from six import iteritems\n'), ((14798, 14819), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (14808, 14819), False, 'import os\n'), ((18574, 18644), 'sqlalchemy.or_', 'or_', (['(AccidentMarker.longitude == None)', '(AccidentMarker.latitude == None)'], {}), '(AccidentMarker.longitude == None, AccidentMarker.latitude == None)\n', (18577, 18644), False, 'from sqlalchemy import or_\n'), ((20421, 20455), 'os.path.join', 'os.path.join', (['os.pardir', 'directory'], {}), '(os.pardir, directory)\n', (20433, 20455), False, 'import os\n')] |
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# NOTE: this is adapted from the official TFX taxi pipeline sample
# You can find it here: https://github.com/tensorflow/tfx/tree/master/tfx/examples/chicago_taxi_pipeline
import os # pylint: disable=unused-import
# Pipeline name will be used to identify this pipeline
PIPELINE_NAME = 'my_pipeline'
# TODO: replace with your Google Cloud project
GOOGLE_CLOUD_PROJECT='your-cloud-project'
# TODO: replace with the GCS bucket where you'd like to store model artifacts
# Only include the bucket name here, without the 'gs://'
GCS_BUCKET_NAME = 'your-gcs-bucket'
# TODO: set your Google Cloud region below (or use us-central1)
GOOGLE_CLOUD_REGION = 'us-central1'
RUN_FN = 'pipeline.model.run_fn'
TRAIN_NUM_STEPS = 100
EVAL_NUM_STEPS = 100
BIG_QUERY_WITH_DIRECT_RUNNER_BEAM_PIPELINE_ARGS = [
'--project=' + GOOGLE_CLOUD_PROJECT,
'--temp_location=' + os.path.join('gs://', GCS_BUCKET_NAME, 'tmp'),
]
# The rate at which to sample rows from the Chicago Taxi dataset using BigQuery.
# The full taxi dataset is > 120M record. In the interest of resource
# savings and time, we've set the default for this example to be much smaller.
# Feel free to crank it up and process the full dataset!
_query_sample_rate = 0.0001 # Generate a 0.01% random sample.
# The query that extracts the examples from BigQuery. This sample uses
# a BigQuery public dataset from NOAA
BIG_QUERY_QUERY = """
SELECT
usa_wind,
usa_sshs
FROM
`bigquery-public-data.noaa_hurricanes.hurricanes`
WHERE
latitude > 19.5
AND latitude < 64.85
AND longitude > -161.755
AND longitude < -68.01
AND usa_wind IS NOT NULL
AND longitude IS NOT NULL
AND latitude IS NOT NULL
AND usa_sshs IS NOT NULL
AND usa_sshs > 0
"""
# A dict which contains the training job parameters to be passed to Google
# Cloud AI Platform. For the full set of parameters supported by Google Cloud AI
# Platform, refer to
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#Job
GCP_AI_PLATFORM_TRAINING_ARGS = {
'project': GOOGLE_CLOUD_PROJECT,
'region': 'us-central1',
# Starting from TFX 0.14, training on AI Platform uses custom containers:
# https://cloud.google.com/ml-engine/docs/containers-overview
# You can specify a custom container here. If not specified, TFX will use
# a public container image matching the installed version of TFX.
# Set your container name below.
'masterConfig': {
'imageUri': 'gcr.io/' + GOOGLE_CLOUD_PROJECT + '/tfx-pipeline'
},
# Note that if you do specify a custom container, ensure the entrypoint
# calls into TFX's run_executor script (tfx/scripts/run_executor.py)
}
# A dict which contains the serving job parameters to be passed to Google
# Cloud AI Platform. For the full set of parameters supported by Google Cloud AI
# Platform, refer to
# https://cloud.google.com/ml-engine/reference/rest/v1/projects.models
GCP_AI_PLATFORM_SERVING_ARGS = {
'model_name': PIPELINE_NAME,
'project_id': GOOGLE_CLOUD_PROJECT,
# The region to use when serving the model. See available regions here:
# https://cloud.google.com/ml-engine/docs/regions
'regions': [GOOGLE_CLOUD_REGION],
}
| [
"os.path.join"
] | [((1458, 1503), 'os.path.join', 'os.path.join', (['"""gs://"""', 'GCS_BUCKET_NAME', '"""tmp"""'], {}), "('gs://', GCS_BUCKET_NAME, 'tmp')\n", (1470, 1503), False, 'import os\n')] |
"""A run_fn method called by the TFX Trainer component."""
import os
import logging
from tfx import v1 as tfx
from tfx_taxifare_tips.model_training import defaults
from tfx_taxifare_tips.model_training import model_trainer
from tfx_taxifare_tips.model_training import model_exporter
# TFX Trainer will call this function.
def run_fn(fn_args: tfx.components.FnArgs):
    """Train, evaluate, and export the model based on given args.

    Called by the TFX Trainer component.

    Args:
        fn_args: Holds args used to train the model as name/value pairs. See
        https://www.tensorflow.org/tfx/api_docs/python/tfx/v1/components/FnArgs.
    """
    logging.info("Model Runner started...")
    logging.info("fn_args: %s", fn_args)
    logging.info("")

    try:
        log_dir = fn_args.model_run_dir
    except (KeyError, AttributeError):
        # BUG FIX: FnArgs exposes its fields as attributes, so a missing
        # ``model_run_dir`` raises AttributeError rather than KeyError; with
        # the original ``except KeyError`` this fallback was dead code.
        # Catching both keeps the fallback log directory working.
        log_dir = os.path.join(os.path.dirname(fn_args.serving_model_dir), "logs")

    # Fill in project defaults for any hyperparameter not supplied.
    hyperparameters = fn_args.hyperparameters
    if not hyperparameters:
        hyperparameters = {}
    hyperparameters = defaults.update_hyperparameters(hyperparameters)
    logging.info("Hyperparameter:")
    logging.info(hyperparameters)
    logging.info("")

    logging.info("Model Runner executing model trainer...")
    classifier = model_trainer.train(
        data_accessor=fn_args.data_accessor,
        train_data_dir=fn_args.train_files,
        eval_data_dir=fn_args.eval_files,
        tft_output_dir=fn_args.transform_output,
        log_dir=log_dir,
        hyperparameters=hyperparameters,
    )

    logging.info("Model Runner executing model evaluation...")
    classifier = model_trainer.evaluate(
        classifier=classifier,
        data_accessor=fn_args.data_accessor,
        eval_data_dir=fn_args.eval_files,
        tft_output_dir=fn_args.transform_output,
        hyperparameters=hyperparameters,
    )

    logging.info("Model Runner executing exporter...")
    model_exporter.export_serving_model(
        classifier=classifier,
        serving_model_dir=fn_args.serving_model_dir,
        raw_schema_location=fn_args.schema_path,
        tft_output_dir=fn_args.transform_output,
    )
    logging.info("Model Runner completed.")
| [
"tfx_taxifare_tips.model_training.model_trainer.train",
"tfx_taxifare_tips.model_training.defaults.update_hyperparameters",
"os.path.dirname",
"tfx_taxifare_tips.model_training.model_trainer.evaluate",
"tfx_taxifare_tips.model_training.model_exporter.export_serving_model",
"logging.info"
] | [((606, 645), 'logging.info', 'logging.info', (['"""Model Runner started..."""'], {}), "('Model Runner started...')\n", (618, 645), False, 'import logging\n'), ((651, 687), 'logging.info', 'logging.info', (['"""fn_args: %s"""', 'fn_args'], {}), "('fn_args: %s', fn_args)\n", (663, 687), False, 'import logging\n'), ((693, 709), 'logging.info', 'logging.info', (['""""""'], {}), "('')\n", (705, 709), False, 'import logging\n'), ((1002, 1050), 'tfx_taxifare_tips.model_training.defaults.update_hyperparameters', 'defaults.update_hyperparameters', (['hyperparameters'], {}), '(hyperparameters)\n', (1033, 1050), False, 'from tfx_taxifare_tips.model_training import defaults\n'), ((1056, 1087), 'logging.info', 'logging.info', (['"""Hyperparameter:"""'], {}), "('Hyperparameter:')\n", (1068, 1087), False, 'import logging\n'), ((1093, 1122), 'logging.info', 'logging.info', (['hyperparameters'], {}), '(hyperparameters)\n', (1105, 1122), False, 'import logging\n'), ((1128, 1144), 'logging.info', 'logging.info', (['""""""'], {}), "('')\n", (1140, 1144), False, 'import logging\n'), ((1152, 1207), 'logging.info', 'logging.info', (['"""Model Runner executing model trainer..."""'], {}), "('Model Runner executing model trainer...')\n", (1164, 1207), False, 'import logging\n'), ((1226, 1453), 'tfx_taxifare_tips.model_training.model_trainer.train', 'model_trainer.train', ([], {'data_accessor': 'fn_args.data_accessor', 'train_data_dir': 'fn_args.train_files', 'eval_data_dir': 'fn_args.eval_files', 'tft_output_dir': 'fn_args.transform_output', 'log_dir': 'log_dir', 'hyperparameters': 'hyperparameters'}), '(data_accessor=fn_args.data_accessor, train_data_dir=\n fn_args.train_files, eval_data_dir=fn_args.eval_files, tft_output_dir=\n fn_args.transform_output, log_dir=log_dir, hyperparameters=hyperparameters)\n', (1245, 1453), False, 'from tfx_taxifare_tips.model_training import model_trainer\n'), ((1513, 1571), 'logging.info', 'logging.info', (['"""Model Runner executing model 
evaluation..."""'], {}), "('Model Runner executing model evaluation...')\n", (1525, 1571), False, 'import logging\n'), ((1590, 1790), 'tfx_taxifare_tips.model_training.model_trainer.evaluate', 'model_trainer.evaluate', ([], {'classifier': 'classifier', 'data_accessor': 'fn_args.data_accessor', 'eval_data_dir': 'fn_args.eval_files', 'tft_output_dir': 'fn_args.transform_output', 'hyperparameters': 'hyperparameters'}), '(classifier=classifier, data_accessor=fn_args.\n data_accessor, eval_data_dir=fn_args.eval_files, tft_output_dir=fn_args\n .transform_output, hyperparameters=hyperparameters)\n', (1612, 1790), False, 'from tfx_taxifare_tips.model_training import model_trainer\n'), ((1841, 1891), 'logging.info', 'logging.info', (['"""Model Runner executing exporter..."""'], {}), "('Model Runner executing exporter...')\n", (1853, 1891), False, 'import logging\n'), ((1897, 2091), 'tfx_taxifare_tips.model_training.model_exporter.export_serving_model', 'model_exporter.export_serving_model', ([], {'classifier': 'classifier', 'serving_model_dir': 'fn_args.serving_model_dir', 'raw_schema_location': 'fn_args.schema_path', 'tft_output_dir': 'fn_args.transform_output'}), '(classifier=classifier,\n serving_model_dir=fn_args.serving_model_dir, raw_schema_location=\n fn_args.schema_path, tft_output_dir=fn_args.transform_output)\n', (1932, 2091), False, 'from tfx_taxifare_tips.model_training import model_exporter\n'), ((2132, 2171), 'logging.info', 'logging.info', (['"""Model Runner completed."""'], {}), "('Model Runner completed.')\n", (2144, 2171), False, 'import logging\n'), ((817, 859), 'os.path.dirname', 'os.path.dirname', (['fn_args.serving_model_dir'], {}), '(fn_args.serving_model_dir)\n', (832, 859), False, 'import os\n')] |