hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f723b4692781630404d7488f4a6f1b33fb840d4b | 2,190 | py | Python | newssimilarity/model/article.py | imackerracher/NewsSimilarity | 2e6a85dc9e95ef94bec2339987950f4e88f5d909 | [
"Apache-2.0"
] | null | null | null | newssimilarity/model/article.py | imackerracher/NewsSimilarity | 2e6a85dc9e95ef94bec2339987950f4e88f5d909 | [
"Apache-2.0"
] | null | null | null | newssimilarity/model/article.py | imackerracher/NewsSimilarity | 2e6a85dc9e95ef94bec2339987950f4e88f5d909 | [
"Apache-2.0"
] | null | null | null | """
Article object, read in from json
"""
class Article(object):
def __init__(self, head, lead, body,
date, time, writers,
publisher, source_outlet,
additional_information, annotation_list,
feature_list, raw_article):
"""
:param head: Headline of the article
:param lead: Lead paragraph of the article
:param body: Rest of the article
:param date: Publishing date of the article. If article was modified, the latest date
:param time: Publishing time of the article. If article was modified, the latest time
:param writers: The writers that contributed to the article
:param publisher: The news outlet that published the article
:param source_outlet: If the publisher took the article from a different source, it will be contained in this
variable
:param additional_information: E.g.: "...contributed to this report"
:param annotation_list: A list containing all the annotations as annotation objects
:param feature_list: A list containing all the features, as feature objects
:param raw_article: The raw article without any processing, etc. (contains the annotations as well)
"""
self.head = head
self.lead = lead
self.body = body
self.date = date
self.time = time
self.writers = writers
self.publisher = publisher
self.source_outlet = source_outlet
self.additional_information = additional_information
self.annotation_list = annotation_list
self.feature_list = feature_list
self.raw_article = raw_article
def get_instances(self, instance_type):
"""
Utility function to return a list of instances
:param instance_type: Type of instance (e.g. part of speech)
:return: List with instances
"""
instances = [instance
for feature in self.feature_list
if feature.feature_name == instance_type
for instance in feature.feature_instance_list]
return instances
| 40.555556 | 117 | 0.637443 |
class Article(object):
def __init__(self, head, lead, body,
date, time, writers,
publisher, source_outlet,
additional_information, annotation_list,
feature_list, raw_article):
self.head = head
self.lead = lead
self.body = body
self.date = date
self.time = time
self.writers = writers
self.publisher = publisher
self.source_outlet = source_outlet
self.additional_information = additional_information
self.annotation_list = annotation_list
self.feature_list = feature_list
self.raw_article = raw_article
def get_instances(self, instance_type):
instances = [instance
for feature in self.feature_list
if feature.feature_name == instance_type
for instance in feature.feature_instance_list]
return instances
| true | true |
f723b46aad1421a2e114cd58b3973ff37b96a76a | 3,323 | py | Python | plugins/mustGatherAccessor.py | rvanderp3/mg-helper | 349ac88e52bb3895044cb3e1f30046522cad9b0c | [
"Apache-2.0"
] | null | null | null | plugins/mustGatherAccessor.py | rvanderp3/mg-helper | 349ac88e52bb3895044cb3e1f30046522cad9b0c | [
"Apache-2.0"
] | null | null | null | plugins/mustGatherAccessor.py | rvanderp3/mg-helper | 349ac88e52bb3895044cb3e1f30046522cad9b0c | [
"Apache-2.0"
] | null | null | null | import yaml
import os.path
from os import path
import tarfile
class MustGatherAccessor:
tarcache = None
tar = None
name = "must-gather accessor"
def __init__ (self,filename):
self.filename = filename
def readfile(self):
if path.isdir(self.filename):
pass
else:
try:
# attempt to load a tar.gz file
self.tar = tarfile.open(self.filename,"r:gz")
except ValueError:
try:
self.tar = tarfile.open(self.filename,"r")
except ValueError:
raise
self.buildTarCache(self.tar)
def buildTarCache(self,tar):
self.tarcache = {}
for member in tar.getmembers():
if member.isfile() != True:
continue
# sanitize the paths - this will make a life a little easier for our plugins
splits = member.name.split("/")
if splits[0].startswith("must-gather"):
splits.remove(splits[0])
first=True
member.name=""
for part in splits:
if first:
first = False
else:
member.name+="/"
member.name+=part
self.tarcache[member.name] = member
def getValueFromObj(self,obj,*props):
if obj != None:
for prop in props:
if prop in obj:
obj = obj[prop]
else:
obj = None
break
return obj
def getFileContent(self,thepath):
content = None
if self.tarcache == None:
if path.exists(thepath):
f = open(thepath,"r")
content = f.read()
else:
f=self.tar.extractfile(thepath)
content = f.read()
return content
def getEntriesFromPath(self,path):
entries = []
if self.tarcache != None:
for key in self.tarcache:
if path in key:
entries.append(key)
else:
fullPath = os.path.join(self.filename,path)
if os.path.exists(fullPath):
pathsEntries = os.listdir(fullPath)
for entry in pathsEntries:
foundPath = os.path.join(fullPath,entry)
if(os.path.isfile(foundPath)):
entries.append(foundPath)
return entries
def getDirsFromPath(self,path):
entries = []
if self.tarcache != None:
for key in self.tarcache:
if path in key:
entries.append(key)
else:
fullPath = os.path.join(self.filename,path)
if os.path.exists(fullPath):
pathsEntries = os.listdir(fullPath)
for entry in pathsEntries:
foundPath = os.path.join(fullPath,entry)
if(os.path.isdir(foundPath)):
entries.append(foundPath)
return entries
def parseYaml(self, content):
return yaml.safe_load(content)
| 31.647619 | 88 | 0.476076 | import yaml
import os.path
from os import path
import tarfile
class MustGatherAccessor:
tarcache = None
tar = None
name = "must-gather accessor"
def __init__ (self,filename):
self.filename = filename
def readfile(self):
if path.isdir(self.filename):
pass
else:
try:
self.tar = tarfile.open(self.filename,"r:gz")
except ValueError:
try:
self.tar = tarfile.open(self.filename,"r")
except ValueError:
raise
self.buildTarCache(self.tar)
def buildTarCache(self,tar):
self.tarcache = {}
for member in tar.getmembers():
if member.isfile() != True:
continue
splits = member.name.split("/")
if splits[0].startswith("must-gather"):
splits.remove(splits[0])
first=True
member.name=""
for part in splits:
if first:
first = False
else:
member.name+="/"
member.name+=part
self.tarcache[member.name] = member
def getValueFromObj(self,obj,*props):
if obj != None:
for prop in props:
if prop in obj:
obj = obj[prop]
else:
obj = None
break
return obj
def getFileContent(self,thepath):
content = None
if self.tarcache == None:
if path.exists(thepath):
f = open(thepath,"r")
content = f.read()
else:
f=self.tar.extractfile(thepath)
content = f.read()
return content
def getEntriesFromPath(self,path):
entries = []
if self.tarcache != None:
for key in self.tarcache:
if path in key:
entries.append(key)
else:
fullPath = os.path.join(self.filename,path)
if os.path.exists(fullPath):
pathsEntries = os.listdir(fullPath)
for entry in pathsEntries:
foundPath = os.path.join(fullPath,entry)
if(os.path.isfile(foundPath)):
entries.append(foundPath)
return entries
def getDirsFromPath(self,path):
entries = []
if self.tarcache != None:
for key in self.tarcache:
if path in key:
entries.append(key)
else:
fullPath = os.path.join(self.filename,path)
if os.path.exists(fullPath):
pathsEntries = os.listdir(fullPath)
for entry in pathsEntries:
foundPath = os.path.join(fullPath,entry)
if(os.path.isdir(foundPath)):
entries.append(foundPath)
return entries
def parseYaml(self, content):
return yaml.safe_load(content)
| true | true |
f723b4994184ba630e2af0efd757f41de6e1bb07 | 5,242 | py | Python | vision/tests/unit/gapic/v1/test_image_annotator_client_v1.py | nielm/google-cloud-python | fd126fdea34206109eb00d675374ff7dc4dcc5ef | [
"Apache-2.0"
] | 1 | 2019-01-23T21:54:51.000Z | 2019-01-23T21:54:51.000Z | vision/tests/unit/gapic/v1/test_image_annotator_client_v1.py | nielm/google-cloud-python | fd126fdea34206109eb00d675374ff7dc4dcc5ef | [
"Apache-2.0"
] | 1 | 2018-04-06T19:51:23.000Z | 2018-04-06T19:51:23.000Z | vision/tests/unit/gapic/v1/test_image_annotator_client_v1.py | nielm/google-cloud-python | fd126fdea34206109eb00d675374ff7dc4dcc5ef | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.rpc import status_pb2
from google.cloud import vision_v1
from google.cloud.vision_v1.proto import image_annotator_pb2
from google.longrunning import operations_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self, method, request_serializer=None, response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestImageAnnotatorClient(object):
def test_batch_annotate_images(self):
# Setup Expected Response
expected_response = {}
expected_response = image_annotator_pb2.BatchAnnotateImagesResponse(
**expected_response
)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1.ImageAnnotatorClient()
# Setup Request
requests = []
response = client.batch_annotate_images(requests)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = image_annotator_pb2.BatchAnnotateImagesRequest(
requests=requests
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_batch_annotate_images_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1.ImageAnnotatorClient()
# Setup request
requests = []
with pytest.raises(CustomException):
client.batch_annotate_images(requests)
def test_async_batch_annotate_files(self):
# Setup Expected Response
expected_response = {}
expected_response = image_annotator_pb2.AsyncBatchAnnotateFilesResponse(
**expected_response
)
operation = operations_pb2.Operation(
name="operations/test_async_batch_annotate_files", done=True
)
operation.response.Pack(expected_response)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1.ImageAnnotatorClient()
# Setup Request
requests = []
response = client.async_batch_annotate_files(requests)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = image_annotator_pb2.AsyncBatchAnnotateFilesRequest(
requests=requests
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_async_batch_annotate_files_exception(self):
# Setup Response
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_async_batch_annotate_files_exception", done=True
)
operation.error.CopyFrom(error)
# Mock the API response
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1.ImageAnnotatorClient()
# Setup Request
requests = []
response = client.async_batch_annotate_files(requests)
exception = response.exception()
assert exception.errors[0] == error
| 32.968553 | 87 | 0.681801 |
import mock
import pytest
from google.rpc import status_pb2
from google.cloud import vision_v1
from google.cloud.vision_v1.proto import image_annotator_pb2
from google.longrunning import operations_pb2
class MultiCallableStub(object):
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self, method, request_serializer=None, response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestImageAnnotatorClient(object):
def test_batch_annotate_images(self):
expected_response = {}
expected_response = image_annotator_pb2.BatchAnnotateImagesResponse(
**expected_response
)
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1.ImageAnnotatorClient()
requests = []
response = client.batch_annotate_images(requests)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = image_annotator_pb2.BatchAnnotateImagesRequest(
requests=requests
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_batch_annotate_images_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1.ImageAnnotatorClient()
requests = []
with pytest.raises(CustomException):
client.batch_annotate_images(requests)
def test_async_batch_annotate_files(self):
expected_response = {}
expected_response = image_annotator_pb2.AsyncBatchAnnotateFilesResponse(
**expected_response
)
operation = operations_pb2.Operation(
name="operations/test_async_batch_annotate_files", done=True
)
operation.response.Pack(expected_response)
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1.ImageAnnotatorClient()
requests = []
response = client.async_batch_annotate_files(requests)
result = response.result()
assert expected_response == result
assert len(channel.requests) == 1
expected_request = image_annotator_pb2.AsyncBatchAnnotateFilesRequest(
requests=requests
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_async_batch_annotate_files_exception(self):
error = status_pb2.Status()
operation = operations_pb2.Operation(
name="operations/test_async_batch_annotate_files_exception", done=True
)
operation.error.CopyFrom(error)
channel = ChannelStub(responses=[operation])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = vision_v1.ImageAnnotatorClient()
requests = []
response = client.async_batch_annotate_files(requests)
exception = response.exception()
assert exception.errors[0] == error
| true | true |
f723b4aa1f30b8829647c7182eab4c6610d10e07 | 845 | py | Python | lib/jayrboltonTest/contig_filter_util.py | jayrbolton/kbase_tutorial_contig_filter | 66b5f50db8f584e13a0923bda6f0008c12438d88 | [
"MIT"
] | null | null | null | lib/jayrboltonTest/contig_filter_util.py | jayrbolton/kbase_tutorial_contig_filter | 66b5f50db8f584e13a0923bda6f0008c12438d88 | [
"MIT"
] | null | null | null | lib/jayrboltonTest/contig_filter_util.py | jayrbolton/kbase_tutorial_contig_filter | 66b5f50db8f584e13a0923bda6f0008c12438d88 | [
"MIT"
] | null | null | null | from Bio import SeqIO
def contig_filter(input_path, filtered_path, min_length):
# Inside {username}ContigFilterImpl#run_{username}ContigFilter_max, after you have fetched the fasta file:
# Parse the downloaded file in FASTA format
parsed_assembly = SeqIO.parse(input_path, 'fasta')
min_length = min_length
# Keep a list of contigs greater than min_length
good_contigs = []
# total contigs regardless of length
n_total = 0
# total contigs over the min_length
n_remaining = 0
for record in list(parsed_assembly):
n_total += 1
if len(record.seq) >= min_length:
good_contigs.append(record)
n_remaining += 1
output = {
'n_total': n_total,
'n_remaining': n_remaining
}
SeqIO.write(good_contigs, filtered_path, 'fasta')
return output
| 32.5 | 110 | 0.678107 | from Bio import SeqIO
def contig_filter(input_path, filtered_path, min_length):
ngth = min_length
good_contigs = []
n_total = 0
n_remaining = 0
for record in list(parsed_assembly):
n_total += 1
if len(record.seq) >= min_length:
good_contigs.append(record)
n_remaining += 1
output = {
'n_total': n_total,
'n_remaining': n_remaining
}
SeqIO.write(good_contigs, filtered_path, 'fasta')
return output
| true | true |
f723b5aababa8476e324ce06e1a59d0b7db72c76 | 1,881 | py | Python | okapi/urls.py | jbbqqf/okapi | 3db29ef1e15685fae304190bd176f75c4e367d03 | [
"BSD-3-Clause"
] | null | null | null | okapi/urls.py | jbbqqf/okapi | 3db29ef1e15685fae304190bd176f75c4e367d03 | [
"BSD-3-Clause"
] | null | null | null | okapi/urls.py | jbbqqf/okapi | 3db29ef1e15685fae304190bd176f75c4e367d03 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""okapi URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^okauth/', include('okauth.urls')),
url(r'^users/', include('profiles.urls')),
url(r'^users/', include('groups.urls')),
url(r'^users/', include('online.urls')),
url(r'^chat/', include('chat.urls')),
url(r'^share/', include('fileshare.urls')),
url(r'^prefs/', include('preferences.urls')),
url(r'^news/', include('news.urls')),
url(r'^grades/', include('grades.urls')),
url(r'^library/', include('library.urls')),
url(r'^score/', include('score.urls')),
url(r'^button/', include('button.urls')),
url(r'^{}(.*)$'.format(settings.PRIVATE_MEDIA_URL.lstrip('/')),
'common.private_media.serve_private_media'),
url(r'^docs/', include('rest_framework_swagger.urls')),
]
if settings.DEBUG:
# admin webinterface
urlpatterns += staticfiles_urlpatterns()
# serving media files
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| 35.490566 | 77 | 0.66773 |
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^okauth/', include('okauth.urls')),
url(r'^users/', include('profiles.urls')),
url(r'^users/', include('groups.urls')),
url(r'^users/', include('online.urls')),
url(r'^chat/', include('chat.urls')),
url(r'^share/', include('fileshare.urls')),
url(r'^prefs/', include('preferences.urls')),
url(r'^news/', include('news.urls')),
url(r'^grades/', include('grades.urls')),
url(r'^library/', include('library.urls')),
url(r'^score/', include('score.urls')),
url(r'^button/', include('button.urls')),
url(r'^{}(.*)$'.format(settings.PRIVATE_MEDIA_URL.lstrip('/')),
'common.private_media.serve_private_media'),
url(r'^docs/', include('rest_framework_swagger.urls')),
]
if settings.DEBUG:
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL,
document_root=settings.MEDIA_ROOT)
| true | true |
f723b70c324fc4c0f3106b87f660b2c3aa92b9e8 | 6,885 | py | Python | thingsboard_gateway/tb_utility/tb_utility.py | meuron-io/thingsboard-gateway | 26e7299047507c74613aeea1f19dcdba2e1e5644 | [
"Apache-2.0"
] | 1,123 | 2017-02-07T13:09:40.000Z | 2022-03-30T10:40:48.000Z | thingsboard_gateway/tb_utility/tb_utility.py | meuron-io/thingsboard-gateway | 26e7299047507c74613aeea1f19dcdba2e1e5644 | [
"Apache-2.0"
] | 655 | 2017-03-07T17:25:55.000Z | 2022-03-31T07:59:53.000Z | thingsboard_gateway/tb_utility/tb_utility.py | meuron-io/thingsboard-gateway | 26e7299047507c74613aeea1f19dcdba2e1e5644 | [
"Apache-2.0"
] | 648 | 2017-02-07T13:32:30.000Z | 2022-03-31T05:17:55.000Z | # Copyright 2021. ThingsBoard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from logging import getLogger
from re import search, findall
from jsonpath_rw import parse
from simplejson import JSONDecodeError, dumps, loads
log = getLogger("service")
class TBUtility:
@staticmethod
def decode(message):
try:
if isinstance(message.payload, bytes):
content = loads(message.payload.decode("utf-8", "ignore"))
else:
content = loads(message.payload)
except JSONDecodeError:
try:
content = message.payload.decode("utf-8", "ignore")
except JSONDecodeError:
content = message.payload
return content
@staticmethod
def validate_converted_data(data):
error = None
if error is None and not data.get("deviceName"):
error = 'deviceName is empty in data: '
if error is None and not data.get("deviceType"):
error = 'deviceType is empty in data: '
if error is None:
got_attributes = False
got_telemetry = False
if data.get("attributes") is not None and len(data.get("attributes")) > 0:
got_attributes = True
if data.get("telemetry") is not None:
for entry in data.get("telemetry"):
if (entry.get("ts") is not None and len(entry.get("values")) > 0) or entry.get("ts") is None:
got_telemetry = True
break
if got_attributes is False and got_telemetry is False:
error = 'No telemetry and attributes in data: '
if error is not None:
json_data = dumps(data)
if isinstance(json_data, bytes):
log.error(error + json_data.decode("UTF-8"))
else:
log.error(error + json_data)
return False
return True
@staticmethod
def topic_to_regex(topic):
return topic.replace("+", "[^/]+").replace("#", ".+")
@staticmethod
def regex_to_topic(regex):
return regex.replace("[^/]+", "+").replace(".+", "#")
@staticmethod
def get_value(expression, body=None, value_type="string", get_tag=False, expression_instead_none=False):
if isinstance(body, str):
body = loads(body)
if not expression:
return ''
positions = search(r'\${(?:(.*))}', expression)
if positions is not None:
p1 = positions.regs[-1][0]
p2 = positions.regs[-1][1]
else:
p1 = 0
p2 = len(expression)
target_str = str(expression[p1:p2])
if get_tag:
return target_str
full_value = None
try:
if isinstance(body, dict) and target_str.split()[0] in body:
if value_type.lower() == "string":
full_value = str(expression[0: max(p1 - 2, 0)]) + str(body[target_str.split()[0]]) + str(expression[
p2 + 1:len(
expression)])
else:
full_value = body.get(target_str.split()[0])
elif isinstance(body, (dict, list)):
try:
jsonpath_expression = parse(target_str)
jsonpath_match = jsonpath_expression.find(body)
if jsonpath_match:
full_value = jsonpath_match[0].value
except Exception as e:
log.debug(e)
elif isinstance(body, (str, bytes)):
search_result = search(expression, body)
if search_result.groups():
full_value = search_result.group(0)
if expression_instead_none and full_value is None:
full_value = expression
except Exception as e:
log.exception(e)
return full_value
@staticmethod
def get_values(expression, body=None, value_type="string", get_tag=False, expression_instead_none=False):
expression_arr = findall(r'\$\{[${A-Za-z0-9.^\]\[*_]*\}', expression)
values = [TBUtility.get_value(exp, body, value_type=value_type, get_tag=get_tag,
expression_instead_none=expression_instead_none) for exp in expression_arr]
if '${' not in expression:
values.append(expression)
return values
@staticmethod
def install_package(package, version="upgrade"):
from sys import executable
from subprocess import check_call, CalledProcessError
result = False
if version.lower() == "upgrade":
try:
result = check_call([executable, "-m", "pip", "install", package, "--upgrade", "--user"])
except CalledProcessError:
result = check_call([executable, "-m", "pip", "install", package, "--upgrade"])
else:
from pkg_resources import get_distribution
current_package_version = None
try:
current_package_version = get_distribution(package)
except Exception:
pass
if current_package_version is None or current_package_version != version:
installation_sign = "==" if ">=" not in version else ""
try:
result = check_call(
[executable, "-m", "pip", "install", package + installation_sign + version, "--user"])
except CalledProcessError:
result = check_call([executable, "-m", "pip", "install", package + installation_sign + version])
return result
@staticmethod
def replace_params_tags(text, data):
if '${' in text:
for item in text.split('/'):
if '${' in item:
tag = '${' + TBUtility.get_value(item, data['data'], 'params', get_tag=True) + '}'
value = TBUtility.get_value(item, data['data'], 'params', expression_instead_none=True)
text = text.replace(tag, str(value))
return text
| 40.263158 | 126 | 0.548729 |
from logging import getLogger
from re import search, findall
from jsonpath_rw import parse
from simplejson import JSONDecodeError, dumps, loads
log = getLogger("service")
class TBUtility:
@staticmethod
def decode(message):
try:
if isinstance(message.payload, bytes):
content = loads(message.payload.decode("utf-8", "ignore"))
else:
content = loads(message.payload)
except JSONDecodeError:
try:
content = message.payload.decode("utf-8", "ignore")
except JSONDecodeError:
content = message.payload
return content
@staticmethod
def validate_converted_data(data):
error = None
if error is None and not data.get("deviceName"):
error = 'deviceName is empty in data: '
if error is None and not data.get("deviceType"):
error = 'deviceType is empty in data: '
if error is None:
got_attributes = False
got_telemetry = False
if data.get("attributes") is not None and len(data.get("attributes")) > 0:
got_attributes = True
if data.get("telemetry") is not None:
for entry in data.get("telemetry"):
if (entry.get("ts") is not None and len(entry.get("values")) > 0) or entry.get("ts") is None:
got_telemetry = True
break
if got_attributes is False and got_telemetry is False:
error = 'No telemetry and attributes in data: '
if error is not None:
json_data = dumps(data)
if isinstance(json_data, bytes):
log.error(error + json_data.decode("UTF-8"))
else:
log.error(error + json_data)
return False
return True
@staticmethod
def topic_to_regex(topic):
return topic.replace("+", "[^/]+").replace("#", ".+")
@staticmethod
def regex_to_topic(regex):
return regex.replace("[^/]+", "+").replace(".+", "#")
@staticmethod
def get_value(expression, body=None, value_type="string", get_tag=False, expression_instead_none=False):
if isinstance(body, str):
body = loads(body)
if not expression:
return ''
positions = search(r'\${(?:(.*))}', expression)
if positions is not None:
p1 = positions.regs[-1][0]
p2 = positions.regs[-1][1]
else:
p1 = 0
p2 = len(expression)
target_str = str(expression[p1:p2])
if get_tag:
return target_str
full_value = None
try:
if isinstance(body, dict) and target_str.split()[0] in body:
if value_type.lower() == "string":
full_value = str(expression[0: max(p1 - 2, 0)]) + str(body[target_str.split()[0]]) + str(expression[
p2 + 1:len(
expression)])
else:
full_value = body.get(target_str.split()[0])
elif isinstance(body, (dict, list)):
try:
jsonpath_expression = parse(target_str)
jsonpath_match = jsonpath_expression.find(body)
if jsonpath_match:
full_value = jsonpath_match[0].value
except Exception as e:
log.debug(e)
elif isinstance(body, (str, bytes)):
search_result = search(expression, body)
if search_result.groups():
full_value = search_result.group(0)
if expression_instead_none and full_value is None:
full_value = expression
except Exception as e:
log.exception(e)
return full_value
@staticmethod
def get_values(expression, body=None, value_type="string", get_tag=False, expression_instead_none=False):
expression_arr = findall(r'\$\{[${A-Za-z0-9.^\]\[*_]*\}', expression)
values = [TBUtility.get_value(exp, body, value_type=value_type, get_tag=get_tag,
expression_instead_none=expression_instead_none) for exp in expression_arr]
if '${' not in expression:
values.append(expression)
return values
@staticmethod
def install_package(package, version="upgrade"):
    """Install *package* with pip in a subprocess.

    version -- "upgrade" to pull the latest release, otherwise a version
    string (optionally prefixed with ">=") to pin; already-satisfied
    pins are skipped.  Returns the pip subprocess return code (0 on
    success) or False when no install was attempted.
    Raises CalledProcessError if both the --user and global installs fail.
    """
    from sys import executable
    from subprocess import check_call, CalledProcessError
    result = False
    if version.lower() == "upgrade":
        try:
            # Prefer a per-user install; fall back to a global install
            # when pip rejects --user (e.g. inside a virtualenv).
            result = check_call([executable, "-m", "pip", "install", package, "--upgrade", "--user"])
        except CalledProcessError:
            result = check_call([executable, "-m", "pip", "install", package, "--upgrade"])
    else:
        from pkg_resources import get_distribution
        current_package_version = None
        try:
            current_package_version = get_distribution(package)
        except Exception:
            pass  # package not installed yet
        # BUG FIX: get_distribution() returns a Distribution object that
        # never compares equal to a version string, so the old comparison
        # (current_package_version != version) was always True and pinned
        # packages were reinstalled on every call.  Compare the version
        # text instead.
        if current_package_version is None or current_package_version.version != version:
            # ">=1.2" is passed through as-is; a bare "1.2" becomes "==1.2"
            installation_sign = "==" if ">=" not in version else ""
            try:
                result = check_call(
                    [executable, "-m", "pip", "install", package + installation_sign + version, "--user"])
            except CalledProcessError:
                result = check_call([executable, "-m", "pip", "install", package + installation_sign + version])
    return result
@staticmethod
def replace_params_tags(text, data):
    """Substitute every ${...} tag in *text* (a '/'-separated path) with
    the value looked up in data['data']; unresolved tags keep their
    original expression text."""
    if '${' not in text:
        return text
    for segment in text.split('/'):
        if '${' not in segment:
            continue
        tag_name = TBUtility.get_value(segment, data['data'], 'params', get_tag=True)
        resolved = TBUtility.get_value(segment, data['data'], 'params', expression_instead_none=True)
        text = text.replace('${' + tag_name + '}', str(resolved))
    return text
| true | true |
f723b735cf5e93033b633872fb57a7d0a690f5b6 | 37,513 | py | Python | taxcalc/tests/test_calculate.py | ClarePan/Tax-Calculator | d2d6cb4b551f34017db7166d91d982b5c4670816 | [
"CC0-1.0"
] | 1 | 2021-02-23T21:03:43.000Z | 2021-02-23T21:03:43.000Z | taxcalc/tests/test_calculate.py | ClarePan/Tax-Calculator | d2d6cb4b551f34017db7166d91d982b5c4670816 | [
"CC0-1.0"
] | null | null | null | taxcalc/tests/test_calculate.py | ClarePan/Tax-Calculator | d2d6cb4b551f34017db7166d91d982b5c4670816 | [
"CC0-1.0"
] | null | null | null | # CODING-STYLE CHECKS:
# pycodestyle test_calculate.py
import os
import json
from io import StringIO
import tempfile
import copy
import six
import pytest
import numpy as np
import pandas as pd
from taxcalc import Policy, Records, Calculator, Behavior, Consumption
# Minimal raw-input fixture data: the number of filing units, the
# calendar year the data represent, and CSV contents holding only the
# two variables Records requires (RECID and MARS).
RAWINPUTFILE_FUNITS = 4
RAWINPUTFILE_YEAR = 2015
RAWINPUTFILE_CONTENTS = (
    'RECID,MARS\n'
    '1,2\n'
    '2,1\n'
    '3,4\n'
    '4,3\n'
)
@pytest.fixture(scope='module', name='rawinputfile')
def fixture_rawinputfile():
    """
    Temporary input file that contains the minimum required input variables.
    """
    # delete=False so the file survives close() and can be reopened by name
    ifile = tempfile.NamedTemporaryFile(mode='a', delete=False)
    ifile.write(RAWINPUTFILE_CONTENTS)
    ifile.close()
    # must close and then yield for Windows platform
    yield ifile
    if os.path.isfile(ifile.name):
        try:
            os.remove(ifile.name)
        except OSError:
            pass  # sometimes we can't remove a generated temporary file
@pytest.fixture(scope='module', name='policyfile')
def fixture_policyfile():
    """Temporary JSON file holding a small set of policy parameter values."""
    policy_json = """{"_almdep": {"value": [7150, 7250, 7400]},
"_almsep": {"value": [40400, 41050]},
"_rt5": {"value": [0.33 ]},
"_rt7": {"value": [0.396]}}"""
    pfile = tempfile.NamedTemporaryFile(mode="a", delete=False)
    pfile.write(policy_json + "\n")
    pfile.close()
    # close before yielding so Windows allows the file to be reopened
    yield pfile
    os.remove(pfile.name)
def test_make_calculator(cps_subsample):
    """Construct a Calculator with all embedded objects and verify the
    current-year bookkeeping; also check that invalid constructor
    arguments raise ValueError."""
    syr = 2014
    pol = Policy(start_year=syr, num_years=9)
    assert pol.current_year == syr
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    consump = Consumption()
    consump.update_consumption({syr: {'_MPC_e20400': [0.05]}})
    assert consump.current_year == Consumption.JSON_START_YEAR
    calc = Calculator(policy=pol, records=rec,
                      consumption=consump, behavior=Behavior())
    assert calc.current_year == syr
    assert calc.records_current_year() == syr
    # test incorrect Calculator instantiation:
    with pytest.raises(ValueError):
        Calculator(policy=None, records=rec)
    with pytest.raises(ValueError):
        Calculator(policy=pol, records=None)
    with pytest.raises(ValueError):
        Calculator(policy=pol, records=rec, behavior=list())
    with pytest.raises(ValueError):
        Calculator(policy=pol, records=rec, consumption=list())
def test_make_calculator_deepcopy(cps_subsample):
    """A Calculator object must survive copy.deepcopy intact."""
    records = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    original = Calculator(policy=Policy(), records=records)
    duplicate = copy.deepcopy(original)
    assert isinstance(duplicate, Calculator)
def test_make_calculator_with_policy_reform(cps_subsample):
    """A reform implemented before Calculator construction must be
    visible through the Calculator's policy_param accessor."""
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    year = rec.current_year
    # create a Policy object and apply a policy reform
    pol = Policy()
    reform = {2013: {'_II_em': [4000], '_II_em_cpi': False,
                     '_STD_Aged': [[1600, 1300, 1300, 1600, 1600]],
                     '_STD_Aged_cpi': False}}
    pol.implement_reform(reform)
    # create a Calculator object using this policy reform
    calc = Calculator(policy=pol, records=rec)
    # check that Policy object embedded in Calculator object is correct
    assert calc.current_year == year
    assert calc.policy_param('II_em') == 4000
    # _cpi=False means the reform values stay constant in later years
    assert np.allclose(calc.policy_param('_II_em'),
                       np.array([4000] * Policy.DEFAULT_NUM_YEARS))
    exp_STD_Aged = [[1600, 1300, 1300,
                     1600, 1600]] * Policy.DEFAULT_NUM_YEARS
    assert np.allclose(calc.policy_param('_STD_Aged'),
                       np.array(exp_STD_Aged))
    assert np.allclose(calc.policy_param('STD_Aged'),
                       np.array([1600, 1300, 1300, 1600, 1600]))
def test_make_calculator_with_multiyear_reform(cps_subsample):
    """Reform values specified in several years must be carried through
    the embedded Policy object's full parameter vectors."""
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    year = rec.current_year
    # create a Policy object and apply a policy reform
    pol = Policy()
    reform = {2015: {}, 2016: {}}
    reform[2015]['_II_em'] = [5000, 6000]  # reform values for 2015 and 2016
    reform[2015]['_II_em_cpi'] = False
    reform[2016]['_STD_Aged'] = [[1600, 1300, 1600, 1300, 1600]]
    pol.implement_reform(reform)
    # create a Calculator object using this policy-reform
    calc = Calculator(policy=pol, records=rec)
    # check that Policy object embedded in Calculator object is correct
    assert pol.num_years == Policy.DEFAULT_NUM_YEARS
    assert calc.current_year == year
    assert calc.policy_param('II_em') == 3950
    # pre-reform values in 2013-2014, reform values afterwards
    exp_II_em = [3900, 3950, 5000] + [6000] * (Policy.DEFAULT_NUM_YEARS - 3)
    assert np.allclose(calc.policy_param('_II_em'),
                       np.array(exp_II_em))
    calc.increment_year()
    calc.increment_year()
    assert calc.current_year == 2016
    assert np.allclose(calc.policy_param('STD_Aged'),
                       np.array([1600, 1300, 1600, 1300, 1600]))
def test_calculator_advance_to_year(cps_subsample):
    """advance_to_year moves forward but must refuse to move backward."""
    records = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    calc = Calculator(policy=Policy(), records=records)
    calc.advance_to_year(2016)
    assert calc.current_year == 2016
    # moving backward in time is an error
    with pytest.raises(ValueError):
        calc.advance_to_year(2015)
def test_make_calculator_raises_on_no_policy(cps_subsample):
    """Constructing a Calculator without a Policy object is an error."""
    records = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    with pytest.raises(ValueError):
        Calculator(records=records)
def test_calculator_mtr(cps_subsample):
    """mtr() must leave the Calculator's arrays unchanged and return
    distinct payroll/income/combined marginal-tax-rate arrays for a
    variety of income variables."""
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    calcx = Calculator(policy=Policy(), records=rec)
    calcx.calc_all()
    combinedx = calcx.array('combined')
    c00100x = calcx.array('c00100')
    calc = Calculator(policy=Policy(), records=rec)
    recs_pre_e00200p = copy.deepcopy(calc.array('e00200p'))
    (mtr_ptx, mtr_itx, mtr_cmb) = calc.mtr(variable_str='e00200p',
                                           zero_out_calculated_vars=True)
    recs_post_e00200p = calc.array('e00200p')
    # mtr() must restore the incremented variable ...
    assert np.allclose(recs_post_e00200p, recs_pre_e00200p)
    # ... and leave results identical to a plain calc_all()
    assert np.allclose(calc.array('combined'), combinedx)
    assert np.allclose(calc.array('c00100'), c00100x)
    assert np.array_equal(mtr_cmb, mtr_ptx) is False
    assert np.array_equal(mtr_ptx, mtr_itx) is False
    with pytest.raises(ValueError):
        calc.mtr(variable_str='bad_income_type')
    (_, _, mtr_combined) = calc.mtr(variable_str='e00200s',
                                    calc_all_already_called=True)
    assert isinstance(mtr_combined, np.ndarray)
    (_, _, mtr_combined) = calc.mtr(variable_str='e00650',
                                    negative_finite_diff=True,
                                    calc_all_already_called=True)
    assert isinstance(mtr_combined, np.ndarray)
    (_, _, mtr_combined) = calc.mtr(variable_str='e00900p',
                                    calc_all_already_called=True)
    assert isinstance(mtr_combined, np.ndarray)
    (_, _, mtr_combined) = calc.mtr(variable_str='e01700',
                                    calc_all_already_called=True)
    assert isinstance(mtr_combined, np.ndarray)
    (_, _, mtr_combined) = calc.mtr(variable_str='e26270',
                                    calc_all_already_called=True)
    assert isinstance(mtr_combined, np.ndarray)
    (_, _, mtr_combined) = calc.mtr(variable_str='e00200p',
                                    calc_all_already_called=True)
    # repeating the first variable must reproduce the first result
    assert np.allclose(mtr_combined, mtr_cmb)
    assert np.allclose(calc.array('combined'), combinedx)
    assert np.allclose(calc.array('c00100'), c00100x)
def test_calculator_mtr_when_PT_rates_differ():
    """The income-tax MTR with respect to p23250 must be the same under
    current law and under a reform that sets individual rates to 0.40
    and pass-through rates to 0.30."""
    reform = {2013: {'_II_rt1': [0.40],
                     '_II_rt2': [0.40],
                     '_II_rt3': [0.40],
                     '_II_rt4': [0.40],
                     '_II_rt5': [0.40],
                     '_II_rt6': [0.40],
                     '_II_rt7': [0.40],
                     '_PT_rt1': [0.30],
                     '_PT_rt2': [0.30],
                     '_PT_rt3': [0.30],
                     '_PT_rt4': [0.30],
                     '_PT_rt5': [0.30],
                     '_PT_rt6': [0.30],
                     '_PT_rt7': [0.30]}}
    # single filing unit with wage and self-employment income
    funit = (
        u'RECID,MARS,FLPDYR,e00200,e00200p,e00900,e00900p,extraneous\n'
        u'1, 1, 2009, 200000,200000, 100000,100000, 9999999999\n'
    )
    rec = Records(pd.read_csv(StringIO(funit)))
    pol = Policy()
    calc1 = Calculator(policy=pol, records=rec)
    (_, mtr1, _) = calc1.mtr(variable_str='p23250')
    pol.implement_reform(reform)
    calc2 = Calculator(policy=pol, records=rec)
    (_, mtr2, _) = calc2.mtr(variable_str='p23250')
    assert np.allclose(mtr1, mtr2, rtol=0.0, atol=1e-06)
def test_make_calculator_increment_years_first(cps_subsample):
    """Reform values must be indexed forward by the Policy inflation
    rates when the reform is implemented before Calculator creation."""
    # create Policy object with policy reform
    syr = 2013
    pol = Policy(start_year=syr)
    reform = {2015: {}, 2016: {}}
    std5 = 2000
    reform[2015]['_STD_Aged'] = [[std5, std5, std5, std5, std5]]
    reform[2015]['_II_em'] = [5000]
    reform[2016]['_II_em'] = [6000]
    reform[2016]['_II_em_cpi'] = False
    pol.implement_reform(reform)
    # create Calculator object with Policy object as modified by reform
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    calc = Calculator(policy=pol, records=rec)
    # compare expected policy parameter values with those embedded in calc
    irates = pol.inflation_rates()
    irate2015 = irates[2015 - syr]
    irate2016 = irates[2016 - syr]
    # 2016 and 2017 values are the 2015 reform value indexed forward
    std6 = std5 * (1.0 + irate2015)
    std7 = std6 * (1.0 + irate2016)
    exp_STD_Aged = np.array([[1500, 1200, 1200, 1500, 1500],
                             [1550, 1200, 1200, 1550, 1550],
                             [std5, std5, std5, std5, std5],
                             [std6, std6, std6, std6, std6],
                             [std7, std7, std7, std7, std7]])
    act_STD_Aged = calc.policy_param('_STD_Aged')
    assert np.allclose(act_STD_Aged[:5], exp_STD_Aged)
    exp_II_em = np.array([3900, 3950, 5000, 6000, 6000])
    act_II_em = calc.policy_param('_II_em')
    assert np.allclose(act_II_em[:5], exp_II_em)
def test_ID_HC_vs_BS(cps_subsample):
    """
    Test that complete haircut of itemized deductions produces same
    results as a 100% benefit surtax with no benefit deduction.
    """
    # the same Records object is shared by both Calculator objects
    recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    # specify complete-haircut reform policy and Calculator object
    hc_reform = {2013: {'_ID_Medical_hc': [1.0],
                        '_ID_StateLocalTax_hc': [1.0],
                        '_ID_RealEstate_hc': [1.0],
                        '_ID_Casualty_hc': [1.0],
                        '_ID_Miscellaneous_hc': [1.0],
                        '_ID_InterestPaid_hc': [1.0],
                        '_ID_Charity_hc': [1.0]}}
    hc_policy = Policy()
    hc_policy.implement_reform(hc_reform)
    hc_calc = Calculator(policy=hc_policy, records=recs)
    hc_calc.calc_all()
    hc_taxes = hc_calc.dataframe(['iitax', 'payrolltax'])
    del hc_calc  # discard the first Calculator before building the second
    # specify benefit-surtax reform policy and Calculator object
    bs_reform = {2013: {'_ID_BenefitSurtax_crt': [0.0],
                        '_ID_BenefitSurtax_trt': [1.0]}}
    bs_policy = Policy()
    bs_policy.implement_reform(bs_reform)
    bs_calc = Calculator(policy=bs_policy, records=recs)
    bs_calc.calc_all()
    bs_taxes = bs_calc.dataframe(['iitax', 'payrolltax'])
    del bs_calc
    # compare calculated taxes generated by the two reforms
    assert np.allclose(hc_taxes['payrolltax'], bs_taxes['payrolltax'])
    assert np.allclose(hc_taxes['iitax'], bs_taxes['iitax'])
def test_ID_StateLocal_HC_vs_CRT(cps_subsample):
    """
    Test that a cap on state/local income and sales tax deductions at 0 percent
    of AGI is equivalent to a complete haircut on the same state/local tax
    deductions.
    """
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    # specify state/local complete haircut reform policy and Calculator object
    hc_reform = {2013: {'_ID_StateLocalTax_hc': [1.0]}}
    hc_policy = Policy()
    hc_policy.implement_reform(hc_reform)
    hc_calc = Calculator(policy=hc_policy, records=rec)
    hc_calc.calc_all()
    # specify AGI cap reform policy and Calculator object
    crt_reform = {2013: {'_ID_StateLocalTax_crt': [0.0]}}
    crt_policy = Policy()
    crt_policy.implement_reform(crt_reform)
    crt_calc = Calculator(policy=crt_policy, records=rec)
    crt_calc.calc_all()
    # compare calculated tax results generated by the two reforms
    # (payrolltax is expected to be identical: neither reform touches it)
    assert np.allclose(hc_calc.array('payrolltax'),
                       crt_calc.array('payrolltax'))
    assert np.allclose(hc_calc.array('iitax'),
                       crt_calc.array('iitax'))
def test_ID_RealEstate_HC_vs_CRT(cps_subsample):
    """
    Test that a cap on all state, local, and foreign real estate tax deductions
    at 0 percent of AGI is equivalent to a complete haircut on the same real
    estate tax deductions.
    """
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    # specify real estate complete haircut reform policy and Calculator object
    hc_reform = {2013: {'_ID_RealEstate_hc': [1.0]}}
    hc_policy = Policy()
    hc_policy.implement_reform(hc_reform)
    hc_calc = Calculator(policy=hc_policy, records=rec)
    hc_calc.calc_all()
    # specify AGI cap reform policy and Calculator object
    crt_reform = {2013: {'_ID_RealEstate_crt': [0.0]}}
    crt_policy = Policy()
    crt_policy.implement_reform(crt_reform)
    crt_calc = Calculator(policy=crt_policy, records=rec)
    crt_calc.calc_all()
    # compare calculated tax results generated by the two reforms
    # (payrolltax is expected to be identical: neither reform touches it)
    assert np.allclose(hc_calc.array('payrolltax'),
                       crt_calc.array('payrolltax'))
    assert np.allclose(hc_calc.array('iitax'),
                       crt_calc.array('iitax'))
def test_calculator_using_nonstd_input(rawinputfile):
    """Calculator must handle raw, unweighted, non-aged input data:
    zero weights/taxes and a flat 15.3% payroll-tax MTR."""
    # check Calculator handling of raw, non-standard input data with no aging
    pol = Policy()
    pol.set_year(RAWINPUTFILE_YEAR)  # set policy params to input data year
    nonstd = Records(data=rawinputfile.name,
                     gfactors=None,  # keeps raw data unchanged
                     weights=None,
                     start_year=RAWINPUTFILE_YEAR)  # set raw input data year
    assert nonstd.array_length == RAWINPUTFILE_FUNITS
    calc = Calculator(policy=pol, records=nonstd,
                      sync_years=False)  # keeps raw data unchanged
    assert calc.current_year == RAWINPUTFILE_YEAR
    calc.calc_all()
    # no weights file was supplied, so all weighted totals are zero
    assert calc.weighted_total('e00200') == 0
    assert calc.total_weight() == 0
    varlist = ['RECID', 'MARS']
    pdf = calc.dataframe(varlist)
    assert isinstance(pdf, pd.DataFrame)
    assert pdf.shape == (RAWINPUTFILE_FUNITS, len(varlist))
    mars = calc.array('MARS')
    assert isinstance(mars, np.ndarray)
    assert mars.shape == (RAWINPUTFILE_FUNITS,)
    exp_iitax = np.zeros((nonstd.array_length,))
    assert np.allclose(calc.array('iitax'), exp_iitax)
    mtr_ptax, _, _ = calc.mtr(wrt_full_compensation=False)
    # combined OASDI+HI employee+employer payroll-tax rate
    exp_mtr_ptax = np.zeros((nonstd.array_length,))
    exp_mtr_ptax.fill(0.153)
    assert np.allclose(mtr_ptax, exp_mtr_ptax)
# JSON reform text (with //-style comments) written to disk by the
# reform_file fixture and consumed by read_json_param_objects() tests.
REFORM_CONTENTS = """
// Example of a reform file suitable for read_json_param_objects().
// This JSON file can contain any number of trailing //-style comments, which
// will be removed before the contents are converted from JSON to a dictionary.
// Within each "policy" object, the primary keys are parameters and
// the secondary keys are years.
// Both the primary and secondary key values must be enclosed in quotes (").
// Boolean variables are specified as true or false (no quotes; all lowercase).
// Parameter code in the policy object is enclosed inside a pair of double
// pipe characters (||).
{
"policy": {
"_AMT_brk1": // top of first AMT tax bracket
{"2015": [200000],
"2017": [300000]
},
"_EITC_c": // maximum EITC amount by number of qualifying kids (0,1,2,3+)
{"2016": [[ 900, 5000, 8000, 9000]],
"2019": [[1200, 7000, 10000, 12000]]
},
"_II_em": // personal exemption amount (see indexing changes below)
{"2016": [6000],
"2018": [7500],
"2020": [9000]
},
"_II_em_cpi": // personal exemption amount indexing status
{"2016": false, // values in future years are same as this year value
"2018": true // values in future years indexed with this year as base
},
"_SS_Earnings_c": // social security (OASDI) maximum taxable earnings
{"2016": [300000],
"2018": [500000],
"2020": [700000]
},
"_AMT_em_cpi": // AMT exemption amount indexing status
{"2017": false, // values in future years are same as this year value
"2020": true // values in future years indexed with this year as base
}
}
}
"""
@pytest.fixture(scope='module', name='reform_file')
def fixture_reform_file():
    """
    Temporary reform file for read_json_param_objects() function.
    """
    # delete=False so the file survives close() and can be reopened by name
    rfile = tempfile.NamedTemporaryFile(mode='a', delete=False)
    rfile.write(REFORM_CONTENTS)
    rfile.close()
    # must close and then yield for Windows platform
    yield rfile
    if os.path.isfile(rfile.name):
        try:
            os.remove(rfile.name)
        except OSError:
            pass  # sometimes we can't remove a generated temporary file
# JSON assumption text (with //-style comments) written to disk by the
# assump_file fixture; contains every top-level key the parser expects.
ASSUMP_CONTENTS = """
// Example of assump file suitable for the read_json_param_objects().
// This JSON file can contain any number of trailing //-style comments, which
// will be removed before the contents are converted from JSON to a dictionary.
// Within each "behavior", "consumption" and "growth" object, the
// primary keys are parameters and the secondary keys are years.
// Both the primary and secondary key values must be enclosed in quotes (").
// Boolean variables are specified as true or false (no quotes; all lowercase).
{
"consumption": { "_MPC_e18400": {"2018": [0.05]} },
"behavior": {},
"growdiff_baseline": {},
"growdiff_response": {},
"growmodel": {}
}
"""
@pytest.fixture(scope='module', name='assump_file')
def fixture_assump_file():
    """Temporary assumption file for read_json_param_objects() tests."""
    tmp = tempfile.NamedTemporaryFile(mode='a', delete=False)
    tmp.write(ASSUMP_CONTENTS)
    tmp.close()  # close before yield so Windows can reopen the file by name
    yield tmp
    if os.path.isfile(tmp.name):
        try:
            os.remove(tmp.name)
        except OSError:
            # the OS may still hold the temporary file open; ignore
            pass
@pytest.mark.parametrize("set_year", [False, True])
def test_read_json_reform_file_and_implement_reform(reform_file,
                                                    assump_file,
                                                    set_year):
    """
    Test reading and translation of reform file into a reform dictionary
    that is then used to call implement_reform method and Calculate.calc_all()
    NOTE: implement_reform called when policy.current_year == policy.start_year
    """
    pol = Policy()
    if set_year:
        pol.set_year(2015)
    param_dict = Calculator.read_json_param_objects(reform_file.name,
                                                    assump_file.name)
    pol.implement_reform(param_dict['policy'])
    syr = pol.start_year
    # AMT bracket: indexed between and after the 2015/2017 reform values
    amt_brk1 = pol._AMT_brk1
    assert amt_brk1[2015 - syr] == 200000
    assert amt_brk1[2016 - syr] > 200000
    assert amt_brk1[2017 - syr] == 300000
    assert amt_brk1[2018 - syr] > 300000
    # personal exemption: unindexed 2016-2017, indexed again from 2018
    ii_em = pol._II_em
    assert ii_em[2016 - syr] == 6000
    assert ii_em[2017 - syr] == 6000
    assert ii_em[2018 - syr] == 7500
    assert ii_em[2019 - syr] > 7500
    assert ii_em[2020 - syr] == 9000
    assert ii_em[2021 - syr] > 9000
    # AMT exemption: indexing turned off in 2017 and back on in 2020
    amt_em = pol._AMT_em
    assert amt_em[2016 - syr, 0] > amt_em[2015 - syr, 0]
    assert amt_em[2017 - syr, 0] > amt_em[2016 - syr, 0]
    assert amt_em[2018 - syr, 0] == amt_em[2017 - syr, 0]
    assert amt_em[2019 - syr, 0] == amt_em[2017 - syr, 0]
    assert amt_em[2020 - syr, 0] == amt_em[2017 - syr, 0]
    assert amt_em[2021 - syr, 0] > amt_em[2020 - syr, 0]
    assert amt_em[2022 - syr, 0] > amt_em[2021 - syr, 0]
    # current-law parameter untouched by the reform
    add4aged = pol._ID_Medical_frt_add4aged
    assert add4aged[2015 - syr] == -0.025
    assert add4aged[2016 - syr] == -0.025
    assert add4aged[2017 - syr] == 0.0
    assert add4aged[2022 - syr] == 0.0
@pytest.fixture(scope='module', name='bad1reformfile')
def fixture_bad1reformfile():
    """Temporary reform file with invalid JSON (single-quoted key)."""
    # specify JSON text for reform
    txt = """
{
"policy": { // example of incorrect JSON because 'x' must be "x"
'x': {"2014": [4000]}
}
}
"""
    f = tempfile.NamedTemporaryFile(mode='a', delete=False)
    f.write(txt + '\n')
    f.close()
    # Must close and then yield for Windows platform
    yield f
    os.remove(f.name)
@pytest.fixture(scope='module', name='bad2reformfile')
def fixture_bad2reformfile():
    """Temporary reform file that is missing the required "policy" key."""
    # specify JSON text for reform
    txt = """
{
"title": "",
"policyx": { // example of reform file not containing "policy" key
"_SS_Earnings_c": {"2018": [9e99]}
}
}
"""
    f = tempfile.NamedTemporaryFile(mode='a', delete=False)
    f.write(txt + '\n')
    f.close()
    # Must close and then yield for Windows platform
    yield f
    os.remove(f.name)
@pytest.fixture(scope='module', name='bad3reformfile')
def fixture_bad3reformfile():
    """Temporary reform file with a misplaced "behavior" key."""
    # specify JSON text for reform
    txt = """
{
"title": "",
"policy": {
"_SS_Earnings_c": {"2018": [9e99]}
},
"behavior": { // example of misplaced "behavior" key
}
}
"""
    f = tempfile.NamedTemporaryFile(mode='a', delete=False)
    f.write(txt + '\n')
    f.close()
    # Must close and then yield for Windows platform
    yield f
    os.remove(f.name)
def test_read_bad_json_reform_file(bad1reformfile, bad2reformfile,
                                   bad3reformfile):
    """Every malformed reform or assump argument must raise ValueError."""
    bad_reform_args = [bad1reformfile.name, bad2reformfile.name,
                       bad3reformfile.name, list()]
    for reform_arg in bad_reform_args:
        with pytest.raises(ValueError):
            Calculator.read_json_param_objects(reform_arg, None)
    for assump_arg in ['unknown_file_name', list()]:
        with pytest.raises(ValueError):
            Calculator.read_json_param_objects(None, assump_arg)
@pytest.fixture(scope='module', name='bad1assumpfile')
def fixture_bad1assumpfile():
    """Temporary assump file with invalid JSON (single-quoted key)."""
    # specify JSON text for assumptions
    txt = """
{
"consumption": {},
"behavior": { // example of incorrect JSON because 'x' must be "x"
'x': {"2014": [0.25]}
},
"growdiff_baseline": {},
"growdiff_response": {},
"growmodel": {}
}
"""
    f = tempfile.NamedTemporaryFile(mode='a', delete=False)
    f.write(txt + '\n')
    f.close()
    # Must close and then yield for Windows platform
    yield f
    os.remove(f.name)
@pytest.fixture(scope='module', name='bad2assumpfile')
def fixture_bad2assumpfile():
    """Temporary assump file missing the required "behavior" key."""
    # specify JSON text for assumptions
    txt = """
{
"consumption": {},
"behaviorx": {}, // example of assump file not containing "behavior" key
"growdiff_baseline": {},
"growdiff_response": {},
"growmodel": {}
}
"""
    f = tempfile.NamedTemporaryFile(mode='a', delete=False)
    f.write(txt + '\n')
    f.close()
    # Must close and then yield for Windows platform
    yield f
    os.remove(f.name)
@pytest.fixture(scope='module', name='bad3assumpfile')
def fixture_bad3assumpfile():
    """Temporary assump file with a misplaced "policy" key."""
    # specify JSON text for assump
    txt = """
{
"consumption": {},
"behavior": {},
"growdiff_baseline": {},
"growdiff_response": {},
"policy": { // example of misplaced policy key
"_SS_Earnings_c": {"2018": [9e99]}
},
"growmodel": {}
}
"""
    f = tempfile.NamedTemporaryFile(mode='a', delete=False)
    f.write(txt + '\n')
    f.close()
    # Must close and then yield for Windows platform
    yield f
    os.remove(f.name)
def test_read_bad_json_assump_file(bad1assumpfile, bad2assumpfile,
                                   bad3assumpfile):
    """Every malformed assumption argument must raise ValueError."""
    bad_assump_args = [bad1assumpfile.name, bad2assumpfile.name,
                       bad3assumpfile.name, 'unknown_file_name', list()]
    for assump_arg in bad_assump_args:
        with pytest.raises(ValueError):
            Calculator.read_json_param_objects(None, assump_arg)
def test_convert_parameter_dict():
    """_convert_parameter_dict rejects malformed keys/values and returns
    a dict for well-formed input."""
    malformed_dicts = [
        {2013: {'2013': [40000]}},    # primary key must be a parameter name
        {'_II_em': {2013: [40000]}},  # secondary year key must be a string
        {4567: {2013: [40000]}},      # non-string primary key
        {'_II_em': 40000},            # value must itself be a dict
    ]
    for bad_dict in malformed_dicts:
        with pytest.raises(ValueError):
            Calculator._convert_parameter_dict(bad_dict)
    good = Calculator._convert_parameter_dict({'_II_em': {'2013': [40000]}})
    assert isinstance(good, dict)
def test_calc_all(reform_file, rawinputfile):
    """Calculator construction under a file-specified reform on raw
    input data must produce no reform warnings."""
    cyr = 2016
    pol = Policy()
    param_dict = Calculator.read_json_param_objects(reform_file.name, None)
    pol.implement_reform(param_dict['policy'])
    pol.set_year(cyr)
    nonstd = Records(data=rawinputfile.name, gfactors=None,
                     weights=None, start_year=cyr)
    assert nonstd.array_length == RAWINPUTFILE_FUNITS
    calc = Calculator(policy=pol, records=nonstd,
                      sync_years=False)  # keeps raw data unchanged
    assert calc.current_year == cyr
    assert calc.reform_warnings == ''
def test_translate_json_reform_suffixes_mars_non_indexed():
    """MARS-suffixed parameters (_joint/_separate) must translate into
    the same reform dictionary as the full-vector specification."""
    # test read_json_param_objects()
    # using MARS-indexed parameter suffixes
    json1 = """{"policy": {
"_II_em": {"2020": [20000], "2015": [15000]},
"_AMEDT_ec_joint": {"2018": [400000], "2016": [300000]},
"_AMEDT_ec_separate": {"2017": [150000], "2019": [200000]}
}}"""
    pdict1 = Calculator.read_json_param_objects(reform=json1, assump=None)
    rdict1 = pdict1['policy']
    # the same reform written out as full five-element MARS vectors
    json2 = """{"policy": {
"_AMEDT_ec": {"2016": [[200000, 300000, 125000, 200000, 200000]],
"2017": [[200000, 300000, 150000, 200000, 200000]],
"2018": [[200000, 400000, 150000, 200000, 200000]],
"2019": [[200000, 400000, 200000, 200000, 200000]]},
"_II_em": {"2015": [15000], "2020": [20000]}
}}"""
    pdict2 = Calculator.read_json_param_objects(reform=json2, assump=None)
    rdict2 = pdict2['policy']
    assert len(rdict2) == len(rdict1)
    for year in rdict2.keys():
        if '_II_em' in rdict2[year].keys():
            assert np.allclose(rdict1[year]['_II_em'],
                               rdict2[year]['_II_em'],
                               atol=0.01, rtol=0.0)
        if '_AMEDT_ec' in rdict2[year].keys():
            assert np.allclose(rdict1[year]['_AMEDT_ec'],
                               rdict2[year]['_AMEDT_ec'],
                               atol=0.01, rtol=0.0)
def test_translate_json_reform_suffixes_eic():
    """EIC-suffixed EITC parameters (_0kids .. _3+kids) must translate
    into the same reform dictionary as the full-vector specification."""
    # test read_json_param_objects(...)
    # using EIC-indexed parameter suffixes
    json1 = """{"policy": {
"_II_em": {"2020": [20000], "2015": [15000]},
"_EITC_c_0kids": {"2018": [510], "2019": [510]},
"_EITC_c_1kid": {"2019": [3400], "2018": [3400]},
"_EITC_c_2kids": {"2018": [5616], "2019": [5616]},
"_EITC_c_3+kids": {"2019": [6318], "2018": [6318]}
}}"""
    pdict1 = Calculator.read_json_param_objects(reform=json1, assump=None)
    rdict1 = pdict1['policy']
    # the same reform written out as full four-element EIC vectors
    json2 = """{"policy": {
"_EITC_c": {"2019": [[510, 3400, 5616, 6318]],
"2018": [[510, 3400, 5616, 6318]]},
"_II_em": {"2020": [20000], "2015": [15000]}
}}"""
    pdict2 = Calculator.read_json_param_objects(reform=json2, assump=None)
    rdict2 = pdict2['policy']
    assert len(rdict2) == len(rdict1)
    for year in rdict2.keys():
        if '_II_em' in rdict2[year].keys():
            assert np.allclose(rdict1[year]['_II_em'],
                               rdict2[year]['_II_em'],
                               atol=0.01, rtol=0.0)
        if '_EITC_c' in rdict2[year].keys():
            assert np.allclose(rdict1[year]['_EITC_c'],
                               rdict2[year]['_EITC_c'],
                               atol=0.01, rtol=0.0)
def test_translate_json_reform_suffixes_idedtype():
    """Itemized-deduction-type-suffixed switches must translate into the
    same reform dictionary as the full-vector specification."""
    # test read_json_param_objects(...)
    # using idedtype-indexed parameter suffixes
    json1 = """{"policy": {
"_ID_BenefitCap_rt": {"2019": [0.2]},
"_ID_BenefitCap_Switch_medical": {"2019": [false]},
"_ID_BenefitCap_Switch_casualty": {"2019": [false]},
"_ID_BenefitCap_Switch_misc": {"2019": [false]},
"_ID_BenefitCap_Switch_interest": {"2019": [false]},
"_ID_BenefitCap_Switch_charity": {"2019": [false]},
"_II_em": {"2020": [20000], "2015": [15000]}
}}"""
    pdict1 = Calculator.read_json_param_objects(reform=json1, assump=None)
    rdict1 = pdict1['policy']
    # the same reform written out as a full seven-element switch vector
    json2 = """{"policy": {
"_II_em": {"2020": [20000], "2015": [15000]},
"_ID_BenefitCap_Switch": {
"2019": [[false, true, true, false, false, false, false]]
},
"_ID_BenefitCap_rt": {"2019": [0.2]}
}}"""
    pdict2 = Calculator.read_json_param_objects(reform=json2, assump=None)
    rdict2 = pdict2['policy']
    assert len(rdict2) == len(rdict1)
    for year in rdict2.keys():
        if '_II_em' in rdict2[year].keys():
            assert np.allclose(rdict1[year]['_II_em'],
                               rdict2[year]['_II_em'],
                               atol=0.01, rtol=0.0)
        if '_ID_BenefitCap_rt' in rdict2[year].keys():
            assert np.allclose(rdict1[year]['_ID_BenefitCap_rt'],
                               rdict2[year]['_ID_BenefitCap_rt'],
                               atol=0.01, rtol=0.0)
        if '_ID_BenefitCap_Switch' in rdict2[year].keys():
            assert np.allclose(rdict1[year]['_ID_BenefitCap_Switch'],
                               rdict2[year]['_ID_BenefitCap_Switch'],
                               atol=0.01, rtol=0.0)
def test_read_json_param_with_suffixes_and_errors():
    """Suffixed parameters combined with out-of-range values must still
    parse and then surface both reform errors and warnings."""
    # test interaction of policy parameter suffixes and reform errors
    # (fails without 0.10.2 bug fix as reported by Hank Doupe in PB PR#641)
    reform = {
        u'policy': {
            u'_II_brk4_separate': {u'2017': [5000.0]},
            u'_STD_separate': {u'2017': [8000.0]},
            u'_STD_single': {u'2018': [1000.0]},
            u'_II_brk2_headhousehold': {u'2017': [1000.0]},
            u'_II_brk4_single': {u'2017': [500.0]},
            u'_STD_joint': {u'2017': [10000.0], u'2020': [150.0]},
            u'_II_brk2_separate': {u'2017': [1000.0]},
            u'_II_brk2_single': {u'2017': [1000.0]},
            u'_II_brk2_joint': {u'2017': [1000.0]},
            u'_FICA_ss_trt': {u'2017': [-1.0], u'2019': [0.1]},
            u'_II_brk4_headhousehold': {u'2017': [500.0]},
            u'_STD_headhousehold': {u'2017': [10000.0], u'2020': [150.0]},
            u'_II_brk4_joint': {u'2017': [500.0]},
            u'_ID_BenefitSurtax_Switch_medical': {u'2017': [True]}
        }
    }
    json_reform = json.dumps(reform)
    params = Calculator.read_json_param_objects(json_reform, None)
    assert isinstance(params, dict)
    pol = Policy()
    pol.ignore_reform_errors()
    # errors/warnings are collected rather than raised
    pol.implement_reform(params['policy'],
                         print_warnings=False, raise_errors=False)
    assert len(pol.parameter_errors) > 0
    assert len(pol.parameter_warnings) > 0
def test_noreform_documentation():
    """reform_documentation for an empty reform/assumption set must
    report the exact default-usage text."""
    reform_json = """
{
"policy": {}
}
"""
    assump_json = """
{
"consumption": {},
"behavior": {},
"growdiff_baseline": {},
"growdiff_response": {},
"growmodel": {}
}
"""
    params = Calculator.read_json_param_objects(reform_json, assump_json)
    assert isinstance(params, dict)
    actual_doc = Calculator.reform_documentation(params)
    expected_doc = (
        'REFORM DOCUMENTATION\n'
        'Baseline Growth-Difference Assumption Values by Year:\n'
        'none: using default baseline growth assumptions\n'
        'Policy Reform Parameter Values by Year:\n'
        'none: using current-law policy parameters\n'
    )
    assert actual_doc == expected_doc
def test_reform_documentation():
    """reform_documentation must return a string for a multi-parameter
    reform combined with a baseline growth-difference assumption."""
    reform_json = """
{
"policy": {
"_II_em_cpi": {"2016": false,
"2018": true},
"_II_em": {"2016": [5000],
"2018": [6000],
"2020": [7000]},
"_EITC_indiv": {"2017": [true]},
"_STD_Aged_cpi": {"2016": false},
"_STD_Aged": {"2016": [[1600, 1300, 1300, 1600, 1600]],
"2020": [[2000, 2000, 2000, 2000, 2000]]},
"_ID_BenefitCap_Switch_medical": {"2020": [false]},
"_ID_BenefitCap_Switch_casualty": {"2020": [false]},
"_ID_BenefitCap_Switch_misc": {"2020": [false]},
"_ID_BenefitCap_Switch_interest": {"2020": [false]},
"_ID_BenefitCap_Switch_charity": {"2020": [false]}
}
}
"""
    assump_json = """
{
"consumption": {},
"behavior": {},
// increase baseline inflation rate by one percentage point in 2014+
// (has no effect on known policy parameter values)
"growdiff_baseline": {"_ACPIU": {"2014": [0.01]}},
"growdiff_response": {},
"growmodel": {}
}
"""
    params = Calculator.read_json_param_objects(reform_json, assump_json)
    assert isinstance(params, dict)
    doc = Calculator.reform_documentation(params)
    assert isinstance(doc, six.string_types)
    dump = False  # set to True to print documentation and force test failure
    if dump:
        print(doc)
        assert 1 == 2
def test_distribution_tables(cps_subsample):
    """distribution_tables returns one or two DataFrames depending on
    whether a second Calculator object is supplied."""
    pol = Policy()
    recs = Records.cps_constructor(data=cps_subsample)
    calc1 = Calculator(policy=pol, records=recs)
    assert calc1.current_year == 2014
    calc1.calc_all()
    # no second Calculator: second table is None
    dt1, dt2 = calc1.distribution_tables(None, 'weighted_deciles')
    assert isinstance(dt1, pd.DataFrame)
    assert dt2 is None
    dt1, dt2 = calc1.distribution_tables(calc1, 'weighted_deciles')
    assert isinstance(dt1, pd.DataFrame)
    assert isinstance(dt2, pd.DataFrame)
    # UBI reform for a genuinely different second Calculator
    reform = {2014: {'_UBI_u18': [1000],
                     '_UBI_1820': [1000],
                     '_UBI_21': [1000]}}
    pol.implement_reform(reform)
    assert not pol.parameter_errors
    calc2 = Calculator(policy=pol, records=recs)
    calc2.calc_all()
    dt1, dt2 = calc1.distribution_tables(calc2, 'weighted_deciles')
    assert isinstance(dt1, pd.DataFrame)
    assert isinstance(dt2, pd.DataFrame)
def test_difference_table(cps_subsample):
    """difference_table between baseline and a payroll-tax reform must
    return a pandas DataFrame."""
    year = 2014
    policy = Policy()
    records = Records.cps_constructor(data=cps_subsample)
    baseline = Calculator(policy=policy, records=records)
    assert baseline.current_year == year
    # remove the OASDI taxable maximum starting in the current year
    policy.implement_reform({year: {'_SS_Earnings_c': [9e99]}})
    reformed = Calculator(policy=policy, records=records)
    assert reformed.current_year == year
    baseline.calc_all()
    reformed.calc_all()
    table = baseline.difference_table(reformed, 'weighted_deciles', 'iitax')
    assert isinstance(table, pd.DataFrame)
def test_diagnostic_table(cps_subsample):
    """diagnostic_table must return a pandas DataFrame."""
    records = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    calc = Calculator(policy=Policy(), records=records)
    assert isinstance(calc.diagnostic_table(3), pd.DataFrame)
def test_mtr_graph(cps_subsample):
    """Marginal-tax-rate graphs are produced for two different
    income-measure/mtr-measure combinations."""
    recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    calc = Calculator(policy=Policy(), records=recs)
    fig = calc.mtr_graph(calc,
                         mars=2,
                         income_measure='wages',
                         mtr_measure='ptax')
    assert fig
    fig = calc.mtr_graph(calc,
                         income_measure='agi',
                         mtr_measure='itax')
    assert fig
def test_atr_graph(cps_subsample):
    """Average-tax-rate graphs are produced for itax and ptax measures."""
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    calc = Calculator(policy=Policy(), records=rec)
    for graph_kwargs in ({'mars': 2, 'atr_measure': 'itax'},
                         {'atr_measure': 'ptax'}):
        assert calc.atr_graph(calc, **graph_kwargs)
def test_privacy_of_embedded_objects(cps_subsample):
    """Embedded objects are private: outside a class body ``calc.__policy``
    is NOT name-mangled, so each access looks up a literal dunder attribute
    that does not exist and raises AttributeError."""
    recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    calc = Calculator(policy=Policy(), records=recs)
    with pytest.raises(AttributeError):
        cyr = calc.__policy.current_year
    with pytest.raises(AttributeError):
        wgh = calc.__records.s006
    with pytest.raises(AttributeError):
        cyr = calc.__consumption.current_year
    with pytest.raises(AttributeError):
        cyr = calc.__behavior.current_year
def test_n65(cps_subsample):
    """Weighted count of age-65+ individuals exceeds a sanity threshold."""
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    calculator = Calculator(policy=Policy(), records=rec)
    aged_total = calculator.n65().sum()
    assert aged_total > 1500
| 38.200611 | 79 | 0.632208 |
import os
import json
from io import StringIO
import tempfile
import copy
import six
import pytest
import numpy as np
import pandas as pd
from taxcalc import Policy, Records, Calculator, Behavior, Consumption
# Minimal raw (non-CPS) input data used by the rawinputfile fixture below.
RAWINPUTFILE_FUNITS = 4  # number of filing units in RAWINPUTFILE_CONTENTS
RAWINPUTFILE_YEAR = 2015  # calendar year the raw input data represents
RAWINPUTFILE_CONTENTS = (
    'RECID,MARS\n'
    '1,2\n'
    '2,1\n'
    '3,4\n'
    '4,3\n'
)
@pytest.fixture(scope='module', name='rawinputfile')
def fixture_rawinputfile():
    """Temporary CSV file containing minimal raw input data; removed
    after the module's tests finish."""
    ifile = tempfile.NamedTemporaryFile(mode='a', delete=False)
    ifile.write(RAWINPUTFILE_CONTENTS)
    ifile.close()
    # must close before yielding so Windows can reopen the file
    yield ifile
    if os.path.isfile(ifile.name):
        try:
            os.remove(ifile.name)
        except OSError:
            pass  # sometimes we can't remove a generated temporary file
@pytest.fixture(scope='module', name='policyfile')
def fixture_policyfile():
    """Temporary JSON file with a handful of policy parameter values."""
    txt = """{"_almdep": {"value": [7150, 7250, 7400]},
              "_almsep": {"value": [40400, 41050]},
              "_rt5": {"value": [0.33 ]},
              "_rt7": {"value": [0.396]}}"""
    f = tempfile.NamedTemporaryFile(mode="a", delete=False)
    f.write(txt + "\n")
    f.close()
    # Must close and then yield for Windows platform
    yield f
    os.remove(f.name)
def test_make_calculator(cps_subsample):
    """Calculator construction with all component objects succeeds and
    synchronizes years; invalid component arguments raise ValueError."""
    syr = 2014
    pol = Policy(start_year=syr, num_years=9)
    assert pol.current_year == syr
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    consump = Consumption()
    consump.update_consumption({syr: {'_MPC_e20400': [0.05]}})
    assert consump.current_year == Consumption.JSON_START_YEAR
    calc = Calculator(policy=pol, records=rec,
                      consumption=consump, behavior=Behavior())
    assert calc.current_year == syr
    assert calc.records_current_year() == syr
    # test incorrect Calculator instantiation:
    with pytest.raises(ValueError):
        Calculator(policy=None, records=rec)
    with pytest.raises(ValueError):
        Calculator(policy=pol, records=None)
    with pytest.raises(ValueError):
        Calculator(policy=pol, records=rec, behavior=list())
    with pytest.raises(ValueError):
        Calculator(policy=pol, records=rec, consumption=list())
def test_make_calculator_deepcopy(cps_subsample):
    """A Calculator object survives copy.deepcopy intact."""
    records = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    original = Calculator(policy=Policy(), records=records)
    clone = copy.deepcopy(original)
    assert isinstance(clone, Calculator)
def test_make_calculator_with_policy_reform(cps_subsample):
    """Single-year reform values (with indexing turned off) are embedded
    correctly in the Calculator's Policy object for all years."""
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    year = rec.current_year
    # create a Policy object and apply a policy reform
    pol = Policy()
    reform = {2013: {'_II_em': [4000], '_II_em_cpi': False,
                     '_STD_Aged': [[1600, 1300, 1300, 1600, 1600]],
                     '_STD_Aged_cpi': False}}
    pol.implement_reform(reform)
    # create a Calculator object using this policy reform
    calc = Calculator(policy=pol, records=rec)
    # check that Policy object embedded in Calculator object is correct
    assert calc.current_year == year
    assert calc.policy_param('II_em') == 4000
    assert np.allclose(calc.policy_param('_II_em'),
                       np.array([4000] * Policy.DEFAULT_NUM_YEARS))
    exp_STD_Aged = [[1600, 1300, 1300,
                     1600, 1600]] * Policy.DEFAULT_NUM_YEARS
    assert np.allclose(calc.policy_param('_STD_Aged'),
                       np.array(exp_STD_Aged))
    assert np.allclose(calc.policy_param('STD_Aged'),
                       np.array([1600, 1300, 1300, 1600, 1600]))
def test_make_calculator_with_multiyear_reform(cps_subsample):
    """Multi-year reform values take effect in their own years and are
    visible after incrementing the Calculator's year."""
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    year = rec.current_year
    # create a Policy object and apply a policy reform
    pol = Policy()
    reform = {2015: {}, 2016: {}}
    reform[2015]['_II_em'] = [5000, 6000]  # reform values for 2015 and 2016
    reform[2015]['_II_em_cpi'] = False
    reform[2016]['_STD_Aged'] = [[1600, 1300, 1600, 1300, 1600]]
    pol.implement_reform(reform)
    # create a Calculator object using this policy-reform
    calc = Calculator(policy=pol, records=rec)
    # check that Policy object embedded in Calculator object is correct
    assert pol.num_years == Policy.DEFAULT_NUM_YEARS
    assert calc.current_year == year
    assert calc.policy_param('II_em') == 3950
    exp_II_em = [3900, 3950, 5000] + [6000] * (Policy.DEFAULT_NUM_YEARS - 3)
    assert np.allclose(calc.policy_param('_II_em'),
                       np.array(exp_II_em))
    calc.increment_year()
    calc.increment_year()
    assert calc.current_year == 2016
    assert np.allclose(calc.policy_param('STD_Aged'),
                       np.array([1600, 1300, 1600, 1300, 1600]))
def test_calculator_advance_to_year(cps_subsample):
    """advance_to_year moves the Calculator forward; going back raises."""
    records = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    calculator = Calculator(policy=Policy(), records=records)
    calculator.advance_to_year(2016)
    assert calculator.current_year == 2016
    with pytest.raises(ValueError):
        calculator.advance_to_year(2015)  # cannot move backwards in time
def test_make_calculator_raises_on_no_policy(cps_subsample):
    """Constructing a Calculator without a policy argument raises."""
    records = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    with pytest.raises(ValueError):
        Calculator(records=records)
def test_calculator_mtr(cps_subsample):
    """mtr() leaves embedded records unchanged, distinguishes payroll,
    income, and combined rates, rejects unknown variables, and accepts
    the calc_all_already_called/negative_finite_diff options."""
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    calcx = Calculator(policy=Policy(), records=rec)
    calcx.calc_all()
    combinedx = calcx.array('combined')
    c00100x = calcx.array('c00100')
    calc = Calculator(policy=Policy(), records=rec)
    recs_pre_e00200p = copy.deepcopy(calc.array('e00200p'))
    (mtr_ptx, mtr_itx, mtr_cmb) = calc.mtr(variable_str='e00200p',
                                           zero_out_calculated_vars=True)
    recs_post_e00200p = calc.array('e00200p')
    # mtr() must restore the perturbed input variable afterwards
    assert np.allclose(recs_post_e00200p, recs_pre_e00200p)
    assert np.allclose(calc.array('combined'), combinedx)
    assert np.allclose(calc.array('c00100'), c00100x)
    assert np.array_equal(mtr_cmb, mtr_ptx) is False
    assert np.array_equal(mtr_ptx, mtr_itx) is False
    with pytest.raises(ValueError):
        calc.mtr(variable_str='bad_income_type')
    (_, _, mtr_combined) = calc.mtr(variable_str='e00200s',
                                    calc_all_already_called=True)
    assert isinstance(mtr_combined, np.ndarray)
    (_, _, mtr_combined) = calc.mtr(variable_str='e00650',
                                    negative_finite_diff=True,
                                    calc_all_already_called=True)
    assert isinstance(mtr_combined, np.ndarray)
    (_, _, mtr_combined) = calc.mtr(variable_str='e00900p',
                                    calc_all_already_called=True)
    assert isinstance(mtr_combined, np.ndarray)
    (_, _, mtr_combined) = calc.mtr(variable_str='e01700',
                                    calc_all_already_called=True)
    assert isinstance(mtr_combined, np.ndarray)
    (_, _, mtr_combined) = calc.mtr(variable_str='e26270',
                                    calc_all_already_called=True)
    assert isinstance(mtr_combined, np.ndarray)
    (_, _, mtr_combined) = calc.mtr(variable_str='e00200p',
                                    calc_all_already_called=True)
    assert np.allclose(mtr_combined, mtr_cmb)
    assert np.allclose(calc.array('combined'), combinedx)
    assert np.allclose(calc.array('c00100'), c00100x)
def test_calculator_mtr_when_PT_rates_differ():
    """Long-term-capital-gain MTR is unchanged by a reform that sets
    individual-income rates to 0.40 and pass-through rates to 0.30,
    because p23250 is taxed at neither schedule's ordinary rates."""
    reform = {2013: {'_II_rt1': [0.40],
                     '_II_rt2': [0.40],
                     '_II_rt3': [0.40],
                     '_II_rt4': [0.40],
                     '_II_rt5': [0.40],
                     '_II_rt6': [0.40],
                     '_II_rt7': [0.40],
                     '_PT_rt1': [0.30],
                     '_PT_rt2': [0.30],
                     '_PT_rt3': [0.30],
                     '_PT_rt4': [0.30],
                     '_PT_rt5': [0.30],
                     '_PT_rt6': [0.30],
                     '_PT_rt7': [0.30]}}
    # single filing unit with both wage and self-employment income
    funit = (
        u'RECID,MARS,FLPDYR,e00200,e00200p,e00900,e00900p,extraneous\n'
        u'1,    1,   2009,  200000,200000, 100000,100000,    9999999999\n'
    )
    rec = Records(pd.read_csv(StringIO(funit)))
    pol = Policy()
    calc1 = Calculator(policy=pol, records=rec)
    (_, mtr1, _) = calc1.mtr(variable_str='p23250')
    pol.implement_reform(reform)
    calc2 = Calculator(policy=pol, records=rec)
    (_, mtr2, _) = calc2.mtr(variable_str='p23250')
    assert np.allclose(mtr1, mtr2, rtol=0.0, atol=1e-06)
def test_make_calculator_increment_years_first(cps_subsample):
    """Reform values specified for future years are correctly indexed
    forward (STD_Aged) or held/indexed per the _cpi flags (II_em)."""
    # create Policy object with policy reform
    syr = 2013
    pol = Policy(start_year=syr)
    reform = {2015: {}, 2016: {}}
    std5 = 2000
    reform[2015]['_STD_Aged'] = [[std5, std5, std5, std5, std5]]
    reform[2015]['_II_em'] = [5000]
    reform[2016]['_II_em'] = [6000]
    reform[2016]['_II_em_cpi'] = False
    pol.implement_reform(reform)
    # create Calculator object with Policy object as modified by reform
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    calc = Calculator(policy=pol, records=rec)
    # compare expected policy parameter values with those embedded in calc
    irates = pol.inflation_rates()
    irate2015 = irates[2015 - syr]
    irate2016 = irates[2016 - syr]
    std6 = std5 * (1.0 + irate2015)
    std7 = std6 * (1.0 + irate2016)
    exp_STD_Aged = np.array([[1500, 1200, 1200, 1500, 1500],
                             [1550, 1200, 1200, 1550, 1550],
                             [std5, std5, std5, std5, std5],
                             [std6, std6, std6, std6, std6],
                             [std7, std7, std7, std7, std7]])
    act_STD_Aged = calc.policy_param('_STD_Aged')
    assert np.allclose(act_STD_Aged[:5], exp_STD_Aged)
    exp_II_em = np.array([3900, 3950, 5000, 6000, 6000])
    act_II_em = calc.policy_param('_II_em')
    assert np.allclose(act_II_em[:5], exp_II_em)
def test_ID_HC_vs_BS(cps_subsample):
    """Haircutting every itemized deduction must produce the same taxes
    as a 100%% benefit surtax with a zero credit, by construction."""
    recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    # specify complete-haircut reform policy and Calculator object
    hc_reform = {2013: {'_ID_Medical_hc': [1.0],
                        '_ID_StateLocalTax_hc': [1.0],
                        '_ID_RealEstate_hc': [1.0],
                        '_ID_Casualty_hc': [1.0],
                        '_ID_Miscellaneous_hc': [1.0],
                        '_ID_InterestPaid_hc': [1.0],
                        '_ID_Charity_hc': [1.0]}}
    hc_policy = Policy()
    hc_policy.implement_reform(hc_reform)
    hc_calc = Calculator(policy=hc_policy, records=recs)
    hc_calc.calc_all()
    hc_taxes = hc_calc.dataframe(['iitax', 'payrolltax'])
    del hc_calc
    # specify benefit-surtax reform policy and Calculator object
    bs_reform = {2013: {'_ID_BenefitSurtax_crt': [0.0],
                        '_ID_BenefitSurtax_trt': [1.0]}}
    bs_policy = Policy()
    bs_policy.implement_reform(bs_reform)
    bs_calc = Calculator(policy=bs_policy, records=recs)
    bs_calc.calc_all()
    bs_taxes = bs_calc.dataframe(['iitax', 'payrolltax'])
    del bs_calc
    # compare calculated taxes generated by the two reforms
    assert np.allclose(hc_taxes['payrolltax'], bs_taxes['payrolltax'])
    assert np.allclose(hc_taxes['iitax'], bs_taxes['iitax'])
def test_ID_StateLocal_HC_vs_CRT(cps_subsample):
    """A 100%% state/local-tax deduction haircut equals a zero AGI cap."""
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    # specify state/local complete haircut reform policy and Calculator object
    hc_reform = {2013: {'_ID_StateLocalTax_hc': [1.0]}}
    hc_policy = Policy()
    hc_policy.implement_reform(hc_reform)
    hc_calc = Calculator(policy=hc_policy, records=rec)
    hc_calc.calc_all()
    # specify AGI cap reform policy and Calculator object
    crt_reform = {2013: {'_ID_StateLocalTax_crt': [0.0]}}
    crt_policy = Policy()
    crt_policy.implement_reform(crt_reform)
    crt_calc = Calculator(policy=crt_policy, records=rec)
    crt_calc.calc_all()
    # compare calculated tax results generated by the two reforms
    assert np.allclose(hc_calc.array('payrolltax'),
                       crt_calc.array('payrolltax'))
    assert np.allclose(hc_calc.array('iitax'),
                       crt_calc.array('iitax'))
def test_ID_RealEstate_HC_vs_CRT(cps_subsample):
    """A 100%% real-estate deduction haircut equals a zero AGI cap."""
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    # specify real estate complete haircut reform policy and Calculator object
    hc_reform = {2013: {'_ID_RealEstate_hc': [1.0]}}
    hc_policy = Policy()
    hc_policy.implement_reform(hc_reform)
    hc_calc = Calculator(policy=hc_policy, records=rec)
    hc_calc.calc_all()
    # specify AGI cap reform policy and Calculator object
    crt_reform = {2013: {'_ID_RealEstate_crt': [0.0]}}
    crt_policy = Policy()
    crt_policy.implement_reform(crt_reform)
    crt_calc = Calculator(policy=crt_policy, records=rec)
    crt_calc.calc_all()
    # compare calculated tax results generated by the two reforms
    assert np.allclose(hc_calc.array('payrolltax'),
                       crt_calc.array('payrolltax'))
    assert np.allclose(hc_calc.array('iitax'),
                       crt_calc.array('iitax'))
def test_calculator_using_nonstd_input(rawinputfile):
    """Raw, unweighted, non-aged input data produce zero taxes and the
    statutory 15.3%% payroll MTR for every filing unit."""
    # check Calculator handling of raw, non-standard input data with no aging
    pol = Policy()
    pol.set_year(RAWINPUTFILE_YEAR)  # set policy params to input data year
    nonstd = Records(data=rawinputfile.name,
                     gfactors=None,  # keeps raw data unchanged
                     weights=None,
                     start_year=RAWINPUTFILE_YEAR)  # set raw input data year
    assert nonstd.array_length == RAWINPUTFILE_FUNITS
    calc = Calculator(policy=pol, records=nonstd,
                      sync_years=False)  # keeps raw data unchanged
    assert calc.current_year == RAWINPUTFILE_YEAR
    calc.calc_all()
    # no weights were supplied, so weighted totals are zero
    assert calc.weighted_total('e00200') == 0
    assert calc.total_weight() == 0
    varlist = ['RECID', 'MARS']
    pdf = calc.dataframe(varlist)
    assert isinstance(pdf, pd.DataFrame)
    assert pdf.shape == (RAWINPUTFILE_FUNITS, len(varlist))
    mars = calc.array('MARS')
    assert isinstance(mars, np.ndarray)
    assert mars.shape == (RAWINPUTFILE_FUNITS,)
    exp_iitax = np.zeros((nonstd.array_length,))
    assert np.allclose(calc.array('iitax'), exp_iitax)
    mtr_ptax, _, _ = calc.mtr(wrt_full_compensation=False)
    exp_mtr_ptax = np.zeros((nonstd.array_length,))
    exp_mtr_ptax.fill(0.153)
    assert np.allclose(mtr_ptax, exp_mtr_ptax)
REFORM_CONTENTS = """
// Example of a reform file suitable for read_json_param_objects().
// This JSON file can contain any number of trailing //-style comments, which
// will be removed before the contents are converted from JSON to a dictionary.
// Within each "policy" object, the primary keys are parameters and
// the secondary keys are years.
// Both the primary and secondary key values must be enclosed in quotes (").
// Boolean variables are specified as true or false (no quotes; all lowercase).
// Parameter code in the policy object is enclosed inside a pair of double
// pipe characters (||).
{
"policy": {
"_AMT_brk1": // top of first AMT tax bracket
{"2015": [200000],
"2017": [300000]
},
"_EITC_c": // maximum EITC amount by number of qualifying kids (0,1,2,3+)
{"2016": [[ 900, 5000, 8000, 9000]],
"2019": [[1200, 7000, 10000, 12000]]
},
"_II_em": // personal exemption amount (see indexing changes below)
{"2016": [6000],
"2018": [7500],
"2020": [9000]
},
"_II_em_cpi": // personal exemption amount indexing status
{"2016": false, // values in future years are same as this year value
"2018": true // values in future years indexed with this year as base
},
"_SS_Earnings_c": // social security (OASDI) maximum taxable earnings
{"2016": [300000],
"2018": [500000],
"2020": [700000]
},
"_AMT_em_cpi": // AMT exemption amount indexing status
{"2017": false, // values in future years are same as this year value
"2020": true // values in future years indexed with this year as base
}
}
}
"""
@pytest.fixture(scope='module', name='reform_file')
def fixture_reform_file():
    """Temporary file containing REFORM_CONTENTS; removed at teardown."""
    rfile = tempfile.NamedTemporaryFile(mode='a', delete=False)
    rfile.write(REFORM_CONTENTS)
    rfile.close()
    # must close and then yield for Windows platform
    yield rfile
    if os.path.isfile(rfile.name):
        try:
            os.remove(rfile.name)
        except OSError:
            pass  # sometimes we can't remove a generated temporary file
ASSUMP_CONTENTS = """
// Example of assump file suitable for the read_json_param_objects().
// This JSON file can contain any number of trailing //-style comments, which
// will be removed before the contents are converted from JSON to a dictionary.
// Within each "behavior", "consumption" and "growth" object, the
// primary keys are parameters and the secondary keys are years.
// Both the primary and secondary key values must be enclosed in quotes (").
// Boolean variables are specified as true or false (no quotes; all lowercase).
{
"consumption": { "_MPC_e18400": {"2018": [0.05]} },
"behavior": {},
"growdiff_baseline": {},
"growdiff_response": {},
"growmodel": {}
}
"""
@pytest.fixture(scope='module', name='assump_file')
def fixture_assump_file():
    """Temporary file containing ASSUMP_CONTENTS; removed at teardown."""
    afile = tempfile.NamedTemporaryFile(mode='a', delete=False)
    afile.write(ASSUMP_CONTENTS)
    afile.close()
    # must close and then yield for Windows platform
    yield afile
    if os.path.isfile(afile.name):
        try:
            os.remove(afile.name)
        except OSError:
            pass  # sometimes we can't remove a generated temporary file
@pytest.mark.parametrize("set_year", [False, True])
def test_read_json_reform_file_and_implement_reform(reform_file,
                                                    assump_file,
                                                    set_year):
    """Reform file values are implemented correctly, including indexing
    on/off transitions, whether or not the year is pre-set."""
    pol = Policy()
    if set_year:
        pol.set_year(2015)
    param_dict = Calculator.read_json_param_objects(reform_file.name,
                                                    assump_file.name)
    pol.implement_reform(param_dict['policy'])
    syr = pol.start_year
    # _AMT_brk1 is indexed between its two specified reform years
    amt_brk1 = pol._AMT_brk1
    assert amt_brk1[2015 - syr] == 200000
    assert amt_brk1[2016 - syr] > 200000
    assert amt_brk1[2017 - syr] == 300000
    assert amt_brk1[2018 - syr] > 300000
    # _II_em indexing is off 2016-2017 and back on from 2018
    ii_em = pol._II_em
    assert ii_em[2016 - syr] == 6000
    assert ii_em[2017 - syr] == 6000
    assert ii_em[2018 - syr] == 7500
    assert ii_em[2019 - syr] > 7500
    assert ii_em[2020 - syr] == 9000
    assert ii_em[2021 - syr] > 9000
    # _AMT_em indexing is off 2017-2019 and back on from 2020
    amt_em = pol._AMT_em
    assert amt_em[2016 - syr, 0] > amt_em[2015 - syr, 0]
    assert amt_em[2017 - syr, 0] > amt_em[2016 - syr, 0]
    assert amt_em[2018 - syr, 0] == amt_em[2017 - syr, 0]
    assert amt_em[2019 - syr, 0] == amt_em[2017 - syr, 0]
    assert amt_em[2020 - syr, 0] == amt_em[2017 - syr, 0]
    assert amt_em[2021 - syr, 0] > amt_em[2020 - syr, 0]
    assert amt_em[2022 - syr, 0] > amt_em[2021 - syr, 0]
    # current-law parameter untouched by the reform keeps its schedule
    add4aged = pol._ID_Medical_frt_add4aged
    assert add4aged[2015 - syr] == -0.025
    assert add4aged[2016 - syr] == -0.025
    assert add4aged[2017 - syr] == 0.0
    assert add4aged[2022 - syr] == 0.0
@pytest.fixture(scope='module', name='bad1reformfile')
def fixture_bad1reformfile():
    """Reform file with invalid JSON (single-quoted key)."""
    # specify JSON text for reform
    txt = """
{
  "policy": { // example of incorrect JSON because 'x' must be "x"
    'x': {"2014": [4000]}
  }
}
"""
    f = tempfile.NamedTemporaryFile(mode='a', delete=False)
    f.write(txt + '\n')
    f.close()
    # Must close and then yield for Windows platform
    yield f
    os.remove(f.name)
@pytest.fixture(scope='module', name='bad2reformfile')
def fixture_bad2reformfile():
    """Reform file that is valid JSON but lacks the "policy" key."""
    # specify JSON text for reform
    txt = """
{
  "title": "",
  "policyx": { // example of reform file not containing "policy" key
    "_SS_Earnings_c": {"2018": [9e99]}
  }
}
"""
    f = tempfile.NamedTemporaryFile(mode='a', delete=False)
    f.write(txt + '\n')
    f.close()
    # Must close and then yield for Windows platform
    yield f
    os.remove(f.name)
@pytest.fixture(scope='module', name='bad3reformfile')
def fixture_bad3reformfile():
    """Reform file that wrongly contains an assumption ("behavior") key."""
    # specify JSON text for reform
    txt = """
{
  "title": "",
  "policy": {
    "_SS_Earnings_c": {"2018": [9e99]}
  },
  "behavior": { // example of misplaced "behavior" key
  }
}
"""
    f = tempfile.NamedTemporaryFile(mode='a', delete=False)
    f.write(txt + '\n')
    f.close()
    # Must close and then yield for Windows platform
    yield f
    os.remove(f.name)
def test_read_bad_json_reform_file(bad1reformfile, bad2reformfile,
                                   bad3reformfile):
    """Every malformed reform argument raises ValueError, as do a
    nonexistent assump file name and a non-string assump argument."""
    bad_reform_args = (bad1reformfile.name,
                       bad2reformfile.name,
                       bad3reformfile.name,
                       list())
    for reform_arg in bad_reform_args:
        with pytest.raises(ValueError):
            Calculator.read_json_param_objects(reform_arg, None)
    for assump_arg in ('unknown_file_name', list()):
        with pytest.raises(ValueError):
            Calculator.read_json_param_objects(None, assump_arg)
@pytest.fixture(scope='module', name='bad1assumpfile')
def fixture_bad1assumpfile():
    """Assump file with invalid JSON (single-quoted key)."""
    # specify JSON text for assumptions
    txt = """
{
  "consumption": {},
  "behavior": { // example of incorrect JSON because 'x' must be "x"
    'x': {"2014": [0.25]}
  },
  "growdiff_baseline": {},
  "growdiff_response": {},
  "growmodel": {}
}
"""
    f = tempfile.NamedTemporaryFile(mode='a', delete=False)
    f.write(txt + '\n')
    f.close()
    # Must close and then yield for Windows platform
    yield f
    os.remove(f.name)
@pytest.fixture(scope='module', name='bad2assumpfile')
def fixture_bad2assumpfile():
    """Assump file that is valid JSON but lacks the "behavior" key."""
    # specify JSON text for assumptions
    txt = """
{
  "consumption": {},
  "behaviorx": {}, // example of assump file not containing "behavior" key
  "growdiff_baseline": {},
  "growdiff_response": {},
  "growmodel": {}
}
"""
    f = tempfile.NamedTemporaryFile(mode='a', delete=False)
    f.write(txt + '\n')
    f.close()
    # Must close and then yield for Windows platform
    yield f
    os.remove(f.name)
@pytest.fixture(scope='module', name='bad3assumpfile')
def fixture_bad3assumpfile():
    """Assump file that wrongly contains a "policy" key."""
    # specify JSON text for assump
    txt = """
{
  "consumption": {},
  "behavior": {},
  "growdiff_baseline": {},
  "growdiff_response": {},
  "policy": { // example of misplaced policy key
    "_SS_Earnings_c": {"2018": [9e99]}
  },
  "growmodel": {}
}
"""
    f = tempfile.NamedTemporaryFile(mode='a', delete=False)
    f.write(txt + '\n')
    f.close()
    # Must close and then yield for Windows platform
    yield f
    os.remove(f.name)
def test_read_bad_json_assump_file(bad1assumpfile, bad2assumpfile,
                                   bad3assumpfile):
    """Every malformed assumption argument raises ValueError."""
    bad_assump_args = (bad1assumpfile.name,
                       bad2assumpfile.name,
                       bad3assumpfile.name,
                       'unknown_file_name',
                       list())
    for assump_arg in bad_assump_args:
        with pytest.raises(ValueError):
            Calculator.read_json_param_objects(None, assump_arg)
def test_convert_parameter_dict():
    """Invalid key layouts raise; a well-formed dict converts to a dict."""
    bad_param_dicts = (
        {2013: {'2013': [40000]}},    # primary key is a year
        {'_II_em': {2013: [40000]}},  # secondary year key not a string
        {4567: {2013: [40000]}},      # primary key not a parameter name
        {'_II_em': 40000},            # value is not a year-keyed dict
    )
    for bad in bad_param_dicts:
        with pytest.raises(ValueError):
            Calculator._convert_parameter_dict(bad)
    converted = Calculator._convert_parameter_dict(
        {'_II_em': {'2013': [40000]}})
    assert isinstance(converted, dict)
def test_calc_all(reform_file, rawinputfile):
    """A reform read from file can be applied to raw input data without
    producing any reform warnings."""
    cyr = 2016
    pol = Policy()
    param_dict = Calculator.read_json_param_objects(reform_file.name, None)
    pol.implement_reform(param_dict['policy'])
    pol.set_year(cyr)
    nonstd = Records(data=rawinputfile.name, gfactors=None,
                     weights=None, start_year=cyr)
    assert nonstd.array_length == RAWINPUTFILE_FUNITS
    calc = Calculator(policy=pol, records=nonstd,
                      sync_years=False)  # keeps raw data unchanged
    assert calc.current_year == cyr
    assert calc.reform_warnings == ''
def test_translate_json_reform_suffixes_mars_non_indexed():
    """MARS-suffixed parameter names (_joint/_separate) translate to the
    same parameter dict as specifying the full vector explicitly."""
    # test read_json_param_objects()
    # using MARS-indexed parameter suffixes
    json1 = """{"policy": {
    "_II_em": {"2020": [20000], "2015": [15000]},
    "_AMEDT_ec_joint": {"2018": [400000], "2016": [300000]},
    "_AMEDT_ec_separate": {"2017": [150000], "2019": [200000]}
    }}"""
    pdict1 = Calculator.read_json_param_objects(reform=json1, assump=None)
    rdict1 = pdict1['policy']
    json2 = """{"policy": {
    "_AMEDT_ec": {"2016": [[200000, 300000, 125000, 200000, 200000]],
                  "2017": [[200000, 300000, 150000, 200000, 200000]],
                  "2018": [[200000, 400000, 150000, 200000, 200000]],
                  "2019": [[200000, 400000, 200000, 200000, 200000]]},
    "_II_em": {"2015": [15000], "2020": [20000]}
    }}"""
    pdict2 = Calculator.read_json_param_objects(reform=json2, assump=None)
    rdict2 = pdict2['policy']
    assert len(rdict2) == len(rdict1)
    for year in rdict2.keys():
        if '_II_em' in rdict2[year].keys():
            assert np.allclose(rdict1[year]['_II_em'],
                               rdict2[year]['_II_em'],
                               atol=0.01, rtol=0.0)
        if '_AMEDT_ec' in rdict2[year].keys():
            assert np.allclose(rdict1[year]['_AMEDT_ec'],
                               rdict2[year]['_AMEDT_ec'],
                               atol=0.01, rtol=0.0)
def test_translate_json_reform_suffixes_eic():
    """EIC-suffixed parameter names (_0kids ... _3+kids) translate to the
    same parameter dict as specifying the full vector explicitly."""
    # test read_json_param_objects(...)
    # using EIC-indexed parameter suffixes
    json1 = """{"policy": {
    "_II_em": {"2020": [20000], "2015": [15000]},
    "_EITC_c_0kids": {"2018": [510], "2019": [510]},
    "_EITC_c_1kid": {"2019": [3400], "2018": [3400]},
    "_EITC_c_2kids": {"2018": [5616], "2019": [5616]},
    "_EITC_c_3+kids": {"2019": [6318], "2018": [6318]}
    }}"""
    pdict1 = Calculator.read_json_param_objects(reform=json1, assump=None)
    rdict1 = pdict1['policy']
    json2 = """{"policy": {
    "_EITC_c": {"2019": [[510, 3400, 5616, 6318]],
                "2018": [[510, 3400, 5616, 6318]]},
    "_II_em": {"2020": [20000], "2015": [15000]}
    }}"""
    pdict2 = Calculator.read_json_param_objects(reform=json2, assump=None)
    rdict2 = pdict2['policy']
    assert len(rdict2) == len(rdict1)
    for year in rdict2.keys():
        if '_II_em' in rdict2[year].keys():
            assert np.allclose(rdict1[year]['_II_em'],
                               rdict2[year]['_II_em'],
                               atol=0.01, rtol=0.0)
        if '_EITC_c' in rdict2[year].keys():
            assert np.allclose(rdict1[year]['_EITC_c'],
                               rdict2[year]['_EITC_c'],
                               atol=0.01, rtol=0.0)
def test_translate_json_reform_suffixes_idedtype():
    """Itemized-deduction-type suffixes (_medical, _casualty, ...) translate
    to the same parameter dict as specifying the full switch vector."""
    # test read_json_param_objects(...)
    # using idedtype-indexed parameter suffixes
    json1 = """{"policy": {
    "_ID_BenefitCap_rt": {"2019": [0.2]},
    "_ID_BenefitCap_Switch_medical": {"2019": [false]},
    "_ID_BenefitCap_Switch_casualty": {"2019": [false]},
    "_ID_BenefitCap_Switch_misc": {"2019": [false]},
    "_ID_BenefitCap_Switch_interest": {"2019": [false]},
    "_ID_BenefitCap_Switch_charity": {"2019": [false]},
    "_II_em": {"2020": [20000], "2015": [15000]}
    }}"""
    pdict1 = Calculator.read_json_param_objects(reform=json1, assump=None)
    rdict1 = pdict1['policy']
    json2 = """{"policy": {
    "_II_em": {"2020": [20000], "2015": [15000]},
    "_ID_BenefitCap_Switch": {
        "2019": [[false, true, true, false, false, false, false]]
    },
    "_ID_BenefitCap_rt": {"2019": [0.2]}
    }}"""
    pdict2 = Calculator.read_json_param_objects(reform=json2, assump=None)
    rdict2 = pdict2['policy']
    assert len(rdict2) == len(rdict1)
    for year in rdict2.keys():
        if '_II_em' in rdict2[year].keys():
            assert np.allclose(rdict1[year]['_II_em'],
                               rdict2[year]['_II_em'],
                               atol=0.01, rtol=0.0)
        if '_ID_BenefitCap_rt' in rdict2[year].keys():
            assert np.allclose(rdict1[year]['_ID_BenefitCap_rt'],
                               rdict2[year]['_ID_BenefitCap_rt'],
                               atol=0.01, rtol=0.0)
        if '_ID_BenefitCap_Switch' in rdict2[year].keys():
            assert np.allclose(rdict1[year]['_ID_BenefitCap_Switch'],
                               rdict2[year]['_ID_BenefitCap_Switch'],
                               atol=0.01, rtol=0.0)
def test_read_json_param_with_suffixes_and_errors():
    """Suffixed parameters combined with invalid values still parse and
    produce parameter errors/warnings rather than crashing."""
    # test interaction of policy parameter suffixes and reform errors
    # (fails without 0.10.2 bug fix as reported by Hank Doupe in PB PR#641)
    reform = {
        u'policy': {
            u'_II_brk4_separate': {u'2017': [5000.0]},
            u'_STD_separate': {u'2017': [8000.0]},
            u'_STD_single': {u'2018': [1000.0]},
            u'_II_brk2_headhousehold': {u'2017': [1000.0]},
            u'_II_brk4_single': {u'2017': [500.0]},
            u'_STD_joint': {u'2017': [10000.0], u'2020': [150.0]},
            u'_II_brk2_separate': {u'2017': [1000.0]},
            u'_II_brk2_single': {u'2017': [1000.0]},
            u'_II_brk2_joint': {u'2017': [1000.0]},
            u'_FICA_ss_trt': {u'2017': [-1.0], u'2019': [0.1]},
            u'_II_brk4_headhousehold': {u'2017': [500.0]},
            u'_STD_headhousehold': {u'2017': [10000.0], u'2020': [150.0]},
            u'_II_brk4_joint': {u'2017': [500.0]},
            u'_ID_BenefitSurtax_Switch_medical': {u'2017': [True]}
        }
    }
    json_reform = json.dumps(reform)
    params = Calculator.read_json_param_objects(json_reform, None)
    assert isinstance(params, dict)
    pol = Policy()
    pol.ignore_reform_errors()
    pol.implement_reform(params['policy'],
                         print_warnings=False, raise_errors=False)
    assert len(pol.parameter_errors) > 0
    assert len(pol.parameter_warnings) > 0
def test_noreform_documentation():
    """Empty reform/assumption objects produce the canonical
    'using current-law policy' documentation text."""
    reform_json = """
{
"policy": {}
}
"""
    assump_json = """
{
"consumption": {},
"behavior": {},
"growdiff_baseline": {},
"growdiff_response": {},
"growmodel": {}
}
"""
    params = Calculator.read_json_param_objects(reform_json, assump_json)
    assert isinstance(params, dict)
    actual_doc = Calculator.reform_documentation(params)
    expected_doc = (
        'REFORM DOCUMENTATION\n'
        'Baseline Growth-Difference Assumption Values by Year:\n'
        'none: using default baseline growth assumptions\n'
        'Policy Reform Parameter Values by Year:\n'
        'none: using current-law policy parameters\n'
    )
    assert actual_doc == expected_doc
def test_reform_documentation():
    """reform_documentation returns a string for a multi-parameter reform
    with a baseline growth-difference assumption."""
    reform_json = """
{
"policy": {
  "_II_em_cpi": {"2016": false,
                 "2018": true},
  "_II_em": {"2016": [5000],
             "2018": [6000],
             "2020": [7000]},
  "_EITC_indiv": {"2017": [true]},
  "_STD_Aged_cpi": {"2016": false},
  "_STD_Aged": {"2016": [[1600, 1300, 1300, 1600, 1600]],
                "2020": [[2000, 2000, 2000, 2000, 2000]]},
  "_ID_BenefitCap_Switch_medical": {"2020": [false]},
  "_ID_BenefitCap_Switch_casualty": {"2020": [false]},
  "_ID_BenefitCap_Switch_misc": {"2020": [false]},
  "_ID_BenefitCap_Switch_interest": {"2020": [false]},
  "_ID_BenefitCap_Switch_charity": {"2020": [false]}
}
}
"""
    assump_json = """
{
"consumption": {},
"behavior": {},
// increase baseline inflation rate by one percentage point in 2014+
// (has no effect on known policy parameter values)
"growdiff_baseline": {"_ACPIU": {"2014": [0.01]}},
"growdiff_response": {},
"growmodel": {}
}
"""
    params = Calculator.read_json_param_objects(reform_json, assump_json)
    assert isinstance(params, dict)
    doc = Calculator.reform_documentation(params)
    assert isinstance(doc, six.string_types)
    dump = False  # set to True to print documentation and force test failure
    if dump:
        print(doc)
        assert 1 == 2
def test_distribution_tables(cps_subsample):
    """Distribution tables: baseline-only call returns one DataFrame and
    None; supplying a comparison Calculator (even self) returns two; after
    a UBI reform both tables are still DataFrames."""
    pol = Policy()
    recs = Records.cps_constructor(data=cps_subsample)
    calc1 = Calculator(policy=pol, records=recs)
    assert calc1.current_year == 2014
    calc1.calc_all()
    dt1, dt2 = calc1.distribution_tables(None, 'weighted_deciles')
    assert isinstance(dt1, pd.DataFrame)
    assert dt2 is None
    dt1, dt2 = calc1.distribution_tables(calc1, 'weighted_deciles')
    assert isinstance(dt1, pd.DataFrame)
    assert isinstance(dt2, pd.DataFrame)
    # universal-basic-income reform changes the distribution being tabled
    reform = {2014: {'_UBI_u18': [1000],
                     '_UBI_1820': [1000],
                     '_UBI_21': [1000]}}
    pol.implement_reform(reform)
    assert not pol.parameter_errors
    calc2 = Calculator(policy=pol, records=recs)
    calc2.calc_all()
    dt1, dt2 = calc1.distribution_tables(calc2, 'weighted_deciles')
    assert isinstance(dt1, pd.DataFrame)
    assert isinstance(dt2, pd.DataFrame)
def test_difference_table(cps_subsample):
    """Difference table between baseline and a payroll-tax-cap reform
    is returned as a DataFrame."""
    cyr = 2014
    pol = Policy()
    recs = Records.cps_constructor(data=cps_subsample)
    calc1 = Calculator(policy=pol, records=recs)
    assert calc1.current_year == cyr
    # effectively remove the OASDI maximum-taxable-earnings cap
    reform = {cyr: {'_SS_Earnings_c': [9e99]}}
    pol.implement_reform(reform)
    calc2 = Calculator(policy=pol, records=recs)
    assert calc2.current_year == cyr
    calc1.calc_all()
    calc2.calc_all()
    diff = calc1.difference_table(calc2, 'weighted_deciles', 'iitax')
    assert isinstance(diff, pd.DataFrame)
def test_diagnostic_table(cps_subsample):
    """A three-year diagnostic table is returned as a DataFrame."""
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    table = Calculator(policy=Policy(), records=rec).diagnostic_table(3)
    assert isinstance(table, pd.DataFrame)
def test_mtr_graph(cps_subsample):
    """Marginal-tax-rate graphs are produced for two different
    income-measure/mtr-measure combinations."""
    recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    calc = Calculator(policy=Policy(), records=recs)
    fig = calc.mtr_graph(calc,
                         mars=2,
                         income_measure='wages',
                         mtr_measure='ptax')
    assert fig
    fig = calc.mtr_graph(calc,
                         income_measure='agi',
                         mtr_measure='itax')
    assert fig
def test_atr_graph(cps_subsample):
    """Average-tax-rate graphs are produced for itax and ptax measures."""
    rec = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    calc = Calculator(policy=Policy(), records=rec)
    for graph_kwargs in ({'mars': 2, 'atr_measure': 'itax'},
                         {'atr_measure': 'ptax'}):
        assert calc.atr_graph(calc, **graph_kwargs)
def test_privacy_of_embedded_objects(cps_subsample):
    """Embedded objects are private: outside a class body ``calc.__policy``
    is NOT name-mangled, so each access looks up a literal dunder attribute
    that does not exist and raises AttributeError."""
    recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    calc = Calculator(policy=Policy(), records=recs)
    with pytest.raises(AttributeError):
        cyr = calc.__policy.current_year
    with pytest.raises(AttributeError):
        wgh = calc.__records.s006
    with pytest.raises(AttributeError):
        cyr = calc.__consumption.current_year
    with pytest.raises(AttributeError):
        cyr = calc.__behavior.current_year
def test_n65(cps_subsample):
    """Weighted count of age-65+ individuals exceeds a sanity threshold."""
    recs = Records.cps_constructor(data=cps_subsample, no_benefits=True)
    calc = Calculator(policy=Policy(), records=recs)
    assert calc.n65().sum() > 1500
| true | true |
f723b7721b96101a76c0e12c365a4083110cae7a | 3,147 | py | Python | drafthub/core/templatetags/drafthub_extras.py | felipelincoln/drafthub | e8a6055205646d45c26419938c3277f544931925 | [
"MIT"
] | 3 | 2020-05-13T09:54:27.000Z | 2021-01-06T17:46:53.000Z | drafthub/core/templatetags/drafthub_extras.py | felipelincoln/drafthub | e8a6055205646d45c26419938c3277f544931925 | [
"MIT"
] | 3 | 2020-05-20T17:27:10.000Z | 2020-05-20T20:41:37.000Z | drafthub/core/templatetags/drafthub_extras.py | felipelincoln/drafthub | e8a6055205646d45c26419938c3277f544931925 | [
"MIT"
] | null | null | null | import requests
import re
from django import template
from django.utils.safestring import mark_safe
import markdown as _markdown
import bleach
from pymdownx import emoji
from drafthub.draft.utils import get_data_from_url
# Shared configuration for all markdown-rendering filters in this module.
# Extensions/configs follow the pymdownx documentation.
markdown_kwargs = {
    'extensions':[
        'pymdownx.superfences',
        'markdown.extensions.tables',
        'pymdownx.betterem',
        'pymdownx.tilde',
        'pymdownx.emoji',
        'pymdownx.tasklist',
        'pymdownx.magiclink',
        'pymdownx.arithmatex',
    ],
    'extension_configs':{
        'pymdownx.tilde': {
            'subscript': False
        },
        'pymdownx.emoji':{
            # Render gemoji short-codes as fixed-size PNG images.
            'emoji_index': emoji.gemoji,
            'emoji_generator': emoji.to_png,
            'alt': 'short',
            'options': {
                'attributes': {
                    'align': 'absmiddle',
                    'height': '20px',
                    'width': '20px'
                },
            }
        },
        'pymdownx.arithmatex':{
            'generic': True,
        }
    }
}
# Whitelist passed to bleach.clean() when sanitizing rendered HTML.
bleach_kwargs = {
    'tags': [
        'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
        'b', 'i', 'strong', 'em', 'tt', 'del',
        'p', 'br',
        'span', 'div', 'blockquote', 'code', 'hr', 'pre',
        'ul', 'ol', 'li', 'dd', 'dt', 'dl',
        'img',
        'a',
        'sub', 'sup',
        'table', 'thead','td', 'tr', 'th', 'tbody',
        'input', # allow only type, checked and disabled
    ],
    'attributes':{
        # NOTE(review): this callable allows EVERY attribute on every tag
        # (bleach calls it per-attribute and any truthy return accepts it),
        # which weakens the sanitization above — confirm this is intended.
        '*': lambda *_: 1,
    }
}
register = template.Library()
@register.filter
@mark_safe
def markdown(github_url):
    """Fetch a markdown document from GitHub and render it to sanitized HTML.

    Relative links inside the document (anything not starting with
    ``http(s)://`` or ``#``) are rewritten against the file's parent
    directory so they keep working from the rendered page.
    """
    data = get_data_from_url(github_url)
    raw = data['raw']
    login = data['login']
    repo = data['repo']
    parent = data['parent']
    # magiclink needs the owning user/repo so shorthand refs resolve.
    markdown_kwargs['extension_configs']['pymdownx.magiclink'] = {
        'repo_url_shortener': True,
        'repo_url_shorthand': True,
        'social_url_shorthand': True,
        'provider': 'github',
        'user': login,
        'repo': repo,
    }
    url_response = requests.get(raw)
    unsafe_content = url_response.text
    # Raw string: the previous non-raw pattern relied on '\[' etc. surviving
    # as literal backslashes, which emits invalid-escape-sequence warnings
    # (a SyntaxWarning on modern Python).  Pattern value is unchanged.
    re_links = r'\[(.*)\]\((?!https?:\/\/|#)(.+)\)'
    match_links = re.compile(re_links)
    content_transform = match_links.sub(
        r'[\1](' + parent + r'\2)', unsafe_content)
    markdown_content = _markdown.markdown(content_transform, **markdown_kwargs)
    sanitized_content = bleach.clean(markdown_content, **bleach_kwargs)
    return sanitized_content
@register.filter
@mark_safe
def plaintext_markdown(text):
    """Render raw markdown *text* to HTML sanitized with the module whitelist."""
    rendered = _markdown.markdown(text, **markdown_kwargs)
    return bleach.clean(rendered, **bleach_kwargs)
@register.filter
def count_range(n):
    """Return the inclusive 1-based range 1..n for template iteration."""
    return range(1, n + 1)
@register.filter
def in_queryset(blog, queryset):
    """True when *blog* is contained in *queryset*."""
    return blog in queryset
@register.filter
def get_model_name(queryset):
    """Model name of the first object in a queryset (assumes it is non-empty)."""
    first_obj = queryset[0]
    return first_obj._meta.model_name
@register.filter
def timesince_format(value):
    """Keep only the leading unit of a comma-separated timespan and add ' ago'.

    An empty input yields an empty string unchanged.
    """
    largest_unit = value.partition(',')[0]
    return largest_unit + ' ago' if largest_unit else largest_unit
@register.filter
def js_bool(value):
    """Render Python truthiness as a JavaScript boolean literal."""
    return 'true' if value else 'false'
| 24.395349 | 79 | 0.5796 | import requests
import re
from django import template
from django.utils.safestring import mark_safe
import markdown as _markdown
import bleach
from pymdownx import emoji
from drafthub.draft.utils import get_data_from_url
markdown_kwargs = {
'extensions':[
'pymdownx.superfences',
'markdown.extensions.tables',
'pymdownx.betterem',
'pymdownx.tilde',
'pymdownx.emoji',
'pymdownx.tasklist',
'pymdownx.magiclink',
'pymdownx.arithmatex',
],
'extension_configs':{
'pymdownx.tilde': {
'subscript': False
},
'pymdownx.emoji':{
'emoji_index': emoji.gemoji,
'emoji_generator': emoji.to_png,
'alt': 'short',
'options': {
'attributes': {
'align': 'absmiddle',
'height': '20px',
'width': '20px'
},
}
},
'pymdownx.arithmatex':{
'generic': True,
}
}
}
bleach_kwargs = {
'tags': [
'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'b', 'i', 'strong', 'em', 'tt', 'del',
'p', 'br',
'span', 'div', 'blockquote', 'code', 'hr', 'pre',
'ul', 'ol', 'li', 'dd', 'dt', 'dl',
'img',
'a',
'sub', 'sup',
'table', 'thead','td', 'tr', 'th', 'tbody',
'input',
],
'attributes':{
'*': lambda *_: 1,
}
}
register = template.Library()
@register.filter
@mark_safe
def markdown(github_url):
url = github_url
data = get_data_from_url(url)
raw = data['raw']
login = data['login']
repo = data['repo']
parent = data['parent']
markdown_kwargs['extension_configs']['pymdownx.magiclink'] = {
'repo_url_shortener': True,
'repo_url_shorthand': True,
'social_url_shorthand': True,
'provider': 'github',
'user': login,
'repo': repo,
}
url_response = requests.get(raw)
unsafe_content = url_response.text
re_links = '\[(.*)\]\((?!https?:\/\/|#)(.+)\)'
match_links = re.compile(re_links)
content_transform = match_links.sub(
r'[\1](' + parent + r'\2)', unsafe_content)
markdown_content = _markdown.markdown(content_transform, **markdown_kwargs)
sanitized_content = bleach.clean(markdown_content, **bleach_kwargs)
return sanitized_content
@register.filter
@mark_safe
def plaintext_markdown(text):
markdown_content = _markdown.markdown(text, **markdown_kwargs)
sanitized_content = bleach.clean(markdown_content, **bleach_kwargs)
return sanitized_content
@register.filter
def count_range(n):
return range(1,n+1)
@register.filter
def in_queryset(blog, queryset):
return blog in queryset
@register.filter
def get_model_name(queryset):
return queryset[0]._meta.model_name
@register.filter
def timesince_format(value):
value_str = value.split(',')[0]
if value_str:
value_str = value_str + ' ago'
return value_str
@register.filter
def js_bool(value):
return str(bool(value)).lower()
| true | true |
f723b7b58e53b5843835f93d00c72899fbaaa3ce | 16,086 | py | Python | models/model/seq2seq_im_mask_cnn_finetune.py | shivgarg/alfred_transformers | 3eab07d3a218eb9b809dec8b7120b92ebd00c890 | [
"MIT"
] | null | null | null | models/model/seq2seq_im_mask_cnn_finetune.py | shivgarg/alfred_transformers | 3eab07d3a218eb9b809dec8b7120b92ebd00c890 | [
"MIT"
] | null | null | null | models/model/seq2seq_im_mask_cnn_finetune.py | shivgarg/alfred_transformers | 3eab07d3a218eb9b809dec8b7120b92ebd00c890 | [
"MIT"
] | null | null | null | import os
import torch
import numpy as np
import nn.vnn as vnn
import collections
from torch import nn
from torch.nn import functional as F
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
from model.seq2seq import Module as Base
from models.utils.metric import compute_f1, compute_exact
from gen.utils.image_util import decompress_mask
from torchvision import transforms
from PIL import Image
class Module(Base):
    def __init__(self, args, vocab):
        '''
        Seq2Seq agent: bidirectional LSTM language encoder + convolutional
        frame/mask decoder, with optional subgoal/progress auxiliary heads.
        '''
        super().__init__(args, vocab)
        # encoder and self-attention over goal+instruction tokens
        self.enc = nn.LSTM(args.demb, args.dhid, bidirectional=True, batch_first=True)
        self.enc_att = vnn.SelfAttn(args.dhid*2)
        # subgoal monitoring: enabled when either auxiliary loss has weight
        self.subgoal_monitoring = (self.args.pm_aux_loss_wt > 0 or self.args.subgoal_aux_loss_wt > 0)
        # frame mask decoder (progress-monitor variant when monitoring is on)
        decoder = vnn.ConvFrameMaskDecoderProgressMonitorFinetune if self.subgoal_monitoring else vnn.ConvFrameMaskDecoder
        self.dec = decoder(self.emb_action_low, args.dframe, 2*args.dhid,
                           pframe=args.pframe,
                           attn_dropout=args.attn_dropout,
                           hstate_dropout=args.hstate_dropout,
                           actor_dropout=args.actor_dropout,
                           input_dropout=args.input_dropout,
                           teacher_forcing=args.dec_teacher_forcing)
        # dropouts (lang dropout is in-place; see encode_lang)
        self.vis_dropout = nn.Dropout(args.vis_dropout)
        self.lang_dropout = nn.Dropout(args.lang_dropout, inplace=True)
        self.input_dropout = nn.Dropout(args.input_dropout)
        # internal states used for real-time stepping during eval
        self.state_t = None
        self.e_t = None
        self.test_mode = False
        # bce reconstruction loss (per-element; reduction handled downstream)
        self.bce_with_logits = torch.nn.BCEWithLogitsLoss(reduction='none')
        self.mse_loss = torch.nn.MSELoss(reduction='none')
        # paths
        self.root_path = os.getcwd()
        self.feat_pt = 'feat_conv.pt'
        # params
        self.max_subgoals = 25
        self.max_episode_len = args.max_episode_len
        # reset model (initializes r_state for step())
        self.reset()
    def featurize(self, batch, load_mask=True, load_frames=True):
        '''
        tensorize and pad batch input

        Builds a dict of padded tensors per feature key: language tokens
        (packed for the LSTM), raw RGB frames (loaded and normalized here
        rather than from precomputed ResNet features), low-level actions,
        interaction masks, and optional auxiliary supervision signals.
        Episodes are truncated to self.max_episode_len.
        '''
        device = torch.device('cuda') if self.args.gpu else torch.device('cpu')
        feat = collections.defaultdict(list)
        for ex in batch:
            ###########
            # auxillary
            ###########
            if not self.test_mode:
                # subgoal completion supervision (fraction of max subgoals)
                if self.args.subgoal_aux_loss_wt > 0:
                    feat['subgoals_completed'].append(np.array(ex['num']['low_to_high_idx']) / self.max_subgoals)
                # progress monitor supervision (fraction of actions done)
                if self.args.pm_aux_loss_wt > 0:
                    num_actions = len([a for sg in ex['num']['action_low'] for a in sg])
                    subgoal_progress = [(i+1)/float(num_actions) for i in range(num_actions)]
                    feat['subgoal_progress'].append(subgoal_progress)
            #########
            # inputs
            #########
            # serialize segments (flatten per-subgoal lists in place)
            self.serialize_lang_action(ex)
            # goal and instr language
            lang_goal, lang_instr = ex['num']['lang_goal'], ex['num']['lang_instr']
            # zero inputs if specified (ablation flags)
            lang_goal = self.zero_input(lang_goal) if self.args.zero_goal else lang_goal
            lang_instr = self.zero_input(lang_instr) if self.args.zero_instr else lang_instr
            # append goal + instr
            lang_goal_instr = lang_goal + lang_instr
            feat['lang_goal_instr'].append(lang_goal_instr)
            episode_len = 0
            # load raw images from disk (CNN is finetuned, so no cached features)
            if load_frames and not self.test_mode:
                root = self.get_task_root(ex)
                #im = torch.load(os.path.join(root, self.feat_pt))
                im = []
                path = "{}/{}".format(root,'raw_images')
                imgs = sorted(os.listdir(path))
                # ImageNet normalization to match the pretrained backbone
                tfms = transforms.Compose([transforms.Resize(224), transforms.ToTensor(),
                        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),])
                for img in imgs:
                    im.append(tfms(Image.open("{}/{}".format(path,img))))
                im = torch.stack(im)
                num_low_actions = len(ex['plan']['low_actions'])
                num_feat_frames = im.shape[0]
                if num_low_actions != num_feat_frames:
                    # keep the FIRST frame observed for each low-level action
                    keep = [None] * len(ex['plan']['low_actions'])
                    for i, d in enumerate(ex['images']):
                        # only add frames linked with low-level actions (i.e. skip filler frames like smooth rotations and dish washing)
                        if keep[d['low_idx']] is None:
                            keep[d['low_idx']] = im[i]
                    keep.append(keep[-1]) # stop frame
                    episode_len = min(self.max_episode_len, len(keep))
                    keep = keep[:episode_len]
                    feat['frames'].append(torch.stack(keep, dim=0))
                else:
                    episode_len = min(self.max_episode_len, len(im))
                    im = im[:episode_len]
                    feat['frames'].append(torch.cat([im, im[-1].unsqueeze(0)], dim=0)) # add stop frame
            #########
            # outputs
            #########
            # truncate auxiliary targets to the (possibly clipped) episode
            if self.args.subgoal_aux_loss_wt > 0:
                feat['subgoals_completed'][-1] = feat['subgoals_completed'][-1][:episode_len]
            if self.args.pm_aux_loss_wt > 0:
                feat['subgoal_progress'][-1] = feat['subgoal_progress'][-1][:episode_len]
            if not self.test_mode:
                # low-level action
                feat['action_low'].append([a['action'] for a in ex['num']['action_low']][:episode_len])
                # low-level action mask
                if load_mask:
                    feat['action_low_mask'].append([self.decompress_mask(a['mask']) for i,a in enumerate(ex['num']['action_low']) if a['mask'] is not None and i<episode_len])
                # low-level valid interact
                feat['action_low_valid_interact'].append([a['valid_interact'] for a in ex['num']['action_low']][:episode_len])
        # tensorization and padding
        for k, v in feat.items():
            if k in {'lang_goal_instr'}:
                # language embedding and padding, then packed for the LSTM
                seqs = [torch.tensor(vv, device=device) for vv in v]
                pad_seq = pad_sequence(seqs, batch_first=True, padding_value=self.pad)
                seq_lengths = np.array(list(map(len, v)))
                embed_seq = self.emb_word(pad_seq)
                packed_input = pack_padded_sequence(embed_seq, seq_lengths, batch_first=True, enforce_sorted=False)
                feat[k] = packed_input
            elif k in {'action_low_mask'}:
                # mask padding: kept as a list (variable counts per episode)
                seqs = [torch.tensor(vv, device=device, dtype=torch.float) for vv in v]
                feat[k] = seqs
            elif k in {'subgoal_progress', 'subgoals_completed'}:
                # auxillary padding
                seqs = [torch.tensor(vv, device=device, dtype=torch.float) for vv in v]
                pad_seq = pad_sequence(seqs, batch_first=True, padding_value=self.pad)
                feat[k] = pad_seq
            else:
                # default: tensorize and pad sequence (frames stay float)
                seqs = [torch.tensor(vv, device=device, dtype=torch.float if ('frames' in k) else torch.long) for vv in v]
                pad_seq = pad_sequence(seqs, batch_first=True, padding_value=self.pad)
                feat[k] = pad_seq
        return feat
def serialize_lang_action(self, feat):
'''
append segmented instr language and low-level actions into single sequences
'''
is_serialized = not isinstance(feat['num']['lang_instr'][0], list)
if not is_serialized:
feat['num']['lang_instr'] = [word for desc in feat['num']['lang_instr'] for word in desc]
if not self.test_mode:
feat['num']['action_low'] = [a for a_group in feat['num']['action_low'] for a in a_group]
def decompress_mask(self, compressed_mask):
'''
decompress mask from json files
'''
mask = np.array(decompress_mask(compressed_mask))
mask = np.expand_dims(mask, axis=0)
return mask
def forward(self, feat, max_decode=300):
cont_lang, enc_lang = self.encode_lang(feat)
state_0 = cont_lang, torch.zeros_like(cont_lang)
frames = self.vis_dropout(feat['frames'])
res = self.dec(enc_lang, frames, max_decode=self.max_episode_len, gold=feat['action_low'], state_0=state_0)
feat.update(res)
return feat
def encode_lang(self, feat):
'''
encode goal+instr language
'''
emb_lang_goal_instr = feat['lang_goal_instr']
self.lang_dropout(emb_lang_goal_instr.data)
enc_lang_goal_instr, _ = self.enc(emb_lang_goal_instr)
enc_lang_goal_instr, _ = pad_packed_sequence(enc_lang_goal_instr, batch_first=True)
self.lang_dropout(enc_lang_goal_instr)
cont_lang_goal_instr = self.enc_att(enc_lang_goal_instr)
return cont_lang_goal_instr, enc_lang_goal_instr
def reset(self):
'''
reset internal states (used for real-time execution during eval)
'''
self.r_state = {
'state_t': None,
'e_t': None,
'cont_lang': None,
'enc_lang': None
}
    def step(self, feat, prev_action=None):
        '''
        forward the model for a single time-step (used for real-time execution during eval)

        Mutates self.r_state (language encodings, decoder hidden state,
        previous-action embedding) across calls; call reset() between episodes.
        '''
        # encode language features (once per episode, cached in r_state)
        if self.r_state['cont_lang'] is None and self.r_state['enc_lang'] is None:
            self.r_state['cont_lang'], self.r_state['enc_lang'] = self.encode_lang(feat)
        # initialize embedding and hidden states on the first step
        if self.r_state['e_t'] is None and self.r_state['state_t'] is None:
            self.r_state['e_t'] = self.dec.go.repeat(self.r_state['enc_lang'].size(0), 1)
            self.r_state['state_t'] = self.r_state['cont_lang'], torch.zeros_like(self.r_state['cont_lang'])
        # previous action embedding (caller-supplied action overrides cache)
        e_t = self.embed_action(prev_action) if prev_action is not None else self.r_state['e_t']
        # decode and save embedding and hidden states
        out_action_low, out_action_low_mask, state_t, *_ = self.dec.step(self.r_state['enc_lang'], feat['frames'][:, 0], e_t=e_t, state_tm1=self.r_state['state_t'])
        # save states; next e_t embeds the argmax action just predicted
        self.r_state['state_t'] = state_t
        self.r_state['e_t'] = self.dec.emb(out_action_low.max(1)[1])
        # output formatting (add a time dimension of length 1)
        feat['out_action_low'] = out_action_low.unsqueeze(0)
        feat['out_action_low_mask'] = out_action_low_mask.unsqueeze(0)
        return feat
def extract_preds(self, out, batch, feat, clean_special_tokens=True):
'''
output processing
'''
pred = {}
for ex, alow, alow_mask in zip(batch, feat['out_action_low'].max(2)[1].tolist(), feat['out_action_low_mask']):
# remove padding tokens
if self.pad in alow:
pad_start_idx = alow.index(self.pad)
alow = alow[:pad_start_idx]
alow_mask = alow_mask[:pad_start_idx]
if clean_special_tokens:
# remove <<stop>> tokens
if self.stop_token in alow:
stop_start_idx = alow.index(self.stop_token)
alow = alow[:stop_start_idx]
alow_mask = alow_mask[:stop_start_idx]
# index to API actions
words = self.vocab['action_low'].index2word(alow)
# sigmoid preds to binary mask
alow_mask = F.sigmoid(alow_mask)
p_mask = [(alow_mask[t] > 0.5).cpu().numpy() for t in range(alow_mask.shape[0])]
task_id_ann = self.get_task_and_ann_id(ex)
pred[task_id_ann] = {
'action_low': ' '.join(words),
'action_low_mask': p_mask,
}
return pred
def embed_action(self, action):
'''
embed low-level action
'''
device = torch.device('cuda') if self.args.gpu else torch.device('cpu')
action_num = torch.tensor(self.vocab['action_low'].word2index(action), device=device)
action_emb = self.dec.emb(action_num).unsqueeze(0)
return action_emb
    def compute_loss(self, out, batch, feat):
        '''
        loss function for Seq2Seq agent

        Returns a dict of weighted loss terms: action classification, mask
        reconstruction (only for valid interaction steps), and optional
        subgoal/progress auxiliary regressions.
        '''
        losses = dict()
        # GT and predictions
        p_alow = out['out_action_low'].view(-1, len(self.vocab['action_low']))
        l_alow = feat['action_low'].view(-1)
        p_alow_mask = out['out_action_low_mask']
        valid = feat['action_low_valid_interact']
        # action loss: per-step CE, padded positions zeroed before the mean
        # NOTE(review): .mean() divides by the full (incl. padded) length,
        # not by the number of valid steps — confirm this weighting is intended.
        pad_valid = (l_alow != self.pad)
        alow_loss = F.cross_entropy(p_alow, l_alow, reduction='none')
        alow_loss *= pad_valid.float()
        alow_loss = alow_loss.mean()
        losses['action_low'] = alow_loss * self.args.action_loss_wt
        # mask loss: only steps flagged as valid interactions contribute
        valid_idxs = valid.view(-1).nonzero().view(-1)
        flat_p_alow_mask = p_alow_mask.view(p_alow_mask.shape[0]*p_alow_mask.shape[1], *p_alow_mask.shape[2:])[valid_idxs]
        if flat_p_alow_mask.shape[0]!=0:
            flat_alow_mask = torch.cat(feat['action_low_mask'], dim=0)
            alow_mask_loss = self.weighted_mask_loss(flat_p_alow_mask, flat_alow_mask)
            losses['action_low_mask'] = alow_mask_loss * self.args.mask_loss_wt
        # subgoal completion loss (MSE against normalized subgoal index)
        if self.args.subgoal_aux_loss_wt > 0:
            p_subgoal = feat['out_subgoal'].squeeze(2)
            l_subgoal = feat['subgoals_completed']
            sg_loss = self.mse_loss(p_subgoal, l_subgoal)
            sg_loss = sg_loss.view(-1) * pad_valid.float()
            subgoal_loss = sg_loss.mean()
            losses['subgoal_aux'] = self.args.subgoal_aux_loss_wt * subgoal_loss
        # progress monitoring loss (MSE against fraction of episode done)
        if self.args.pm_aux_loss_wt > 0:
            p_progress = feat['out_progress'].squeeze(2)
            l_progress = feat['subgoal_progress']
            pg_loss = self.mse_loss(p_progress, l_progress)
            pg_loss = pg_loss.view(-1) * pad_valid.float()
            progress_loss = pg_loss.mean()
            losses['progress_aux'] = self.args.pm_aux_loss_wt * progress_loss
        return losses
def weighted_mask_loss(self, pred_masks, gt_masks):
'''
mask loss that accounts for weight-imbalance between 0 and 1 pixels
'''
bce = self.bce_with_logits(pred_masks, gt_masks)
flipped_mask = self.flip_tensor(gt_masks)
inside = (bce * gt_masks).sum() / (gt_masks).sum()
outside = (bce * flipped_mask).sum() / (flipped_mask).sum()
return inside + outside
def flip_tensor(self, tensor, on_zero=1, on_non_zero=0):
'''
flip 0 and 1 values in tensor
'''
res = tensor.clone()
res[tensor == 0] = on_zero
res[tensor != 0] = on_non_zero
return res
def compute_metric(self, preds, data):
'''
compute f1 and extract match scores for output
'''
m = collections.defaultdict(list)
for task in data:
ex = self.load_task_json(task)
i = self.get_task_and_ann_id(ex)
label = ' '.join([a['discrete_action']['action'] for a in ex['plan']['low_actions']])
m['action_low_f1'].append(compute_f1(label.lower(), preds[i]['action_low'].lower()))
m['action_low_em'].append(compute_exact(label.lower(), preds[i]['action_low'].lower()))
return {k: sum(v)/len(v) for k, v in m.items()}
| 40.724051 | 174 | 0.586597 | import os
import torch
import numpy as np
import nn.vnn as vnn
import collections
from torch import nn
from torch.nn import functional as F
from torch.nn.utils.rnn import pad_sequence, pack_padded_sequence, pad_packed_sequence
from model.seq2seq import Module as Base
from models.utils.metric import compute_f1, compute_exact
from gen.utils.image_util import decompress_mask
from torchvision import transforms
from PIL import Image
class Module(Base):
def __init__(self, args, vocab):
super().__init__(args, vocab)
self.enc = nn.LSTM(args.demb, args.dhid, bidirectional=True, batch_first=True)
self.enc_att = vnn.SelfAttn(args.dhid*2)
self.subgoal_monitoring = (self.args.pm_aux_loss_wt > 0 or self.args.subgoal_aux_loss_wt > 0)
decoder = vnn.ConvFrameMaskDecoderProgressMonitorFinetune if self.subgoal_monitoring else vnn.ConvFrameMaskDecoder
self.dec = decoder(self.emb_action_low, args.dframe, 2*args.dhid,
pframe=args.pframe,
attn_dropout=args.attn_dropout,
hstate_dropout=args.hstate_dropout,
actor_dropout=args.actor_dropout,
input_dropout=args.input_dropout,
teacher_forcing=args.dec_teacher_forcing)
self.vis_dropout = nn.Dropout(args.vis_dropout)
self.lang_dropout = nn.Dropout(args.lang_dropout, inplace=True)
self.input_dropout = nn.Dropout(args.input_dropout)
self.state_t = None
self.e_t = None
self.test_mode = False
self.bce_with_logits = torch.nn.BCEWithLogitsLoss(reduction='none')
self.mse_loss = torch.nn.MSELoss(reduction='none')
self.root_path = os.getcwd()
self.feat_pt = 'feat_conv.pt'
self.max_subgoals = 25
self.max_episode_len = args.max_episode_len
self.reset()
def featurize(self, batch, load_mask=True, load_frames=True):
device = torch.device('cuda') if self.args.gpu else torch.device('cpu')
feat = collections.defaultdict(list)
for ex in batch:
ubgoal_aux_loss_wt > 0:
feat['subgoals_completed'].append(np.array(ex['num']['low_to_high_idx']) / self.max_subgoals)
if self.args.pm_aux_loss_wt > 0:
num_actions = len([a for sg in ex['num']['action_low'] for a in sg])
subgoal_progress = [(i+1)/float(num_actions) for i in range(num_actions)]
feat['subgoal_progress'].append(subgoal_progress)
_action(ex)
lang_goal, lang_instr = ex['num']['lang_goal'], ex['num']['lang_instr']
lang_goal = self.zero_input(lang_goal) if self.args.zero_goal else lang_goal
lang_instr = self.zero_input(lang_instr) if self.args.zero_instr else lang_instr
lang_goal_instr = lang_goal + lang_instr
feat['lang_goal_instr'].append(lang_goal_instr)
episode_len = 0
if load_frames and not self.test_mode:
root = self.get_task_root(ex)
im = []
path = "{}/{}".format(root,'raw_images')
imgs = sorted(os.listdir(path))
tfms = transforms.Compose([transforms.Resize(224), transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),])
for img in imgs:
im.append(tfms(Image.open("{}/{}".format(path,img))))
im = torch.stack(im)
num_low_actions = len(ex['plan']['low_actions'])
num_feat_frames = im.shape[0]
if num_low_actions != num_feat_frames:
keep = [None] * len(ex['plan']['low_actions'])
for i, d in enumerate(ex['images']):
if keep[d['low_idx']] is None:
keep[d['low_idx']] = im[i]
keep.append(keep[-1])
episode_len = min(self.max_episode_len, len(keep))
keep = keep[:episode_len]
feat['frames'].append(torch.stack(keep, dim=0))
else:
episode_len = min(self.max_episode_len, len(im))
im = im[:episode_len]
feat['frames'].append(torch.cat([im, im[-1].unsqueeze(0)], dim=0))
> 0:
feat['subgoals_completed'][-1] = feat['subgoals_completed'][-1][:episode_len]
if self.args.pm_aux_loss_wt > 0:
feat['subgoal_progress'][-1] = feat['subgoal_progress'][-1][:episode_len]
if not self.test_mode:
feat['action_low'].append([a['action'] for a in ex['num']['action_low']][:episode_len])
if load_mask:
feat['action_low_mask'].append([self.decompress_mask(a['mask']) for i,a in enumerate(ex['num']['action_low']) if a['mask'] is not None and i<episode_len])
feat['action_low_valid_interact'].append([a['valid_interact'] for a in ex['num']['action_low']][:episode_len])
for k, v in feat.items():
if k in {'lang_goal_instr'}:
seqs = [torch.tensor(vv, device=device) for vv in v]
pad_seq = pad_sequence(seqs, batch_first=True, padding_value=self.pad)
seq_lengths = np.array(list(map(len, v)))
embed_seq = self.emb_word(pad_seq)
packed_input = pack_padded_sequence(embed_seq, seq_lengths, batch_first=True, enforce_sorted=False)
feat[k] = packed_input
elif k in {'action_low_mask'}:
seqs = [torch.tensor(vv, device=device, dtype=torch.float) for vv in v]
feat[k] = seqs
elif k in {'subgoal_progress', 'subgoals_completed'}:
seqs = [torch.tensor(vv, device=device, dtype=torch.float) for vv in v]
pad_seq = pad_sequence(seqs, batch_first=True, padding_value=self.pad)
feat[k] = pad_seq
else:
seqs = [torch.tensor(vv, device=device, dtype=torch.float if ('frames' in k) else torch.long) for vv in v]
pad_seq = pad_sequence(seqs, batch_first=True, padding_value=self.pad)
feat[k] = pad_seq
return feat
def serialize_lang_action(self, feat):
is_serialized = not isinstance(feat['num']['lang_instr'][0], list)
if not is_serialized:
feat['num']['lang_instr'] = [word for desc in feat['num']['lang_instr'] for word in desc]
if not self.test_mode:
feat['num']['action_low'] = [a for a_group in feat['num']['action_low'] for a in a_group]
def decompress_mask(self, compressed_mask):
mask = np.array(decompress_mask(compressed_mask))
mask = np.expand_dims(mask, axis=0)
return mask
def forward(self, feat, max_decode=300):
cont_lang, enc_lang = self.encode_lang(feat)
state_0 = cont_lang, torch.zeros_like(cont_lang)
frames = self.vis_dropout(feat['frames'])
res = self.dec(enc_lang, frames, max_decode=self.max_episode_len, gold=feat['action_low'], state_0=state_0)
feat.update(res)
return feat
def encode_lang(self, feat):
emb_lang_goal_instr = feat['lang_goal_instr']
self.lang_dropout(emb_lang_goal_instr.data)
enc_lang_goal_instr, _ = self.enc(emb_lang_goal_instr)
enc_lang_goal_instr, _ = pad_packed_sequence(enc_lang_goal_instr, batch_first=True)
self.lang_dropout(enc_lang_goal_instr)
cont_lang_goal_instr = self.enc_att(enc_lang_goal_instr)
return cont_lang_goal_instr, enc_lang_goal_instr
def reset(self):
self.r_state = {
'state_t': None,
'e_t': None,
'cont_lang': None,
'enc_lang': None
}
def step(self, feat, prev_action=None):
if self.r_state['cont_lang'] is None and self.r_state['enc_lang'] is None:
self.r_state['cont_lang'], self.r_state['enc_lang'] = self.encode_lang(feat)
if self.r_state['e_t'] is None and self.r_state['state_t'] is None:
self.r_state['e_t'] = self.dec.go.repeat(self.r_state['enc_lang'].size(0), 1)
self.r_state['state_t'] = self.r_state['cont_lang'], torch.zeros_like(self.r_state['cont_lang'])
e_t = self.embed_action(prev_action) if prev_action is not None else self.r_state['e_t']
out_action_low, out_action_low_mask, state_t, *_ = self.dec.step(self.r_state['enc_lang'], feat['frames'][:, 0], e_t=e_t, state_tm1=self.r_state['state_t'])
self.r_state['state_t'] = state_t
self.r_state['e_t'] = self.dec.emb(out_action_low.max(1)[1])
feat['out_action_low'] = out_action_low.unsqueeze(0)
feat['out_action_low_mask'] = out_action_low_mask.unsqueeze(0)
return feat
def extract_preds(self, out, batch, feat, clean_special_tokens=True):
pred = {}
for ex, alow, alow_mask in zip(batch, feat['out_action_low'].max(2)[1].tolist(), feat['out_action_low_mask']):
if self.pad in alow:
pad_start_idx = alow.index(self.pad)
alow = alow[:pad_start_idx]
alow_mask = alow_mask[:pad_start_idx]
if clean_special_tokens:
if self.stop_token in alow:
stop_start_idx = alow.index(self.stop_token)
alow = alow[:stop_start_idx]
alow_mask = alow_mask[:stop_start_idx]
words = self.vocab['action_low'].index2word(alow)
alow_mask = F.sigmoid(alow_mask)
p_mask = [(alow_mask[t] > 0.5).cpu().numpy() for t in range(alow_mask.shape[0])]
task_id_ann = self.get_task_and_ann_id(ex)
pred[task_id_ann] = {
'action_low': ' '.join(words),
'action_low_mask': p_mask,
}
return pred
def embed_action(self, action):
device = torch.device('cuda') if self.args.gpu else torch.device('cpu')
action_num = torch.tensor(self.vocab['action_low'].word2index(action), device=device)
action_emb = self.dec.emb(action_num).unsqueeze(0)
return action_emb
def compute_loss(self, out, batch, feat):
losses = dict()
p_alow = out['out_action_low'].view(-1, len(self.vocab['action_low']))
l_alow = feat['action_low'].view(-1)
p_alow_mask = out['out_action_low_mask']
valid = feat['action_low_valid_interact']
pad_valid = (l_alow != self.pad)
alow_loss = F.cross_entropy(p_alow, l_alow, reduction='none')
alow_loss *= pad_valid.float()
alow_loss = alow_loss.mean()
losses['action_low'] = alow_loss * self.args.action_loss_wt
valid_idxs = valid.view(-1).nonzero().view(-1)
flat_p_alow_mask = p_alow_mask.view(p_alow_mask.shape[0]*p_alow_mask.shape[1], *p_alow_mask.shape[2:])[valid_idxs]
if flat_p_alow_mask.shape[0]!=0:
flat_alow_mask = torch.cat(feat['action_low_mask'], dim=0)
alow_mask_loss = self.weighted_mask_loss(flat_p_alow_mask, flat_alow_mask)
losses['action_low_mask'] = alow_mask_loss * self.args.mask_loss_wt
if self.args.subgoal_aux_loss_wt > 0:
p_subgoal = feat['out_subgoal'].squeeze(2)
l_subgoal = feat['subgoals_completed']
sg_loss = self.mse_loss(p_subgoal, l_subgoal)
sg_loss = sg_loss.view(-1) * pad_valid.float()
subgoal_loss = sg_loss.mean()
losses['subgoal_aux'] = self.args.subgoal_aux_loss_wt * subgoal_loss
if self.args.pm_aux_loss_wt > 0:
p_progress = feat['out_progress'].squeeze(2)
l_progress = feat['subgoal_progress']
pg_loss = self.mse_loss(p_progress, l_progress)
pg_loss = pg_loss.view(-1) * pad_valid.float()
progress_loss = pg_loss.mean()
losses['progress_aux'] = self.args.pm_aux_loss_wt * progress_loss
return losses
def weighted_mask_loss(self, pred_masks, gt_masks):
bce = self.bce_with_logits(pred_masks, gt_masks)
flipped_mask = self.flip_tensor(gt_masks)
inside = (bce * gt_masks).sum() / (gt_masks).sum()
outside = (bce * flipped_mask).sum() / (flipped_mask).sum()
return inside + outside
def flip_tensor(self, tensor, on_zero=1, on_non_zero=0):
res = tensor.clone()
res[tensor == 0] = on_zero
res[tensor != 0] = on_non_zero
return res
def compute_metric(self, preds, data):
m = collections.defaultdict(list)
for task in data:
ex = self.load_task_json(task)
i = self.get_task_and_ann_id(ex)
label = ' '.join([a['discrete_action']['action'] for a in ex['plan']['low_actions']])
m['action_low_f1'].append(compute_f1(label.lower(), preds[i]['action_low'].lower()))
m['action_low_em'].append(compute_exact(label.lower(), preds[i]['action_low'].lower()))
return {k: sum(v)/len(v) for k, v in m.items()}
| true | true |
f723b7ecde46f054272f9893ebd0e63cffbd1cd6 | 6,773 | py | Python | tests/gamestonk_terminal/etf/discovery/test_disc_controller.py | GarnixJu2015/GamestonkTerminal | ec400e46ddce4ac934af836b863528f14a13d865 | [
"MIT"
] | null | null | null | tests/gamestonk_terminal/etf/discovery/test_disc_controller.py | GarnixJu2015/GamestonkTerminal | ec400e46ddce4ac934af836b863528f14a13d865 | [
"MIT"
] | null | null | null | tests/gamestonk_terminal/etf/discovery/test_disc_controller.py | GarnixJu2015/GamestonkTerminal | ec400e46ddce4ac934af836b863528f14a13d865 | [
"MIT"
] | null | null | null | # IMPORTATION STANDARD
import os
# IMPORTATION THIRDPARTY
import pandas as pd
import pytest
# IMPORTATION INTERNAL
from gamestonk_terminal.etf.discovery import disc_controller
# pylint: disable=E1101
# pylint: disable=W0603
# pylint: disable=E1111
# NOTE(review): appears unused in this module chunk — confirm before removing.
EMPTY_DF = pd.DataFrame()
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
    "queue, expected",
    [
        (["load", "help"], []),
        (["quit", "help"], ["help"]),
    ],
)
def test_menu_with_queue(expected, mocker, queue):
    """menu() should consume queued commands and return the remainder."""
    path_controller = "gamestonk_terminal.etf.discovery.disc_controller"
    # Force switch() to request an immediate exit on first dispatch.
    mocker.patch(
        target=f"{path_controller}.DiscoveryController.switch",
        return_value=["quit"],
    )
    assert disc_controller.DiscoveryController(queue=queue).menu() == expected
@pytest.mark.vcr(record_mode="none")
def test_menu_without_queue_completion(mocker):
    """menu() with prompt-toolkit enabled should exit cleanly on 'quit'."""
    path_controller = "gamestonk_terminal.etf.discovery.disc_controller"
    # ENABLE AUTO-COMPLETION : HELPER_FUNCS.MENU
    mocker.patch(
        target="gamestonk_terminal.feature_flags.USE_PROMPT_TOOLKIT",
        new=True,
    )
    mocker.patch(
        target="gamestonk_terminal.parent_classes.session",
    )
    mocker.patch(
        target="gamestonk_terminal.parent_classes.session.prompt",
        return_value="quit",
    )
    # Stub the network-backed category lookup used to build the completer.
    mocker.patch(
        target="gamestonk_terminal.etf.financedatabase_model.get_etfs_categories",
        return_value=["Bank Loan"],
    )
    # DISABLE AUTO-COMPLETION : CONTROLLER.COMPLETER
    # NOTE(review): the comment says "disable" but new=True — confirm intent.
    mocker.patch.object(
        target=disc_controller.gtff,
        attribute="USE_PROMPT_TOOLKIT",
        new=True,
    )
    mocker.patch(
        target=f"{path_controller}.session",
    )
    mocker.patch(
        target=f"{path_controller}.session.prompt",
        return_value="quit",
    )
    result_menu = disc_controller.DiscoveryController(queue=None).menu()
    assert result_menu == []
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
    "mock_input",
    ["help", "homee help", "home help", "mock"],
)
def test_menu_without_queue_sys_exit(mock_input, mocker):
    """menu() should survive a SystemExit raised by switch() and keep looping."""
    path_controller = "gamestonk_terminal.etf.discovery.disc_controller"
    # DISABLE AUTO-COMPLETION
    mocker.patch.object(
        target=disc_controller.gtff,
        attribute="USE_PROMPT_TOOLKIT",
        new=False,
    )
    mocker.patch(
        target=f"{path_controller}.session",
        return_value=None,
    )
    # MOCK USER INPUT
    mocker.patch("builtins.input", return_value=mock_input)
    # MOCK SWITCH
    # First call raises SystemExit; subsequent calls request a quit, so the
    # menu loop must recover from the exception and terminate normally.
    class SystemExitSideEffect:
        def __init__(self):
            self.first_call = True
        def __call__(self, *args, **kwargs):
            if self.first_call:
                self.first_call = False
                raise SystemExit()
            return ["quit"]
    mock_switch = mocker.Mock(side_effect=SystemExitSideEffect())
    mocker.patch(
        target=f"{path_controller}.DiscoveryController.switch",
        new=mock_switch,
    )
    result_menu = disc_controller.DiscoveryController(queue=None).menu()
    assert result_menu == []
@pytest.mark.vcr(record_mode="none")
@pytest.mark.record_stdout
def test_print_help():
    """print_help should run without raising; output is snapshot-recorded."""
    disc_controller.DiscoveryController(queue=None).print_help()
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
    "an_input, expected_queue",
    [
        ("", []),
        ("/help", ["quit", "quit", "help"]),
        ("help/help", ["help"]),
        ("q", ["quit"]),
        ("h", []),
        ("r", ["quit", "quit", "reset", "etf", "disc"]),
    ],
)
def test_switch(an_input, expected_queue):
    """switch() translates one raw input line into the expected command queue."""
    ctrl = disc_controller.DiscoveryController(queue=None)
    assert ctrl.switch(an_input=an_input) == expected_queue
@pytest.mark.vcr(record_mode="none")
def test_call_cls(mocker):
    """call_cls clears the terminal via os.system and leaves the queue empty."""
    mocker.patch("os.system")
    ctrl = disc_controller.DiscoveryController(queue=None)
    ctrl.call_cls([])
    os.system.assert_called_once_with("cls||clear")
    assert ctrl.queue == []
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
    "func, queue, expected_queue",
    [
        ("call_exit", [], ["quit", "quit", "quit"]),
        ("call_exit", ["help"], ["quit", "quit", "quit", "help"]),
        ("call_home", [], ["quit", "quit"]),
        ("call_help", [], []),
        ("call_quit", [], ["quit"]),
        ("call_quit", ["help"], ["quit", "help"]),
        ("call_reset", [], ["quit", "quit", "reset", "etf", "disc"]),
        ("call_reset", ["help"], ["quit", "quit", "reset", "etf", "disc", "help"]),
    ],
)
def test_call_func_expect_queue(expected_queue, func, queue):
    """Navigation helpers return None and leave the documented command queue behind."""
    ctrl = disc_controller.DiscoveryController(queue=queue)
    assert getattr(ctrl, func)([]) is None
    assert ctrl.queue == expected_queue
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
    "tested_func, other_args, mocked_func, called_args, called_kwargs",
    [
        ("call_gainers", ["-l=10"], "wsj_view.show_top_mover", ["gainers", 10, ""], dict()),
        ("call_decliners", ["-l=10"], "wsj_view.show_top_mover", ["decliners", 10, ""], dict()),
        ("call_active", ["-l=10"], "wsj_view.show_top_mover", ["active", 10, ""], dict()),
    ],
)
def test_call_func_test(
    tested_func, mocked_func, other_args, called_args, called_kwargs, mocker
):
    """Each discovery command delegates to its view function with the parsed arguments."""
    path_controller = "gamestonk_terminal.etf.discovery.disc_controller"
    if not mocked_func:
        # Nothing to intercept: just make sure the command executes.
        ctrl = disc_controller.DiscoveryController(queue=None)
        getattr(ctrl, tested_func)(other_args)
        return
    mock = mocker.Mock()
    mocker.patch(
        target=f"{path_controller}.{mocked_func}",
        new=mock,
    )
    ctrl = disc_controller.DiscoveryController(queue=None)
    getattr(ctrl, tested_func)(other_args)
    if called_args or called_kwargs:
        mock.assert_called_once_with(*called_args, **called_kwargs)
    else:
        mock.assert_called_once()
| 25.272388 | 82 | 0.579655 |
import os
import pandas as pd
import pytest
from gamestonk_terminal.etf.discovery import disc_controller
EMPTY_DF = pd.DataFrame()  # module-level empty-DataFrame stub (handy return value for mocked views)
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
    "queue, expected",
    [
        (["load", "help"], []),
        (["quit", "help"], ["help"]),
    ],
)
def test_menu_with_queue(expected, mocker, queue):
    """menu() drains a pre-seeded queue; switch() is stubbed to always return ["quit"]."""
    path_controller = "gamestonk_terminal.etf.discovery.disc_controller"
    mocker.patch(
        target=f"{path_controller}.DiscoveryController.switch",
        return_value=["quit"],
    )
    result_menu = disc_controller.DiscoveryController(queue=queue).menu()
    assert result_menu == expected
@pytest.mark.vcr(record_mode="none")
def test_menu_without_queue_completion(mocker):
    """menu() with prompt-toolkit auto-completion enabled; the mocked prompt answers "quit"."""
    path_controller = "gamestonk_terminal.etf.discovery.disc_controller"
    # enable prompt-toolkit globally for helper_funcs.menu
    mocker.patch(
        target="gamestonk_terminal.feature_flags.USE_PROMPT_TOOLKIT",
        new=True,
    )
    mocker.patch(
        target="gamestonk_terminal.parent_classes.session",
    )
    mocker.patch(
        target="gamestonk_terminal.parent_classes.session.prompt",
        return_value="quit",
    )
    # category list consumed while building the completer choices
    mocker.patch(
        target="gamestonk_terminal.etf.financedatabase_model.get_etfs_categories",
        return_value=["Bank Loan"],
    )
    # enable prompt-toolkit for the controller's own completer as well
    mocker.patch.object(
        target=disc_controller.gtff,
        attribute="USE_PROMPT_TOOLKIT",
        new=True,
    )
    mocker.patch(
        target=f"{path_controller}.session",
    )
    mocker.patch(
        target=f"{path_controller}.session.prompt",
        return_value="quit",
    )
    result_menu = disc_controller.DiscoveryController(queue=None).menu()
    assert result_menu == []
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
    "mock_input",
    ["help", "homee help", "home help", "mock"],
)
def test_menu_without_queue_sys_exit(mock_input, mocker):
    """First switch() call raises SystemExit; menu() must still return an empty queue."""
    path_controller = "gamestonk_terminal.etf.discovery.disc_controller"
    # run without prompt-toolkit auto-completion
    mocker.patch.object(
        target=disc_controller.gtff,
        attribute="USE_PROMPT_TOOLKIT",
        new=False,
    )
    mocker.patch(
        target=f"{path_controller}.session",
        return_value=None,
    )
    # feed the parametrized command line as the user's typed input
    mocker.patch("builtins.input", return_value=mock_input)
    class SystemExitSideEffect:
        # raises SystemExit on the first call only, then returns ["quit"]
        def __init__(self):
            self.first_call = True
        def __call__(self, *args, **kwargs):
            if self.first_call:
                self.first_call = False
                raise SystemExit()
            return ["quit"]
    mock_switch = mocker.Mock(side_effect=SystemExitSideEffect())
    mocker.patch(
        target=f"{path_controller}.DiscoveryController.switch",
        new=mock_switch,
    )
    result_menu = disc_controller.DiscoveryController(queue=None).menu()
    assert result_menu == []
@pytest.mark.vcr(record_mode="none")
@pytest.mark.record_stdout
def test_print_help():
    """print_help() must run cleanly; its stdout is captured by record_stdout."""
    controller = disc_controller.DiscoveryController(queue=None)
    controller.print_help()
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
    "an_input, expected_queue",
    [
        ("", []),
        ("/help", ["quit", "quit", "help"]),
        ("help/help", ["help"]),
        ("q", ["quit"]),
        ("h", []),
        (
            "r",
            [
                "quit",
                "quit",
                "reset",
                "etf",
                "disc",
            ],
        ),
    ],
)
def test_switch(an_input, expected_queue):
    """switch() translates one raw input line into the expected command queue."""
    controller = disc_controller.DiscoveryController(queue=None)
    queue = controller.switch(an_input=an_input)
    assert queue == expected_queue
@pytest.mark.vcr(record_mode="none")
def test_call_cls(mocker):
    """call_cls clears the terminal via os.system and leaves the queue empty."""
    mocker.patch("os.system")
    controller = disc_controller.DiscoveryController(queue=None)
    controller.call_cls([])
    assert controller.queue == []
    os.system.assert_called_once_with("cls||clear")
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
    "func, queue, expected_queue",
    [
        (
            "call_exit",
            [],
            ["quit", "quit", "quit"],
        ),
        ("call_exit", ["help"], ["quit", "quit", "quit", "help"]),
        ("call_home", [], ["quit", "quit"]),
        ("call_help", [], []),
        ("call_quit", [], ["quit"]),
        ("call_quit", ["help"], ["quit", "help"]),
        (
            "call_reset",
            [],
            [
                "quit",
                "quit",
                "reset",
                "etf",
                "disc",
            ],
        ),
        (
            "call_reset",
            ["help"],
            [
                "quit",
                "quit",
                "reset",
                "etf",
                "disc",
                "help",
            ],
        ),
    ],
)
def test_call_func_expect_queue(expected_queue, func, queue):
    """Navigation helpers return None and leave the documented command queue behind."""
    controller = disc_controller.DiscoveryController(queue=queue)
    result = getattr(controller, func)([])
    assert result is None
    assert controller.queue == expected_queue
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
    "tested_func, other_args, mocked_func, called_args, called_kwargs",
    [
        (
            "call_gainers",
            ["-l=10"],
            "wsj_view.show_top_mover",
            ["gainers", 10, ""],
            dict(),
        ),
        (
            "call_decliners",
            ["-l=10"],
            "wsj_view.show_top_mover",
            ["decliners", 10, ""],
            dict(),
        ),
        (
            "call_active",
            ["-l=10"],
            "wsj_view.show_top_mover",
            ["active", 10, ""],
            dict(),
        ),
    ],
)
def test_call_func_test(
    tested_func, mocked_func, other_args, called_args, called_kwargs, mocker
):
    """Each discovery command delegates to its view function with the parsed arguments."""
    path_controller = "gamestonk_terminal.etf.discovery.disc_controller"
    if mocked_func:
        mock = mocker.Mock()
        mocker.patch(
            target=f"{path_controller}.{mocked_func}",
            new=mock,
        )
        controller = disc_controller.DiscoveryController(queue=None)
        getattr(controller, tested_func)(other_args)
        if called_args or called_kwargs:
            mock.assert_called_once_with(*called_args, **called_kwargs)
        else:
            mock.assert_called_once()
    else:
        # no view to intercept: just make sure the command executes
        controller = disc_controller.DiscoveryController(queue=None)
        getattr(controller, tested_func)(other_args)
| true | true |
f723b9c35cf5df83c827a8fdfdc3fba5d26b81e7 | 728 | py | Python | Python/FromUniversity/PYQT5/testgame.py | programmer-666/Codes | fdffe38a789ba3636dff7ceaa9f1b4113ae17c2b | [
"MIT"
] | null | null | null | Python/FromUniversity/PYQT5/testgame.py | programmer-666/Codes | fdffe38a789ba3636dff7ceaa9f1b4113ae17c2b | [
"MIT"
] | null | null | null | Python/FromUniversity/PYQT5/testgame.py | programmer-666/Codes | fdffe38a789ba3636dff7ceaa9f1b4113ae17c2b | [
"MIT"
] | 1 | 2021-09-16T14:24:29.000Z | 2021-09-16T14:24:29.000Z | import sys
from PyQt5 import QtWidgets
class game(QtWidgets.QMainWindow):
    """A tiny PyQt5 window: clicking the button nudges the "0" label to the right."""

    def __init__(self):
        # The Qt application must exist before any widget; it is kept on the
        # instance so showtime() can enter its event loop later.
        self.ap = QtWidgets.QApplication(sys.argv)
        super(game, self).__init__()
        self.setGeometry(100,100,200,200)
        self.conts()
        # current label coordinates, advanced by moving()
        self.x = 50
        self.y = 50

    def moving(self):
        """Slot for the button: shift the label 10 px to the right."""
        self.x += 10
        self.obj1.move(self.x, self.y)

    def conts(self):
        """Create the child widgets (a label and the push button that moves it)."""
        self.obj1 = QtWidgets.QLabel(self)
        self.obj1.setText("0")
        self.obj1.move(50,50)
        self.rb = QtWidgets.QPushButton(self)
        self.rb.clicked.connect(self.moving)

    def showtime(self):
        """Show the window and run the Qt event loop until the application exits."""
        self.show()
        sys.exit(self.ap.exec_())
test = game()  # script entry: construct the window (the event loop starts in showtime() below)
test.showtime() | 28 | 50 | 0.590659 | import sys
from PyQt5 import QtWidgets
class game(QtWidgets.QMainWindow):
    """Minimal PyQt5 demo: a push button that moves a "0" label rightwards."""

    def __init__(self):
        # Keep the QApplication on the instance; showtime() runs its loop.
        self.ap = QtWidgets.QApplication(sys.argv)
        super(game, self).__init__()
        self.setGeometry(100,100,200,200)
        self.conts()
        # label position, updated on every click
        self.x = 50
        self.y = 50

    def moving(self):
        """Button slot: advance the label 10 px along x."""
        self.x += 10
        self.obj1.move(self.x, self.y)

    def conts(self):
        """Build the label and the button wired to moving()."""
        self.obj1 = QtWidgets.QLabel(self)
        self.obj1.setText("0")
        self.obj1.move(50,50)
        self.rb = QtWidgets.QPushButton(self)
        self.rb.clicked.connect(self.moving)

    def showtime(self):
        """Display the window and block in the Qt event loop."""
        self.show()
        sys.exit(self.ap.exec_())
test = game()  # script entry: construct the window before entering the event loop
test.showtime() | true | true |
f723bb3a1ed4cb91e25d960a0f850160a5553547 | 112,619 | py | Python | pygsti/models/modelconstruction.py | maij/pyGSTi | 70e83e05fa689f53550feb3914c4fac40ca4a943 | [
"Apache-2.0"
] | 73 | 2016-01-28T05:02:05.000Z | 2022-03-30T07:46:33.000Z | pygsti/models/modelconstruction.py | 00mjk/pyGSTi | 4f8bf5337b01b7afcb7b0580b717b5d1fe281be4 | [
"Apache-2.0"
] | 113 | 2016-02-25T15:32:18.000Z | 2022-03-31T13:18:13.000Z | pygsti/models/modelconstruction.py | 00mjk/pyGSTi | 4f8bf5337b01b7afcb7b0580b717b5d1fe281be4 | [
"Apache-2.0"
] | 41 | 2016-03-15T19:32:07.000Z | 2022-02-16T10:22:05.000Z | """
Functions for the construction of new models.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import collections as _collections
import itertools as _itertools
from os import stat
from pygsti.modelmembers.instruments.instrument import Instrument
import numpy as _np
import scipy as _scipy
import scipy.linalg as _spl
from pygsti.evotypes import Evotype as _Evotype
from pygsti.modelmembers import operations as _op
from pygsti.modelmembers import povms as _povm
from pygsti.modelmembers import states as _state
from pygsti.modelmembers import instruments as _instrument
from pygsti.modelmembers.operations import opfactory as _opfactory
from pygsti.models import stencillabel as _stencil
from pygsti.models.modelnoise import OpModelNoise as _OpModelNoise
from pygsti.models.modelnoise import OpModelPerOpNoise as _OpModelPerOpNoise
from pygsti.models.modelnoise import ComposedOpModelNoise as _ComposedOpModelNoise
from pygsti.models.modelnoise import LindbladNoise as _LindbladNoise
from pygsti.models.modelnoise import StochasticNoise as _StochasticNoise
from pygsti.models.modelnoise import DepolarizationNoise as _DepolarizationNoise
from pygsti.models import explicitmodel as _emdl
from pygsti.models import gaugegroup as _gg
from pygsti.models.localnoisemodel import LocalNoiseModel as _LocalNoiseModel
from pygsti.models.cloudnoisemodel import CloudNoiseModel as _CloudNoiseModel
from pygsti.baseobjs import label as _label
from pygsti.baseobjs import statespace as _statespace
from pygsti.baseobjs.basis import Basis as _Basis
from pygsti.baseobjs.basis import ExplicitBasis as _ExplicitBasis
from pygsti.baseobjs.basis import DirectSumBasis as _DirectSumBasis
from pygsti.baseobjs.qubitgraph import QubitGraph as _QubitGraph
from pygsti.tools import basistools as _bt
from pygsti.tools import internalgates as _itgs
from pygsti.tools import optools as _ot
from pygsti.tools import listtools as _lt
from pygsti.baseobjs.basisconstructors import sqrt2, id2x2, sigmax, sigmay, sigmaz
from pygsti.baseobjs.verbosityprinter import VerbosityPrinter as _VerbosityPrinter
from pygsti.tools.legacytools import deprecate as _deprecated_fn
#############################################
# Build gates based on "standard" gate names
############################################
def create_spam_vector(vec_expr, state_space, basis):
    """
    Build a rho or E vector from an expression.

    Parameters
    ----------
    vec_expr : string
        the expression which determines which vector to build.  Currently, only
        integers are allowed, which specify a the vector for the pure state of
        that index.  For example, "1" means return vectorize(``|1><1|``).  The
        index labels the absolute index of the state within the entire state
        space, and is independent of the direct-sum decomposition of density
        matrix space.

    state_space : StateSpace
        The state space that the created operation should act upon.

    basis : str or Basis
        The basis of the returned vector.  Allowed
        values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
        and Qutrit (qt) (or a custom basis object).

    Returns
    -------
    numpy array
        The vector specified by vec_expr in the desired basis.

    Raises
    ------
    ValueError
        If `vec_expr` is not parseable as an integer state index.
    """
    # So far just allow integer prep_expressions that give the index of state
    # (within the state space) that we prep/measure
    try:
        index = int(vec_expr)
    except (ValueError, TypeError) as err:
        # Narrowed from a bare `except:`, which would also have swallowed
        # KeyboardInterrupt/SystemExit; chain the cause for easier debugging.
        raise ValueError("Expression must be the index of a state (as a string)") from err

    state_space = _statespace.StateSpace.cast(state_space)
    if isinstance(basis, str):
        basis = _Basis.cast(basis, state_space)
    assert (state_space.dim == basis.dim), \
        "State space labels dim (%s) != basis dim (%s)" % (state_space.dim, basis.dim)

    # standard basis that has the same direct-sum structure as `basis`:
    std_basis = basis.create_equivalent('std')
    vecInSimpleStdBasis = _np.zeros(std_basis.elshape, 'd')  # a matrix, but flattened it is our spamvec
    vecInSimpleStdBasis[index, index] = 1.0  # now a matrix with just a single 1 on the diag
    vecInReducedStdBasis = _np.dot(std_basis.from_elementstd_transform_matrix, vecInSimpleStdBasis.flatten())
    # translates the density matrix / state vector to the std basis with our desired block structure

    vec = _bt.change_basis(vecInReducedStdBasis, std_basis, basis)
    return vec.reshape(-1, 1)  # return a column vector
def create_identity_vec(basis):
    """
    Build the identity vector for a given space and basis.

    Parameters
    ----------
    basis : Basis object
        The basis of the returned vector.  Allowed
        values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
        and Qutrit (qt) (or a custom basis object).

    Returns
    -------
    numpy array
        The identity vector in the desired basis.
    """
    opDim = basis.dim
    if isinstance(basis, _DirectSumBasis):
        blockDims = [c.dim for c in basis.component_bases]
    else:
        blockDims = [opDim]

    # Identity density matrix in the "reduced-std" basis: a 1.0 at every
    # diagonal element of each direct-sum block.
    vecInReducedStdBasis = _np.zeros((opDim, 1), 'd')
    vecIndex = 0
    for blockVecDim in blockDims:
        blockDim = int(_np.sqrt(blockVecDim))  # vec dim -> matrix dim
        # Within a flattened blockDim x blockDim block, diagonal entries
        # occur every (blockDim + 1) positions.
        for k in range(blockDim):
            vecInReducedStdBasis[vecIndex + k * (blockDim + 1), 0] = 1.0
        vecIndex += blockDim * blockDim  # advance past this block's elements
    return _bt.change_basis(vecInReducedStdBasis, "std", basis)
def create_operation(op_expr, state_space, basis="pp", parameterization="full", evotype='default'):
    """
    Build an operation object from an expression.

    Parameters
    ----------
    op_expr : string
        expression for the gate to build. String is first split into parts
        delimited by the colon (:) character, which are composed together to
        create the final gate. Each part takes on of the allowed forms:

        - I(ssl_0, ...) = identity operation on one or more state space labels
          (ssl_i)
        - X(theta, ssl) = x-rotation by theta radians of qubit labeled by ssl
        - Y(theta, ssl) = y-rotation by theta radians of qubit labeled by ssl
        - Z(theta, ssl) = z-rotation by theta radians of qubit labeled by ssl
        - CX(theta, ssl0, ssl1) = controlled x-rotation by theta radians.  Acts
          on qubit labeled by ssl1 with ssl0 being the control.
        - CY(theta, ssl0, ssl1) = controlled y-rotation by theta radians.  Acts
          on qubit labeled by ssl1 with ssl0 being the control.
        - CZ(theta, ssl0, ssl1) = controlled z-rotation by theta radians.  Acts
          on qubit labeled by ssl1 with ssl0 being the control.
        - CNOT(ssl0, ssl1) = standard controlled-not gate.  Acts on qubit
          labeled by ssl1 with ssl0 being the control.
        - CPHASE(ssl0, ssl1) = standard controlled-phase gate.  Acts on qubit
          labeled by ssl1 with ssl0 being the control.
        - LX(theta, i0, i1) = leakage between states i0 and i1.  Implemented as
          an x-rotation between states with integer indices i0 and i1 followed
          by complete decoherence between the states.

    state_space : StateSpace
        The state space that the created operation should act upon.

    basis : str or Basis
        The basis the returned operation should be represented in.

    parameterization : {"full","TP","static"}, optional
        How to parameterize the resulting gate.

        - "full" = return a FullArbitraryOp.
        - "TP" = return a FullTPOp.
        - "static" = return a StaticArbitraryOp.

    evotype : Evotype or str, optional
        The evolution type of this operation, describing how states are
        represented.  The special value `"default"` is equivalent
        to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.

    Returns
    -------
    LinearOperator
        A gate object representing the gate given by op_expr in the desired
        basis.
    """
    # op_expr can contain single qubit ops: X(theta) ,Y(theta) ,Z(theta)
    #                      two qubit ops: CNOT
    #                      clevel qubit ops: Leak
    #                      two clevel opts: Flip
    #  each of which is given additional parameters specifying which indices it acts upon

    # Working with a StateSpace object gives us access to all the info we'll need later
    state_space = _statespace.StateSpace.cast(state_space)
    if isinstance(basis, str):
        basis = _Basis.cast(basis, state_space)
    assert(state_space.dim == basis.dim), \
        "State space labels dim (%s) != basis dim (%s)" % (state_space.dim, basis.dim)

    # -- Helper Functions ----------------------------------------------------

    def to_label(lbl):
        """ Convert integer-strings to integers in state space label """
        try: return int(lbl)
        except ValueError: return lbl.strip()  # non-numeric labels pass through as strings
        # (narrowed from a bare `except:`; int() only raises ValueError for str input)

    def to_labels(lbls):
        """ Convert integer-strings to integers in state space labels """
        return [to_label(lbl) for lbl in lbls]

    # -- End Helper Functions ------------------------------------------------

    build_evotype = 'default'
    superop_mxs_in_basis = []
    exprTerms = op_expr.split(':')  # terms are composed (matrix product) in listed order
    for exprTerm in exprTerms:

        l = exprTerm.index('('); r = exprTerm.rindex(')')
        opName = exprTerm[0:l]
        argsStr = exprTerm[l + 1:r]
        args = argsStr.split(',')

        if opName == "I":
            # qubit labels (TODO: what about 'L' labels? -- not sure if they work with this...)
            labels = to_labels(args)
            # NOTE: _np.prod replaces the deprecated _np.product (removed in NumPy 2.0)
            stateSpaceUDim = int(_np.prod([state_space.label_udimension(l) for l in labels]))
            # a complex 2x2 mx unitary for the identity in Pauli-product basis
            Uop = _op.StaticUnitaryOp(_np.identity(stateSpaceUDim, 'complex'), 'pp', build_evotype)

            # a complex 2*num_qubits x 2*num_qubits mx unitary on full space in Pauli-product basis
            Uop_embed = _op.EmbeddedOp(state_space, labels, Uop)
            # a real 4*num_qubits x 4*num_qubits mx superoperator in Pauli-product basis
            superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')
            # a real 4*num_qubits x 4*num_qubits mx superoperator in final basis
            superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)

        elif opName == "D":
            # like 'I', but only parameterize the diagonal elements - so can be a depolarization-type map
            # (the old 'linear'-parameterization implementation was removed; see VCS history)
            raise NotImplementedError("Removed temporarily - need to update using embedded gates")

        elif opName in ('X', 'Y', 'Z'):  # single-qubit gate names
            assert(len(args) == 2)  # theta, qubit-index
            # restricted eval: no builtins, only `pi` is available to the expression
            theta = eval(args[0], {"__builtins__": None}, {'pi': _np.pi})
            label = to_label(args[1])
            assert(state_space.label_dimension(label) == 4), "%s gate must act on qubits!" % opName

            if opName == 'X': ex = -1j * theta * sigmax / 2
            elif opName == 'Y': ex = -1j * theta * sigmay / 2
            elif opName == 'Z': ex = -1j * theta * sigmaz / 2

            # complex 2x2 unitary matrix operating on single qubit in Pauli-product basis
            Uop = _op.StaticUnitaryOp(_spl.expm(ex), 'pp', build_evotype)
            # a complex 2*num_qubits x 2*num_qubits mx unitary on full space in Pauli-product basis
            Uop_embed = _op.EmbeddedOp(state_space, (label,), Uop)
            # a real 4*num_qubits x 4*num_qubits mx superoperator in Pauli-product basis
            superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')
            # a real 4*num_qubits x 4*num_qubits mx superoperator in final basis
            superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)

        elif opName == 'N':  # more general single-qubit gate
            assert(len(args) == 5)  # theta, sigmaX-coeff, sigmaY-coeff, sigmaZ-coeff, qubit-index
            # restricted eval: only `pi` and `sqrt` are available to the expressions
            theta = eval(args[0], {"__builtins__": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})
            sxCoeff = eval(args[1], {"__builtins__": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})
            syCoeff = eval(args[2], {"__builtins__": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})
            szCoeff = eval(args[3], {"__builtins__": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})
            label = to_label(args[4])
            assert(state_space.label_dimension(label) == 4), "%s gate must act on qubits!" % opName

            ex = -1j * theta * (sxCoeff * sigmax / 2. + syCoeff * sigmay / 2. + szCoeff * sigmaz / 2.)
            # complex 2x2 unitary matrix operating on single qubit in Pauli-product basis
            Uop = _op.StaticUnitaryOp(_spl.expm(ex), 'pp', evotype=build_evotype)
            # a complex 2*num_qubits x 2*num_qubits mx unitary on full space in Pauli-product basis
            Uop_embed = _op.EmbeddedOp(state_space, (label,), Uop)
            # a real 4*num_qubits x 4*num_qubits mx superoperator in Pauli-product basis
            superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')
            # a real 4*num_qubits x 4*num_qubits mx superoperator in final basis
            superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)

        elif opName in ('CX', 'CY', 'CZ', 'CNOT', 'CPHASE'):  # two-qubit gate names

            if opName in ('CX', 'CY', 'CZ'):
                assert(len(args) == 3)  # theta, qubit-label1, qubit-label2
                theta = eval(args[0], {"__builtins__": None}, {'pi': _np.pi})
                label1 = to_label(args[1]); label2 = to_label(args[2])

                if opName == 'CX': ex = -1j * theta * sigmax / 2
                elif opName == 'CY': ex = -1j * theta * sigmay / 2
                elif opName == 'CZ': ex = -1j * theta * sigmaz / 2
                Utarget = _spl.expm(ex)  # 2x2 unitary matrix operating on target qubit

            else:  # opName in ('CNOT','CPHASE')
                assert(len(args) == 2)  # qubit-label1, qubit-label2
                label1 = to_label(args[0]); label2 = to_label(args[1])

                if opName == 'CNOT':
                    Utarget = _np.array([[0, 1],
                                         [1, 0]], 'd')
                elif opName == 'CPHASE':
                    Utarget = _np.array([[1, 0],
                                         [0, -1]], 'd')

            # 4x4 unitary matrix operating on isolated two-qubit space
            U = _np.identity(4, 'complex'); U[2:, 2:] = Utarget
            assert(state_space.label_dimension(label1) == 4 and state_space.label_dimension(label2) == 4), \
                "%s gate must act on qubits!" % opName
            # complex 4x4 unitary matrix operating on two-qubit in Pauli-product basis
            Uop = _op.StaticUnitaryOp(U, 'pp', build_evotype)
            # a complex 2*num_qubits x 2*num_qubits mx unitary on full space
            Uop_embed = _op.EmbeddedOp(state_space, [label1, label2], Uop)
            # a real 4*num_qubits x 4*num_qubits mx superoperator in Pauli-product basis
            superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')
            # a real 4*num_qubits x 4*num_qubits mx superoperator in final basis
            superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)

        elif opName == "LX":  # TODO - better way to describe leakage?
            assert(len(args) == 3)  # theta, dmIndex1, dmIndex2 - X rotation between any two density matrix basis states
            theta = eval(args[0], {"__builtins__": None}, {'pi': _np.pi})
            i1 = int(args[1])  # row/column index of a single *state* within the density matrix
            i2 = int(args[2])  # row/column index of a single *state* within the density matrix
            ex = -1j * theta * sigmax / 2
            Uop = _spl.expm(ex)  # 2x2 unitary matrix operating on the i1-th and i2-th states of the state space basis

            opDim = basis.dim
            dmDim = int(_np.sqrt(basis.elsize))  # matrix dim of the "embedding space"
            if isinstance(basis, _DirectSumBasis):
                blockDims = [c.dim for c in basis.component_bases]
            else: blockDims = [opDim]

            Utot = _np.identity(dmDim, 'complex')
            Utot[i1, i1] = Uop[0, 0]
            Utot[i1, i2] = Uop[0, 1]
            Utot[i2, i1] = Uop[1, 0]
            Utot[i2, i2] = Uop[1, 1]

            # dmDim^2 x dmDim^2 mx operating on vectorized total density matrix
            opTermInStdBasis = _ot.unitary_to_process_mx(Utot)

            # contract [3] to [2, 1]
            embedded_std_basis = _Basis.cast('std', 9)  # [2]
            std_basis = _Basis.cast('std', blockDims)  # std basis w/blockdim structure, i.e. [4,1]
            opTermInReducedStdBasis = _bt.resize_std_mx(opTermInStdBasis, 'contract',
                                                        embedded_std_basis, std_basis)

            superop_mx_in_basis = _bt.change_basis(opTermInReducedStdBasis, std_basis, basis)

        else: raise ValueError("Invalid gate name: %s" % opName)

        superop_mxs_in_basis.append(superop_mx_in_basis)

    # Note: expressions are listed in "matrix composition order"
    final_superop_mx = superop_mxs_in_basis[0]
    for mx in superop_mxs_in_basis[1:]:
        final_superop_mx = _np.dot(final_superop_mx, mx)

    if basis.real:
        assert(_np.linalg.norm(final_superop_mx.imag) < 1e-6), "Operation matrix should be real but isn't!"
        final_superop_mx = _np.real(final_superop_mx)

    return _op.create_from_superop_mx(final_superop_mx, parameterization, basis,
                                      evotype=evotype, state_space=state_space)
def _create_explicit_model_from_expressions(state_space, basis,
op_labels, op_expressions,
prep_labels=('rho0',), prep_expressions=('0',),
effect_labels='standard', effect_expressions='standard',
povm_labels='Mdefault', gate_type="full", prep_type="auto",
povm_type="auto", instrument_type="auto", evotype='default'):
"""
Build a new Model given lists of operation labels and expressions.
Parameters
----------
state_space : StateSpace
The state space for this model.
basis : Basis object
The source and destination basis, respectively. Allowed
values are Matrix-unit (std), Gell-Mann (gm), Pauli-product (pp),
and Qutrit (qt) (or a custom basis object).
op_labels : list of strings
A list of labels for each created gate in the final model. To
conform with text file parsing conventions these names should begin
with a capital G and can be followed by any number of lowercase
characters, numbers, or the underscore character.
op_expressions : list of strings
A list of gate expressions, each corresponding to a operation label in
op_labels, which determine what operation each gate performs (see
documentation for :meth:`create_operation`).
prep_labels : list of string, optional
A list of labels for each created state preparation in the final
model. To conform with conventions these labels should begin with
"rho".
prep_expressions : list of strings, optional
A list of vector expressions for each state preparation vector (see
documentation for :meth:`_create_spam_vector`).
effect_labels : list, optional
If `povm_labels` is a string, then this is just a list of the effect
(outcome) labels for the single POVM. If `povm_labels` is a tuple,
then `effect_labels` must be a list of lists of effect labels, each
list corresponding to a POVM. If set to the special string `"standard"`
then the length-n binary strings are used when the state space consists
of n qubits (e.g. `"000"`, `"001"`, ... `"111"` for 3 qubits) and
the labels `"0"`, `"1"`, ... `"<dim>"` are used, where `<dim>`
is the dimension of the state space, in all non-qubit cases.
effect_expressions : list, optional
A list or list-of-lists of (string) vector expressions for each POVM
effect vector (see documentation for :meth:`_create_spam_vector`). Expressions
correspond to labels in `effect_labels`. If set to the special string
`"standard"`, then the expressions `"0"`, `"1"`, ... `"<dim>"` are used,
where `<dim>` is the dimension of the state space.
povm_labels : list or string, optional
A list of POVM labels, or a single (string) label. In the latter case,
only a single POVM is created and the format of `effect_labels` and
`effect_expressions` is simplified (see above).
parameterization : {"full","TP","static"}, optional
How to parameterize the gates of the resulting Model (see
documentation for :meth:`create_operation`).
evotype : Evotype or str, optional
The evolution type of this model, describing how states are
represented. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
Returns
-------
Model
The created model.
"""
#defP = "TP" if (parameterization in ("TP","linearTP")) else "full"
state_space = _statespace.StateSpace.cast(state_space)
ret = _emdl.ExplicitOpModel(state_space, basis.copy(), default_gate_type=gate_type,
default_prep_type=prep_type, default_povm_type=povm_type,
default_instrument_type=instrument_type, evotype=evotype)
#prep_prefix="rho", effect_prefix="E", gate_prefix="G")
if prep_type == "auto":
prep_type = _state.state_type_from_op_type(gate_type)
if povm_type == "auto":
povm_type = _povm.povm_type_from_op_type(gate_type)
if instrument_type == "auto":
instrument_type = _instrument.instrument_type_from_op_type(gate_type)
for label, rhoExpr in zip(prep_labels, prep_expressions):
vec = create_spam_vector(rhoExpr, state_space, basis)
ret.preps[label] = _state.create_from_dmvec(vec, prep_type, basis, evotype, state_space)
if isinstance(povm_labels, str):
povm_labels = [povm_labels]
effect_labels = [effect_labels]
effect_expressions = [effect_expressions]
dmDim = int(_np.sqrt(basis.dim)) # "densitymx" evotype assumed... FIX?
for povmLbl, ELbls, EExprs in zip(povm_labels,
effect_labels, effect_expressions):
effect_vecs = {}
if ELbls == "standard":
qubit_dim = 4
if state_space.num_tensor_product_blocks == 1 and \
all([ldim == qubit_dim for ldim in state_space.tensor_product_block_dimensions(0)]):
# a single tensor product block comprised of qubits: '000', '001', etc.
nQubits = len(state_space.tensor_product_block_dimensions(0))
ELbls = [''.join(t) for t in _itertools.product(('0', '1'), repeat=nQubits)]
else:
ELbls = list(map(str, range(dmDim))) # standard = 0,1,...,dmDim
if EExprs == "standard":
EExprs = list(map(str, range(dmDim))) # standard = 0,1,...,dmDim
effect_vecs = {label: create_spam_vector(expr, state_space, basis)
for label, expr in zip(ELbls, EExprs)}
if len(effect_vecs) > 0: # don't add POVMs with 0 effects
ret.povms[povmLbl] = _povm.create_from_dmvecs(effect_vecs, povm_type, basis, evotype, state_space)
for (opLabel, opExpr) in zip(op_labels, op_expressions):
ret.operations[opLabel] = create_operation(opExpr, state_space, basis, gate_type, evotype)
if gate_type == "full":
ret.default_gauge_group = _gg.FullGaugeGroup(ret.state_space, evotype)
elif gate_type == "full TP":
ret.default_gauge_group = _gg.TPGaugeGroup(ret.state_space, evotype)
elif gate_type == 'CPTP':
ret.default_gauge_group = _gg.UnitaryGaugeGroup(ret.state_space, basis, evotype)
else:
ret.default_gauge_group = _gg.TrivialGaugeGroup(ret.state_space)
ret._clean_paramvec()
return ret
def create_explicit_model_from_expressions(state_space,
                                           op_labels, op_expressions,
                                           prep_labels=('rho0',), prep_expressions=('0',),
                                           effect_labels='standard', effect_expressions='standard',
                                           povm_labels='Mdefault', basis="auto", gate_type="full",
                                           prep_type="auto", povm_type="auto", instrument_type="auto",
                                           evotype='default'):
    """
    Build a new :class:`ExplicitOpModel` given lists of labels and expressions.

    Parameters
    ----------
    state_space : StateSpace
        the state space for the model.

    op_labels : list of strings
        A list of labels for each created gate in the final model.  To
        conform with text file parsing conventions these names should begin
        with a capital G and can be followed by any number of lowercase
        characters, numbers, or the underscore character.

    op_expressions : list of strings
        A list of gate expressions, each corresponding to a operation label in
        op_labels, which determine what operation each gate performs (see
        documentation for :meth:`create_operation`).

    prep_labels : list of string
        A list of labels for each created state preparation in the final
        model.  To conform with conventions these labels should begin with
        "rho".

    prep_expressions : list of strings
        A list of vector expressions for each state preparation vector (see
        documentation for :meth:`_create_spam_vector`).

    effect_labels : list, optional
        If `povm_labels` is a string, then this is just a list of the effect
        (outcome) labels for the single POVM.  If `povm_labels` is a tuple,
        then `effect_labels` must be a list of lists of effect labels, each
        list corresponding to a POVM.  If set to the special string `"standard"`
        then the length-n binary strings are used when the state space consists
        of n qubits (e.g. `"000"`, `"001"`, ... `"111"` for 3 qubits) and
        the labels `"0"`, `"1"`, ... `"<dim>"` are used, where `<dim>`
        is the dimension of the state space, in all non-qubit cases.

    effect_expressions : list, optional
        A list or list-of-lists of (string) vector expressions for each POVM
        effect vector (see documentation for :meth:`_create_spam_vector`).  Expressions
        correspond to labels in `effect_labels`.  If set to the special string
        `"standard"`, then the expressions `"0"`, `"1"`, ... `"<dim>"` are used,
        where `<dim>` is the dimension of the state space.

    povm_labels : list or string, optional
        A list of POVM labels, or a single (string) label.  In the latter case,
        only a single POVM is created and the format of `effect_labels` and
        `effect_expressions` is simplified (see above).

    basis : {'gm','pp','std','qt','auto'}, optional
        the basis of the matrices in the returned Model

        - "std" = operation matrix operates on density mx expressed as sum of matrix
          units
        - "gm"  = operation matrix operates on dentity mx expressed as sum of
          normalized Gell-Mann matrices
        - "pp"  = operation matrix operates on density mx expresses as sum of
          tensor-product of Pauli matrices
        - "qt"  = operation matrix operates on density mx expressed as sum of
          Qutrit basis matrices
        - "auto" = "pp" if possible (integer num of qubits), "qt" if density
          matrix dim == 3, and "gm" otherwise.

    gate_type : str, optional
        The type (parameterization) used to construct the gate operations,
        e.g. `"full"` or `"full TP"` (see documentation for
        :meth:`create_operation`).

    prep_type : str, optional
        The type used to construct the state preparations.  The special value
        `"auto"` derives an appropriate type from `gate_type`.

    povm_type : str, optional
        The type used to construct the POVMs.  The special value `"auto"`
        derives an appropriate type from `gate_type`.

    instrument_type : str, optional
        The type used to construct any instruments.  The special value
        `"auto"` derives an appropriate type from `gate_type`.

    evotype : Evotype or str, optional
        The evolution type of this model, describing how states are
        represented.  The special value `"default"` is equivalent
        to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.

    Returns
    -------
    ExplicitOpModel
        The created model.
    """
    #Note: so far, all allowed `parameterization` values => densitymx evotype
    state_space = _statespace.StateSpace.cast(state_space)
    stateSpaceDim = state_space.dim
    # Note: what about state_space_labels.tpb_dims?

    if basis == "auto":
        # An integer number of qubits <=> log2(dim)/2 is a whole number.
        if _np.isclose(_np.log2(stateSpaceDim) / 2,
                       round(_np.log2(stateSpaceDim) / 2)):
            basis = "pp"
        elif stateSpaceDim == 9:  # 9 = 3**2 => a qutrit density matrix
            basis = "qt"
        else: basis = "gm"

    return _create_explicit_model_from_expressions(state_space,
                                                   _Basis.cast(basis, state_space),
                                                   op_labels, op_expressions,
                                                   prep_labels, prep_expressions,
                                                   effect_labels, effect_expressions,
                                                   povm_labels, gate_type=gate_type,
                                                   prep_type=prep_type, povm_type=povm_type,
                                                   instrument_type=instrument_type, evotype=evotype)
def create_explicit_alias_model(mdl_primitives, alias_dict):
    """
    Creates a model by applying aliases to an existing model.

    The returned model is built by composing gates of an existing `Model`,
    `mdl_primitives`, according to a dictionary of `Circuit`s, `alias_dict`,
    whose keys become the operation labels of the new model.  State
    preparations and POVMs are unaltered - they are simply copied from
    `mdl_primitives`.

    Parameters
    ----------
    mdl_primitives : Model
        A Model containing the "primitive" gates (those used to compose
        the gates of the returned model).

    alias_dict : dictionary
        A dictionary whose keys are strings and values are Circuit objects
        specifying sequences of primitive gates.  Each key,value pair specifies
        the composition rule for a creating a gate in the returned model.

    Returns
    -------
    Model
        A model whose gates are compositions of primitive gates and whose
        spam operations are the same as those of `mdl_primitives`.
    """
    aliased_model = mdl_primitives.copy()

    # Strip every primitive gate from the copy - only aliased gates remain
    # (SPAM operations are left untouched).
    for op_label in list(mdl_primitives.operations.keys()):
        del aliased_model.operations[op_label]

    # Each alias label maps to the product of the primitive gates in its
    # circuit.  (This creates fully parameterized gates by default.)
    for alias_label, circuit in alias_dict.items():
        aliased_model.operations[alias_label] = mdl_primitives.sim.product(circuit)

    aliased_model._clean_paramvec()
    return aliased_model
def create_explicit_model(processor_spec, custom_gates=None,
                          depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,
                          depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',
                          lindblad_parameterization='auto',
                          evotype="default", simulator="auto",
                          ideal_gate_type='auto', ideal_spam_type='computational',
                          embed_gates=False, basis='pp'):
    """
    Create an :class:`ExplicitOpModel` from a processor specification, with noise.

    Builds a :class:`ModelNoise` object from the depolarization / stochastic /
    Lindblad error arguments (non-local noise is allowed) and delegates to
    :func:`_create_explicit_model`.  Note that `ideal_spam_type` is used for
    *both* the ideal state-preparation and POVM types.

    Parameters
    ----------
    processor_spec : ProcessorSpec
        The processor specification to create a model for.

    custom_gates : dict, optional
        Gate objects (or factories / arrays) that override the normal
        construction of their designated operations.

    depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs : dict, optional
        Error specifications keyed by gate name (and the special `"prep"` /
        `"povm"` keys); see :func:`create_crosstalk_free_model` for details.

    depolarization_parameterization, stochastic_parameterization, lindblad_parameterization : str, optional
        How each kind of error specification is parameterized; see
        :func:`create_crosstalk_free_model` for details.

    evotype : Evotype or str, optional
        The evolution type; `"default"` uses
        `pygsti.evotypes.Evotype.default_evotype`.

    simulator : ForwardSimulator or {"auto", "matrix", "map"}
        The forward simulator for the resulting model.

    ideal_gate_type : str or tuple, optional
        Type (or priority-ordered types) used to construct the ideal gates.

    ideal_spam_type : str or tuple, optional
        Type used to construct the ideal state preparations *and* POVMs.

    embed_gates : bool, optional
        Whether gates are constructed as embedded versions of local gates
        (required when the processor spec contains gate factories).

    basis : str, optional
        The basis of the created model (e.g. `'pp'`).

    Returns
    -------
    ExplicitOpModel
    """
    modelnoise = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,
                                             depolarization_parameterization, stochastic_parameterization,
                                             lindblad_parameterization, allow_nonlocal=True)
    # `ideal_spam_type` is deliberately passed twice: once as the ideal prep
    # type and once as the ideal POVM type.
    return _create_explicit_model(processor_spec, modelnoise, custom_gates, evotype,
                                  simulator, ideal_gate_type, ideal_spam_type, ideal_spam_type, embed_gates, basis)
def _create_explicit_model(processor_spec, modelnoise, custom_gates=None, evotype="default", simulator="auto",
                           ideal_gate_type='auto', ideal_prep_type='auto', ideal_povm_type='auto',
                           embed_gates=False, basis='pp'):
    """
    Build an :class:`ExplicitOpModel` from a processor spec and a :class:`ModelNoise`.

    Creates one operation (or op-factory) per available gate placement in
    `processor_spec`, composing each ideal gate with whatever noise
    `modelnoise` specifies for it, then adds `"rho0"` prep and `"Mdefault"`
    POVM layers built by :func:`_create_spam_layers`.

    Same arguments as :func:`create_explicit_model` except that noise is given
    directly as `modelnoise` and separate `ideal_prep_type` / `ideal_povm_type`
    values may be supplied.

    Returns
    -------
    ExplicitOpModel
    """
    qubit_labels = processor_spec.qubit_labels
    state_space = _statespace.QubitSpace(qubit_labels)
    evotype = _Evotype.cast(evotype)
    modelnoise = _OpModelNoise.cast(modelnoise)
    modelnoise.reset_access_counters()  # so we can warn below about unused noise specs

    if custom_gates is None:
        custom_gates = {}

    if ideal_gate_type == "auto":
        ideal_gate_type = ('static standard', 'static clifford', 'static unitary')
    if ideal_prep_type == "auto":
        ideal_prep_type = _state.state_type_from_op_type(ideal_gate_type)
    if ideal_povm_type == "auto":
        ideal_povm_type = _povm.povm_type_from_op_type(ideal_gate_type)

    def _embed_unitary(statespace, target_labels, unitary):
        # Embed a local unitary into the full Hilbert space by building a
        # throwaway EmbeddedOp and asking for its dense Hilbert-space matrix.
        dummyop = _op.EmbeddedOp(statespace, target_labels,
                                 _op.StaticUnitaryOp(unitary, basis='pp', evotype="statevec_slow"))  # basis hardcode?
        return dummyop.to_dense("Hilbert")

    local_gates = _setup_local_gates(processor_spec, evotype, None, {}, ideal_gate_type)  # no custom *local* gates
    ret = _emdl.ExplicitOpModel(state_space, basis, default_gate_type=ideal_gate_type, evotype=evotype,
                                simulator=simulator)

    # Special rule: when initializng an explicit model, if the processor spec has an implied global idle
    # gate (e.g. "(idle)", then the created model instead has a empty-tuple Label as the key for this op.
    global_idle_name = processor_spec.global_idle_gate_name
    if (global_idle_name is not None) and global_idle_name.startswith('(') and global_idle_name.endswith(')'):
        gn_to_make_emptytup = global_idle_name
    else:
        gn_to_make_emptytup = None

    for gn, gate_unitary in processor_spec.gate_unitaries.items():
        gate_is_factory = callable(gate_unitary)  # factories are unitary-valued functions
        resolved_avail = processor_spec.resolved_availability(gn)

        if callable(resolved_avail) or resolved_avail == '*':
            # Availability is "anywhere allowed" => must create an op-factory
            # that embeds the local gate on whatever targets are requested.
            assert (embed_gates), "Cannot create factories with `embed_gates=False` yet!"
            key = _label.Label(gn) if (gn != gn_to_make_emptytup) else _label.Label(())
            allowed_sslbls_fn = resolved_avail if callable(resolved_avail) else None
            gate_nQubits = processor_spec.gate_num_qubits(gn)
            ideal_factory = _opfactory.EmbeddingOpFactory(
                state_space, local_gates[gn], num_target_labels=gate_nQubits, allowed_sslbls_fn=allowed_sslbls_fn)
            noiseop = modelnoise.create_errormap(key, evotype, state_space)  # No target indices... just local errs?
            factory = ideal_factory if (noiseop is None) else _op.ComposedOpFactory([ideal_factory, noiseop])
            ret.factories[key] = factory

        else:  # resolved_avail is a list/tuple of available sslbls for the current gate/factory
            for inds in resolved_avail:  # inds are target qubit labels
                key = _label.Label(()) if (inds is None and gn == gn_to_make_emptytup) else _label.Label(gn, inds)

                if key in custom_gates:  # allow custom_gates to specify gate elements directly
                    if isinstance(custom_gates[key], _opfactory.OpFactory):
                        ret.factories[key] = custom_gates[key]
                    elif isinstance(custom_gates[key], _op.LinearOperator):
                        ret.operations[key] = custom_gates[key]
                    else:  # presumably a numpy array or something like it.
                        ret.operations[key] = _op.StaticArbitraryOp(custom_gates[key], evotype,
                                                                    state_space)  # static gates by default
                    continue

                if gate_is_factory:
                    assert(embed_gates), "Cannot create factories with `embed_gates=False` yet!"
                    # TODO: check for modelnoise on *local* factory, i.e. create_errormap(gn, ...)??
                    if inds is None or inds == tuple(qubit_labels):  # then no need to embed
                        ideal_factory = local_gates[gn]
                    else:
                        ideal_factory = _opfactory.EmbeddedOpFactory(state_space, inds, local_gates[gn])
                    noiseop = modelnoise.create_errormap(key, evotype, state_space, target_labels=inds)
                    factory = ideal_factory if (noiseop is None) else _op.ComposedOpFactory([ideal_factory, noiseop])
                    ret.factories[key] = factory
                else:
                    if inds is None or inds == tuple(qubit_labels):  # then no need to embed
                        if isinstance(gate_unitary, (int, _np.int64)):  # interpret gate_unitary as identity
                            assert(gate_unitary == len(qubit_labels)), \
                                "Idle unitary as int should be on all qubits for %s" % (str(gn))
                            ideal_gate = _op.ComposedOp([], evotype, state_space)  # (identity gate on *all* qubits)
                        else:
                            ideal_gate = _op.create_from_unitary_mx(gate_unitary, ideal_gate_type, 'pp',
                                                                    None, evotype, state_space)
                    else:
                        if embed_gates:
                            ideal_gate = local_gates[gn]
                            ideal_gate = _op.EmbeddedOp(state_space, inds, ideal_gate)
                        else:
                            # Embed the *unitary* (not the op) and build a dense gate from it.
                            if isinstance(gate_unitary, (int, _np.int64)):  # interpret gate_unitary as identity
                                gate_unitary = _np.identity(2**gate_unitary, 'd')  # turn into explicit identity op
                            if gate_unitary.shape[0] == state_space.udim:  # no need to embed!
                                embedded_unitary = gate_unitary
                            else:
                                embedded_unitary = _embed_unitary(state_space, inds, gate_unitary)
                            ideal_gate = _op.create_from_unitary_mx(embedded_unitary, ideal_gate_type, 'pp',
                                                                    None, evotype, state_space)

                    #TODO: check for modelnoise on *local* gate, i.e. create_errormap(gn, ...)??
                    noiseop = modelnoise.create_errormap(key, evotype, state_space, target_labels=inds)
                    layer = _op.ComposedOp([ideal_gate, noiseop]) if (noiseop is not None) else ideal_gate
                    ret.operations[key] = layer

    # SPAM:
    local_noise = False; independent_gates = True; independent_spam = True
    prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, local_noise,
                                                   ideal_prep_type, ideal_povm_type, evotype,
                                                   state_space, independent_gates, independent_spam)
    for k, v in prep_layers.items():
        ret.preps[k] = v
    for k, v in povm_layers.items():
        ret.povms[k] = v

    modelnoise.warn_about_zero_counters()  # warn about noise specs that were never used
    ret._clean_paramvec()
    return ret
def _create_spam_layers(processor_spec, modelnoise, local_noise,
                        ideal_prep_type, ideal_povm_type, evotype, state_space, independent_gates, independent_spam):
    """
    Create the `'rho0'` prep layer and `'Mdefault'` POVM layer for a model.

    `local_noise=True` creates lindblad ops that are embedded & composed 1Q ops, and assumes
    that modelnoise specifies 1Q noise.  local_noise=False assumes modelnoise specifies n-qubit noise.

    Parameters
    ----------
    processor_spec : ProcessorSpec
        Supplies the qubit labels, qubit count and qubit graph.

    modelnoise : ModelNoise
        Noise specification, queried via its `'prep'` and `'povm'` keys.

    local_noise : bool
        See above.

    ideal_prep_type, ideal_povm_type : str or tuple
        The type of the ideal (noise-free) prep / POVM.  If a tuple, only the
        first element is used.  Recognized special forms are
        `'computational'`, `'lindblad <paramtype>'` and
        `'tensor product <vectype>'`; anything else is treated as a vector
        type for an n-qubit state / POVM.

    evotype : Evotype
        Evolution type for all created objects.

    state_space : StateSpace
        The full (n-qubit) state space.

    independent_gates, independent_spam : bool
        When True, per-qubit noise operations are independent copies; when
        False a single noise operation object is shared among qubits.

    Returns
    -------
    prep_layers, povm_layers : dict
        Dictionaries with keys `'rho0'` and `'Mdefault'` respectively.
    """
    qubit_labels = processor_spec.qubit_labels
    num_qubits = processor_spec.num_qubits
    singleQ_state_space = _statespace.default_space_for_udim(2)  # single qubit state space

    #  Step 1 -- get the ideal prep and POVM, created as the types we want
    #  Step 2 -- add noise, by composing ideal with a noise operation (if desired)
    prep_layers = {}
    povm_layers = {}

    def _add_prep_noise(prep_ops):
        """ Adds one or more noise ops to the `prep_ops` list (to compose later) """
        if local_noise:  # then assume modelnoise specifies 1Q errors
            prep_noiseop1Q = modelnoise.create_errormap('prep', evotype, singleQ_state_space, target_labels=None)
            if prep_noiseop1Q is not None:
                err_gates = [prep_noiseop1Q.copy() for i in range(num_qubits)] \
                    if independent_gates else [prep_noiseop1Q] * num_qubits
                prep_ops.extend([_op.EmbeddedOp(state_space, [qubit_labels[i]], err_gates[i])
                                 for i in range(num_qubits)])
        else:  # use modelnoise to construct n-qubit noise
            prepNoiseMap = modelnoise.create_errormap('prep', evotype, state_space, target_labels=None,
                                                      qubit_graph=processor_spec.qubit_graph)
            if prepNoiseMap is not None: prep_ops.append(prepNoiseMap)

    def _add_povm_noise(povm_ops):
        """ Adds one or more noise ops to the `povm_ops` list (to compose later) """
        if local_noise:  # then assume modelnoise specifies 1Q errors
            povm_noiseop1Q = modelnoise.create_errormap('povm', evotype, singleQ_state_space, target_labels=None)
            if povm_noiseop1Q is not None:
                err_gates = [povm_noiseop1Q.copy() for i in range(num_qubits)] \
                    if independent_gates else [povm_noiseop1Q] * num_qubits
                povm_ops.extend([_op.EmbeddedOp(state_space, [qubit_labels[i]], err_gates[i])
                                 for i in range(num_qubits)])
        else:  # use modelnoise to construct n-qubit noise
            povmNoiseMap = modelnoise.create_errormap('povm', evotype, state_space, target_labels=None,
                                                      qubit_graph=processor_spec.qubit_graph)
            if povmNoiseMap is not None: povm_ops.append(povmNoiseMap)

    def _add_to_prep_layers(ideal_prep, prep_ops):
        """ Adds the ideal prep, composed with any noise ops, to prep_layers """
        # FIX: test the `prep_ops` argument, not the enclosing-scope name
        # `prep_ops_to_compose` -- the original only worked because every
        # caller happened to pass that exact list.
        if len(prep_ops) == 0:
            prep_layers['rho0'] = ideal_prep
        elif len(prep_ops) == 1:
            prep_layers['rho0'] = _state.ComposedState(ideal_prep, prep_ops[0])
        else:
            prep_layers['rho0'] = _state.ComposedState(ideal_prep, _op.ComposedOp(prep_ops))

    def _add_to_povm_layers(ideal_povm, povm_ops):
        """ Adds the ideal POVM, composed with any noise ops, to povm_layers """
        # FIX: test the `povm_ops` argument, not the enclosing-scope name
        # `povm_ops_to_compose` (same aliasing issue as _add_to_prep_layers).
        if len(povm_ops) == 0:
            povm_layers['Mdefault'] = ideal_povm
        elif len(povm_ops) == 1:
            povm_layers['Mdefault'] = _povm.ComposedPOVM(povm_ops[0], ideal_povm, 'pp')
        else:
            povm_layers['Mdefault'] = _povm.ComposedPOVM(_op.ComposedOp(povm_ops), ideal_povm, 'pp')

    def _create_nq_noise(lndtype):
        """ Build an exp(errorgen) noise op of Lindblad type `lndtype` on all qubits """
        if local_noise:
            # create a 1-qubit exp(errorgen) that is applied to each qubit independently
            errgen_1Q = _op.LindbladErrorgen.from_error_generator(singleQ_state_space.dim, lndtype, 'pp', 'pp',
                                                                  truncate=True, evotype=evotype, state_space=None)
            err_gateNQ = _op.ComposedOp([_op.EmbeddedOp(state_space, [qubit_labels[i]],
                                                        _op.ExpErrorgenOp(errgen_1Q.copy()))
                                         for i in range(num_qubits)], evotype, state_space)
        else:
            # create an n-qubit exp(errorgen)
            errgen_NQ = _op.LindbladErrorgen.from_error_generator(state_space.dim, lndtype, 'pp', 'pp',
                                                                 truncate=True, evotype=evotype,
                                                                 state_space=state_space)
            err_gateNQ = _op.ExpErrorgenOp(errgen_NQ)
        return err_gateNQ

    # Here's where the actual logic starts.  The above functions avoid repeated blocks within the different
    # cases below.

    # Prep logic
    if isinstance(ideal_prep_type, (tuple, list)): ideal_prep_type = ideal_prep_type[0]  # HACK to support multiple vals
    if ideal_prep_type == 'computational' or ideal_prep_type.startswith('lindblad '):
        ideal_prep = _state.ComputationalBasisState([0] * num_qubits, 'pp', evotype, state_space)

        prep_ops_to_compose = []
        if ideal_prep_type.startswith('lindblad '):  # then add a composed exp(errorgen) to computational SPAM
            lndtype = ideal_prep_type[len('lindblad '):]

            err_gateNQ = _create_nq_noise(lndtype)

            prep_ops_to_compose.append(err_gateNQ)

        # Add noise
        _add_prep_noise(prep_ops_to_compose)

        #Add final ops to returned dictionaries (Note: None -> ComputationPOVM within ComposedPOVM)
        _add_to_prep_layers(ideal_prep, prep_ops_to_compose)

    elif ideal_prep_type.startswith('tensor product '):
        #Note: with "tensor product <X>" types, e.g. "tensor product static", we assume modelnoise specifies just
        # a 1Q noise operation, even when `local_noise=False`
        vectype = ideal_prep_type[len('tensor product '):]

        v0 = _np.array([1, 0], 'd')  # |0> pure state (only |0> is needed for the prep)
        ideal_prep1Q = _state.create_from_pure_vector(v0, vectype, 'pp', evotype, state_space=None)
        prep_factors = [ideal_prep1Q.copy() for i in range(num_qubits)]

        # Add noise
        prep_noiseop1Q = modelnoise.create_errormap('prep', evotype, singleQ_state_space, target_labels=None)
        if prep_noiseop1Q is not None:
            prep_factors = [_state.ComposedState(
                factor, (prep_noiseop1Q.copy() if independent_spam else prep_noiseop1Q)) for factor in prep_factors]

        prep_layers['rho0'] = _state.TensorProductState(prep_factors, state_space)

    else:  # assume ideal_spam_type is a valid 'vectype' for creating n-qubit state vectors & POVMs
        vectype = ideal_prep_type
        vecs = []  # all the basis vectors for num_qubits
        for i in range(2**num_qubits):
            v = _np.zeros(2**num_qubits, 'd'); v[i] = 1.0
            vecs.append(v)

        ideal_prep = _state.create_from_pure_vector(vecs[0], vectype, 'pp', evotype, state_space=state_space)

        # Add noise
        prep_ops_to_compose = []
        _add_prep_noise(prep_ops_to_compose)

        # Add final ops to returned dictionaries
        _add_to_prep_layers(ideal_prep, prep_ops_to_compose)

    # Povm logic
    if isinstance(ideal_povm_type, (tuple, list)): ideal_povm_type = ideal_povm_type[0]  # HACK to support multiple vals
    if ideal_povm_type == 'computational' or ideal_povm_type.startswith('lindblad '):
        ideal_povm = _povm.ComputationalBasisPOVM(num_qubits, evotype, state_space=state_space)

        povm_ops_to_compose = []
        if ideal_povm_type.startswith('lindblad '):  # then add a composed exp(errorgen) to computational SPAM
            lndtype = ideal_povm_type[len('lindblad '):]

            err_gateNQ = _create_nq_noise(lndtype)

            povm_ops_to_compose.append(err_gateNQ.copy())  # .copy() => POVM errors independent

        # Add noise
        _add_povm_noise(povm_ops_to_compose)

        #Add final ops to returned dictionaries (Note: None -> ComputationPOVM within ComposedPOVM)
        effective_ideal_povm = None if len(povm_ops_to_compose) > 0 else ideal_povm
        _add_to_povm_layers(effective_ideal_povm, povm_ops_to_compose)

    elif ideal_povm_type.startswith('tensor product '):
        #Note: with "tensor product <X>" types, e.g. "tensor product static", we assume modelnoise specifies just
        # a 1Q noise operation, even when `local_noise=False`
        vectype = ideal_povm_type[len('tensor product '):]

        v0, v1 = _np.array([1, 0], 'd'), _np.array([0, 1], 'd')  # 1Q |0> and |1> pure states
        ideal_povm1Q = _povm.create_from_pure_vectors([('0', v0), ('1', v1)], vectype, 'pp',
                                                      evotype, state_space=None)
        povm_factors = [ideal_povm1Q.copy() for i in range(num_qubits)]

        # Add noise
        povm_noiseop1Q = modelnoise.create_errormap('povm', evotype, singleQ_state_space, target_labels=None)
        if povm_noiseop1Q is not None:
            povm_factors = [_povm.ComposedPOVM(
                (povm_noiseop1Q.copy() if independent_spam else povm_noiseop1Q), factor, 'pp')
                for factor in povm_factors]

        povm_layers['Mdefault'] = _povm.TensorProductPOVM(povm_factors, evotype, state_space)

    else:  # assume ideal_spam_type is a valid 'vectype' for creating n-qubit state vectors & POVMs
        vectype = ideal_povm_type
        vecs = []  # all the basis vectors for num_qubits
        for i in range(2**num_qubits):
            v = _np.zeros(2**num_qubits, 'd'); v[i] = 1.0
            vecs.append(v)

        ideal_povm = _povm.create_from_pure_vectors(
            [(format(i, 'b').zfill(num_qubits), v) for i, v in enumerate(vecs)],
            vectype, 'pp', evotype, state_space=state_space)

        # Add noise
        povm_ops_to_compose = []
        _add_povm_noise(povm_ops_to_compose)

        # Add final ops to returned dictionaries
        _add_to_povm_layers(ideal_povm, povm_ops_to_compose)

    return prep_layers, povm_layers
def _setup_local_gates(processor_spec, evotype, modelnoise=None, custom_gates=None,
                       ideal_gate_type=('static standard', 'static clifford', 'static unitary')):
    """
    Construct a dictionary of potentially noisy gates that act only on their target qubits.

    These gates are "local" because they act only on their intended target qubits.  The gates
    consist of an ideal gate (obviously local, and crosstalk free) of the type given by
    `ideal_gate_type` composed with a noise operation given by `modelnoise`, if one exists.
    The returned dictionary contains keys for all the gate names in `processor_spec`.  Custom
    gate objects can be given by `custom_gates`, which override the normal gate construction.

    Parameters
    ----------
    processor_spec : ProcessorSpec
        The processor to create gate operations for.  This object specifies the
        gate names and unitaries for the processor, among other things.

    evotype : Evotype
        Create gate objects with this evolution type.

    modelnoise : ModelNoise, optional
        Noise that should be applied after the ideal gates.  This noise must
        be *local* to each gate (i.e. acting on its target qubits).  See the
        :class:`ModelNoise` object documentation for details regarding how
        to specify different types of noise.  If `None`, then no noise is added .

    custom_gates : dict, optional
        A dictionary of gate objects that should be placed in the returned
        dictionary in lieu of objects that would normally be constructed.
        Keys are gate names and values are gates.

    ideal_gate_type : str or tuple, optional
        A gate type or tuple of gate types (listed in order of priority) which
        is used to construct the ideal gates.  A gate type usually specifies the
        Python class that will be created, which determines 1) the parameterization
        of the gate and 2) the class/category of the gate (e.g. a :class:`StaticClifford`
        operation has no parameters and is a Clifford operation).

    Returns
    -------
    gatedict : dict
        A dictionary mapping gate names to local gate operations.
    """
    std_gate_unitaries = _itgs.standard_gatename_unitaries()
    if custom_gates is None: custom_gates = {}
    if modelnoise is None: modelnoise = _OpModelPerOpNoise({})

    # All possible entries into the upcoming gate dictionary
    # Not just gatenames as it is possible to override in qubit-specific operations
    all_keys = _lt.remove_duplicates(list(processor_spec.gate_names)
                                     + list(custom_gates.keys())
                                     + list(modelnoise.keys()))

    # Cache ideal ops to ensure only one copy for each name
    ideal_gates = {}
    ideal_factories = {}

    gatedict = _collections.OrderedDict()
    for key in all_keys:
        # Use custom gate directly as error gate
        if key in custom_gates:
            gatedict[key] = custom_gates[key]
            continue

        # Skip prep, and povm here, just do gates
        if key in ['prep', 'povm']:
            continue

        # If key has qubits, get base name for lookup
        label = _label.Label(key)
        name = label.name

        U = processor_spec.gate_unitaries[name]  # all gate names must be in the processorspec
        # A gate counts as "standard" (stdname != None) when it's not a nonstd gate,
        # or when its nonstd unitary matches the standard one of the same name exactly.
        if ((name not in processor_spec.nonstd_gate_unitaries)
                or (not callable(processor_spec.nonstd_gate_unitaries[name]) and (name in std_gate_unitaries)
                    and processor_spec.nonstd_gate_unitaries[name].shape == std_gate_unitaries[name].shape
                    and _np.allclose(processor_spec.nonstd_gate_unitaries[name], std_gate_unitaries[name]))):
            stdname = name  # setting `stdname` != None means we can try to create a StaticStandardOp below
        else:
            stdname = None

        if isinstance(U, (int, _np.int64)):  # signals that the gate is an identity on `U` qubits
            ideal_gate_state_space = _statespace.default_space_for_num_qubits(U)
            noiseop = modelnoise.create_errormap(key, evotype, ideal_gate_state_space, target_labels=None)
            if noiseop is not None:
                # identity composed with noise is just the noise op itself
                gatedict[key] = noiseop
            else:
                gatedict[key] = _op.ComposedOp([], evotype, ideal_gate_state_space)  # (identity gate on N qubits)
        elif not callable(U):  # normal operation (not a factory)
            ideal_gate = ideal_gates.get(name, None)
            if ideal_gate is None:
                ideal_gate = _op.create_from_unitary_mx(U, ideal_gate_type, 'pp', stdname, evotype, state_space=None)
                ideal_gates[name] = ideal_gate
            noiseop = modelnoise.create_errormap(key, evotype, ideal_gate.state_space, target_labels=None)
            # Note: above line creates a *local* noise op, working entirely in the ideal gate's target space.
            #   This means it will fail to create error maps with a given (non-local/stencil) set of sslbls, as desired

            if noiseop is None:
                gatedict[key] = ideal_gate
            else:
                if isinstance(noiseop, _op.ComposedOp):  # avoid additional nested ComposedOp if we already have one
                    noiseop.insert(0, ideal_gate)
                    gatedict[key] = noiseop
                else:
                    gatedict[key] = _op.ComposedOp([ideal_gate, noiseop])

        else:  # a factory, given by the unitary-valued function U: args -> unitary
            ideal_factory = ideal_factories.get(name, None)
            if ideal_factory is None:
                # NOTE(review): presumably factory functions carry a `.shape` attribute
                # giving the unitary dimension they produce -- confirm against ProcessorSpec.
                local_state_space = _statespace.default_space_for_udim(U.shape[0])  # factory *function* SHAPE
                ideal_factory = _opfactory.UnitaryOpFactory(U, local_state_space, 'pp', evotype)
                ideal_factories[name] = ideal_factory
            noiseop = modelnoise.create_errormap(key, evotype, ideal_factory.state_space, target_labels=None)
            gatedict[key] = _opfactory.ComposedOpFactory([ideal_factory, noiseop]) \
                if (noiseop is not None) else ideal_factory
    return gatedict
def create_crosstalk_free_model(processor_spec, custom_gates=None,
                                depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,
                                depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',
                                lindblad_parameterization='auto',
                                evotype="default", simulator="auto", on_construction_error='raise',
                                independent_gates=False, independent_spam=True, ensure_composed_gates=False,
                                ideal_gate_type='auto', ideal_spam_type='computational', implicit_idle_mode='none'):
    """
    Create a n-qubit "crosstalk-free" model.

    By virtue of being crosstalk-free, this model's operations only
    act nontrivially on their target qubits.  Gates consist of an ideal gate
    operation possibly followed by an error operation.

    Errors can be specified using any combination of the 4 error rate/coeff arguments,
    but each gate name must be provided exclusively to one type of specification.
    Each specification results in a different type of operation, depending on the parameterization:

    - `depolarization_strengths`    -> DepolarizeOp, StochasticNoiseOp, or exp(LindbladErrorgen)
    - `stochastic_error_probs`      -> StochasticNoiseOp or exp(LindbladErrorgen)
    - `lindblad_error_coeffs`       -> exp(LindbladErrorgen)

    In addition to the gate names, the special values `"prep"` and `"povm"` may be
    used as keys to specify the error on the state preparation, measurement, respectively.

    Parameters
    ----------
    processor_spec : ProcessorSpec
        The processor specification to create a model for.  This object specifies the
        gate names and unitaries for the processor, and their availability on the
        processor.

    custom_gates : dict, optional
        A dictionary that associates with gate labels
        :class:`LinearOperator`, :class:`OpFactory`, or `numpy.ndarray`
        objects.  These objects override any other behavior for constructing
        their designated operations.  Keys of this dictionary may
        be string-type gate *names* or labels that include target qubits.

    depolarization_strengths : dict, optional
        A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
        are floats that specify the strength of uniform depolarization.

    stochastic_error_probs : dict, optional
        A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
        are tuples that specify Pauli-stochastic rates for each of the non-trivial
        Paulis (so a 3-tuple would be expected for a 1Q gate and a 15-tuple for a 2Q gate).

    lindblad_error_coeffs : dict, optional
        A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
        are dictionaries corresponding to the `lindblad_term_dict` kwarg taken
        by `LindbladErrorgen`. Keys are `(termType, basisLabel1, <basisLabel2>)`
        tuples, where `termType` can be `"H"` (Hamiltonian), `"S"`
        (Stochastic), or `"A"` (Affine).  Hamiltonian and Affine terms always
        have a single basis label (so key is a 2-tuple) whereas Stochastic
        tuples with 1 basis label indicate a *diagonal* term, and are the
        only types of terms allowed when `nonham_mode != "all"`.  Otherwise,
        Stochastic term tuples can include 2 basis labels to specify
        "off-diagonal" non-Hamiltonian Lindblad terms.  Basis labels can be
        strings or integers.  Values are complex coefficients.

    depolarization_parameterization : str of {"depolarize", "stochastic", or "lindblad"}
        Determines whether a DepolarizeOp, StochasticNoiseOp, or LindbladErrorgen
        is used to parameterize the depolarization noise, respectively.
        When "depolarize" (the default), a DepolarizeOp is created with the strength given
        in `depolarization_strengths`. When "stochastic", the depolarization strength is split
        evenly among the stochastic channels of a StochasticOp. When "lindblad", the depolarization
        strength is split evenly among the coefficients of the stochastic error generators
        (which are exponentiated to form a LindbladErrorgen with the "depol" parameterization).

    stochastic_parameterization : str of {"stochastic", or "lindblad"}
        Determines whether a StochasticNoiseOp or LindbladErrorgen is used to parameterize the
        stochastic noise, respectively. When "stochastic", elements of `stochastic_error_probs`
        are used as coefficients in a linear combination of stochastic channels (the default).
        When "lindblad", the elements of `stochastic_error_probs` are coefficients of
        stochastic error generators (which are exponentiated to form a LindbladErrorgen with the
        "cptp" parameterization).

    lindblad_parameterization : "auto" or a LindbladErrorgen paramtype
        Determines the parameterization of the LindbladErrorgen. When "auto" (the default), the parameterization
        is inferred from the types of error generators specified in the `lindblad_error_coeffs` dictionaries.
        When not "auto", the parameterization type is passed through to the LindbladErrorgen.

    evotype : Evotype or str, optional
        The evolution type.  The special value `"default"` is equivalent
        to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.

    simulator : ForwardSimulator or {"auto", "matrix", "map"}
        The simulator used to compute predicted probabilities for the
        resulting :class:`Model`.  Using `"auto"` selects `"matrix"` when there
        are 2 qubits or less, and otherwise selects `"map"`.

    on_construction_error : {'raise','warn','ignore'}
        What to do when the creation of a gate with the given
        `parameterization` fails.  Usually you'll want to `"raise"` the error.
        In some cases, for example when converting as many gates as you can
        into `parameterization="clifford"` gates, `"warn"` or even `"ignore"`
        may be useful.

    independent_gates : bool, optional
        Whether gates are allowed independent local noise or not.  If False,
        then all gates with the same name (e.g. "Gx") will have the *same*
        (local) noise (e.g. an overrotation by 1 degree), and the
        `operation_bks['gates']` dictionary contains a single key per gate
        name.  If True, then gates with the same name acting on different
        qubits may have different local noise, and so the
        `operation_bks['gates']` dictionary contains a key for each gate
         available gate placement.

    independent_spam : bool, optional
        Whether the per-qubit SPAM noise operations are allowed to be
        independent (True, the default), in which case each qubit receives an
        independent copy of the `"prep"`/`"povm"` noise operation, or whether
        a single noise operation object is shared among all qubits (False).

    ensure_composed_gates : bool, optional
        If True then the elements of the `operation_bks['gates']` will always
        be :class:`ComposedOp` objects.  The purpose of this is to
        facilitate modifying the gate operations after the model is created.
        If False, then the appropriately parameterized gate objects (often
        dense gates) are used directly.

    ideal_gate_type : str or tuple, optional
        A gate type or tuple of gate types (listed in order of priority) which
        is used to construct the ideal gates.  A gate type usually specifies the
        Python class that will be created, which determines 1) the parameterization
        of the gate and 2) the class/category of the gate (e.g. a :class:`StaticClifford`
        operation has no parameters and is a Clifford operation).

    ideal_spam_type : str or tuple, optional
        Similar to `ideal_gate_type` but for SPAM elements (state preparations
        and POVMs).

    implicit_idle_mode : {'none', 'add_global'}
        The way idle operations are added implicitly within the created model. `"none"`
        doesn't add any "extra" idle operations when there is a layer that contains some
        gates but not gates on all the qubits.  `"add_global"` adds the global idle operation,
        i.e., the operation for a global idle layer (zero gates - a completely empty layer),
        to every layer that is simulated, using the global idle as a background idle that always
        occurs regardless of the operation.

    Returns
    -------
    LocalNoiseModel
        A model with `"rho0"` prep, `"Mdefault"` POVM, and gates labeled by
        the gate names and qubit labels (as specified by `processor_spec`).
        For instance, the operation label for the `"Gx"` gate on the second
        qubit might be `Label("Gx",1)`.
    """
    modelnoise = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,
                                             depolarization_parameterization, stochastic_parameterization,
                                             lindblad_parameterization, allow_nonlocal=False)

    # Note: `ideal_spam_type` is passed twice below -- once as the ideal prep
    # type and once as the ideal POVM type.
    return _create_crosstalk_free_model(processor_spec, modelnoise, custom_gates, evotype,
                                        simulator, on_construction_error, independent_gates, independent_spam,
                                        ensure_composed_gates, ideal_gate_type, ideal_spam_type, ideal_spam_type,
                                        implicit_idle_mode)
def _create_crosstalk_free_model(processor_spec, modelnoise, custom_gates=None, evotype="default", simulator="auto",
                                 on_construction_error='raise', independent_gates=False, independent_spam=True,
                                 ensure_composed_gates=False, ideal_gate_type='auto', ideal_prep_type='auto',
                                 ideal_povm_type='auto', implicit_idle_mode='none'):
    """
    Create a n-qubit "crosstalk-free" model.

    Similar to :method:`create_crosstalk_free_model` but the noise is input more generally,
    as a :class:`ModelNoise` object.  Arguments are the same as this function except that
    `modelnoise` is given instead of several more specific noise-describing arguments.

    Returns
    -------
    LocalNoiseModel
    """
    state_space = _statespace.QubitSpace(processor_spec.qubit_labels)
    evotype = _Evotype.cast(evotype)
    modelnoise = _OpModelNoise.cast(modelnoise)
    modelnoise.reset_access_counters()  # so unused noise specifications can be warned about below

    # Resolve 'auto' ideal-operation types: prefer static standard/clifford/unitary gate types,
    # and derive matching state-prep / POVM types from whatever gate type was chosen.
    if ideal_gate_type == "auto":
        ideal_gate_type = ('static standard', 'static clifford', 'static unitary')
    if ideal_prep_type == "auto":
        ideal_prep_type = _state.state_type_from_op_type(ideal_gate_type)
    if ideal_povm_type == "auto":
        ideal_povm_type = _povm.povm_type_from_op_type(ideal_gate_type)

    gate_dictionary = _setup_local_gates(processor_spec, evotype, modelnoise, custom_gates, ideal_gate_type)
    # (Note: global idle is now handled through processor-spec processing)

    # SPAM: crosstalk-free models apply *local* noise to SPAM elements (third arg == True)
    prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, True,
                                                   ideal_prep_type, ideal_povm_type, evotype,
                                                   state_space, independent_gates, independent_spam)

    modelnoise.warn_about_zero_counters()
    return _LocalNoiseModel(processor_spec, gate_dictionary, prep_layers, povm_layers,
                            evotype, simulator, on_construction_error,
                            independent_gates, ensure_composed_gates,
                            implicit_idle_mode)
def create_cloud_crosstalk_model(processor_spec, custom_gates=None,
                                 depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,
                                 depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',
                                 lindblad_parameterization='auto', evotype="default", simulator="auto",
                                 independent_gates=False, independent_spam=True, errcomp_type="gates",
                                 implicit_idle_mode="none", verbosity=0):
    """
    Create a n-qubit "cloud-crosstalk" model.

    In a cloud crosstalk model, gates consist of a (local) ideal gates followed
    by an error operation that can act nontrivially on *any* of the processor's qubits
    (not just a gate's target qubits).  Typically a gate's errors are specified
    relative to the gate's target qubits, forming a "cloud" of errors around the
    target qubits using some notion of locality (that may not be spatial, e.g.
    local in frequency).  Currently, the "ideal" portion of each gate can only be
    created as a *static* (parameterless) object -- all gate parameters come from
    the error operation.

    Errors can be specified using any combination of the 4 error rate/coeff arguments,
    but each gate name must be provided exclusively to one type of specification.
    Each specification results in a different type of operation, depending on the parameterization:

    - `depolarization_strengths` -> DepolarizeOp, StochasticNoiseOp, or exp(LindbladErrorgen)
    - `stochastic_error_probs` -> StochasticNoiseOp or exp(LindbladErrorgen)
    - `lindblad_error_coeffs` -> exp(LindbladErrorgen)

    In addition to the gate names, the special values `"prep"` and `"povm"` may be
    used as keys to specify the error on the state preparation, measurement, respectively.

    Parameters
    ----------
    processor_spec : ProcessorSpec
        The processor specification to create a model for.  This object specifies the
        gate names and unitaries for the processor, and their availability on the
        processor.

    custom_gates : dict, optional
        A dictionary that associates with gate labels
        :class:`LinearOperator`, :class:`OpFactory`, or `numpy.ndarray`
        objects.  These objects override any other behavior for constructing
        their designated operations.  Keys of this dictionary may
        be string-type gate *names* or labels that include target qubits.

    depolarization_strengths : dict, optional
        A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
        are floats that specify the strength of uniform depolarization.

    stochastic_error_probs : dict, optional
        A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
        are tuples that specify Pauli-stochastic rates for each of the non-trivial
        Paulis (so a 3-tuple would be expected for a 1Q gate and a 15-tuple for a 2Q gate).

    lindblad_error_coeffs : dict, optional
        A dictionary whose keys are gate names (e.g. `"Gx"`) and whose values
        are dictionaries corresponding to the `lindblad_term_dict` kwarg taken
        by `LindbladErrorgen`. Keys are `(termType, basisLabel1, <basisLabel2>)`
        tuples, where `termType` can be `"H"` (Hamiltonian), `"S"`
        (Stochastic), or `"A"` (Affine).  Hamiltonian and Affine terms always
        have a single basis label (so key is a 2-tuple) whereas Stochastic
        tuples with 1 basis label indicate a *diagonal* term, and are the
        only types of terms allowed when `nonham_mode != "all"`.  Otherwise,
        Stochastic term tuples can include 2 basis labels to specify
        "off-diagonal" non-Hamiltonian Lindblad terms.  Basis labels can be
        strings or integers.  Values are complex coefficients.

    depolarization_parameterization : str of {"depolarize", "stochastic", or "lindblad"}
        Determines whether a DepolarizeOp, StochasticNoiseOp, or LindbladErrorgen
        is used to parameterize the depolarization noise, respectively.
        When "depolarize" (the default), a DepolarizeOp is created with the strength given
        in `depolarization_strengths`. When "stochastic", the depolarization strength is split
        evenly among the stochastic channels of a StochasticOp. When "lindblad", the depolarization
        strength is split evenly among the coefficients of the stochastic error generators
        (which are exponentiated to form a LindbladErrorgen with the "depol" parameterization).

    stochastic_parameterization : str of {"stochastic", or "lindblad"}
        Determines whether a StochasticNoiseOp or LindbladErrorgen is used to parameterize the
        stochastic noise, respectively. When "stochastic", elements of `stochastic_error_probs`
        are used as coefficients in a linear combination of stochastic channels (the default).
        When "lindblad", the elements of `stochastic_error_probs` are coefficients of
        stochastic error generators (which are exponentiated to form a LindbladErrorgen with the
        "cptp" parameterization).

    lindblad_parameterization : "auto" or a LindbladErrorgen paramtype
        Determines the parameterization of the LindbladErrorgen. When "auto" (the default), the parameterization
        is inferred from the types of error generators specified in the `lindblad_error_coeffs` dictionaries.
        When not "auto", the parameterization type is passed through to the LindbladErrorgen.

    evotype : Evotype or str, optional
        The evolution type.  The special value `"default"` is equivalent
        to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.

    simulator : ForwardSimulator or {"auto", "matrix", "map"}
        The simulator used to compute predicted probabilities for the
        resulting :class:`Model`.  Using `"auto"` selects `"matrix"` when there
        are 2 qubits or less, and otherwise selects `"map"`.

    independent_gates : bool, optional
        Whether gates are allowed independent noise or not.  If False,
        then all gates with the same name (e.g. "Gx") will have the *same*
        noise (e.g. an overrotation by 1 degree), and the
        `operation_bks['cloudnoise']` dictionary will contain a single key per gate
        name.  If True, then gates with the same name acting on different
        qubits may have different local noise, and so the
        `operation_bks['cloudnoise']` dictionary contains a key for each gate
        available gate placement.

    independent_spam : bool, optional
        Similar to `independent_gates` but for SPAM operations.

    errcomp_type : {'gates', 'errorgens'}
        Whether errors should be combined by composing error maps (`gates`) or by
        exponentiating the sum of error generators (composing the error generators,
        `errorgens`).  The latter is only an option when the noise is given solely
        in terms of Lindblad error coefficients.

    implicit_idle_mode : {'none', 'add_global'}
        The way idle operations are added implicitly within the created model. `"none"`
        doesn't add any "extra" idle operations when there is a layer that contains some
        gates but not gates on all the qubits.  `"add_global"` adds the global idle operation,
        i.e., the operation for a global idle layer (zero gates - a completely empty layer),
        to every layer that is simulated, using the global idle as a background idle that always
        occurs regardless of the operation.

    verbosity : int or VerbosityPrinter, optional
        Amount of detail to print to stdout.

    Returns
    -------
    CloudNoiseModel
    """
    # Translate the per-kind noise-specification dicts into a single ModelNoise object
    # (nonlocal specifications are allowed here, unlike in the crosstalk-free case).
    modelnoise = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,
                                             depolarization_parameterization, stochastic_parameterization,
                                             lindblad_parameterization, allow_nonlocal=True)

    return _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates, evotype,
                                         simulator, independent_gates, independent_spam, errcomp_type,
                                         implicit_idle_mode, verbosity)
def _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates=None,
                                  evotype="default", simulator="auto", independent_gates=False,
                                  independent_spam=True, errcomp_type="errorgens",
                                  implicit_idle_mode="none", verbosity=0):
    """
    Create a n-qubit "cloud-crosstalk" model.

    Similar to :method:`create_cloud_crosstalk_model` but the noise is input more generally,
    as a :class:`ModelNoise` object.  Arguments are the same as this function except that
    `modelnoise` is given instead of several more specific noise-describing arguments.

    Returns
    -------
    CloudNoiseModel
    """
    qubit_labels = processor_spec.qubit_labels
    state_space = _statespace.QubitSpace(qubit_labels)  # FUTURE: allow other types of state spaces somehow?
    evotype = _Evotype.cast(evotype)
    modelnoise = _OpModelNoise.cast(modelnoise)
    modelnoise.reset_access_counters()  # so unused noise specifications can be warned about at the end
    printer = _VerbosityPrinter.create_printer(verbosity)

    #Create static ideal gates without any noise (we use `modelnoise` further down)
    gatedict = _setup_local_gates(processor_spec, evotype, None, custom_gates,
                                  ideal_gate_type=('static standard', 'static clifford', 'static unitary'))
    # Cache of per-gate-name noise stencils, built lazily by the closures below and shared
    # across labels with the same gate name (this is what makes independent_gates=False work).
    stencils = _collections.OrderedDict()

    # (Note: global idle is now processed with other processorspec gates)

    # SPAM (cloud-crosstalk models use nonlocal noise on SPAM, hence local_noise=False)
    local_noise = False
    prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, local_noise,
                                                   'computational', 'computational', evotype, state_space,
                                                   independent_gates, independent_spam)

    # Select stencil create/apply functions based on whether errors compose as maps or as generators.
    if errcomp_type == 'gates':
        create_stencil_fn = modelnoise.create_errormap_stencil
        apply_stencil_fn = modelnoise.apply_errormap_stencil
    elif errcomp_type == 'errorgens':
        create_stencil_fn = modelnoise.create_errorgen_stencil
        apply_stencil_fn = modelnoise.apply_errorgen_stencil
    else:
        raise ValueError("Invalid `errcomp_type` value: %s" % str(errcomp_type))

    def build_cloudnoise_fn(lbl):
        """Return the cloud-noise operation for primitive layer label `lbl`, or None if it has no noise."""
        # lbl will be for a particular gate and target qubits. If we have error rates for this specific gate
        # and target qubits (i.e this primitive layer op) then we should build it directly (and independently,
        # regardless of the value of `independent_gates`) using these rates. Otherwise, if we have a stencil
        # for this gate, then we should use it to construct the output, using a copy when gates are independent
        # and a reference to the *same* stencil operations when `independent_gates==False`.
        num_sslbls = len(lbl.sslbls) if (lbl.sslbls is not None) else None
        if lbl in modelnoise:
            stencil = create_stencil_fn(lbl, evotype, state_space, num_target_labels=num_sslbls)
        elif lbl.name in stencils:
            stencil = stencils[lbl.name]
        elif lbl.name in modelnoise:
            stencils[lbl.name] = create_stencil_fn(lbl.name, evotype, state_space, num_target_labels=num_sslbls)
            stencil = stencils[lbl.name]
        else:
            return None  # no cloudnoise error for this label
        return apply_stencil_fn(stencil, evotype, state_space, target_labels=lbl.sslbls,
                                qubit_graph=processor_spec.qubit_graph,
                                copy=independent_gates and (lbl not in modelnoise))  # no need to copy if first case

    def build_cloudkey_fn(lbl):
        """Return a hashable "cloud key" identifying the set of qubits that `lbl`'s noise touches."""
        num_sslbls = len(lbl.sslbls) if (lbl.sslbls is not None) else None
        if lbl in modelnoise:
            stencil = create_stencil_fn(lbl, evotype, state_space, num_target_labels=num_sslbls)
        elif lbl.name in stencils:
            stencil = stencils[lbl.name]
        elif lbl.name in modelnoise:
            stencils[lbl.name] = create_stencil_fn(lbl.name, evotype, state_space, num_target_labels=num_sslbls)
            stencil = stencils[lbl.name]
        else:
            # simple cloud-key when there is no cloud noise
            return tuple(lbl.sslbls) if (lbl.sslbls is not None) else qubit_labels

        #Otherwise, process stencil to get a list of all the qubit labels `lbl`'s cloudnoise error
        # touches and form this into a key
        cloud_sslbls = modelnoise.compute_stencil_absolute_sslbls(stencil, state_space, lbl.sslbls,
                                                                  processor_spec.qubit_graph)
        hashable_sslbls = tuple(lbl.sslbls) if (lbl.sslbls is not None) else qubit_labels
        cloud_key = (hashable_sslbls, tuple(sorted(cloud_sslbls)))  # (sets are unhashable)
        return cloud_key

    ret = _CloudNoiseModel(processor_spec, gatedict, prep_layers, povm_layers,
                           build_cloudnoise_fn, build_cloudkey_fn,
                           simulator, evotype, errcomp_type,
                           implicit_idle_mode, printer)
    modelnoise.warn_about_zero_counters()  # must do this after model creation so build_ fns have been run
    return ret
def create_cloud_crosstalk_model_from_hops_and_weights(
        processor_spec, custom_gates=None,
        max_idle_weight=1, max_spam_weight=1,
        maxhops=0, extra_weight_1_hops=0, extra_gate_weight=0,
        simulator="auto", evotype='default',
        gate_type="H+S", spam_type="H+S",
        implicit_idle_mode="none", errcomp_type="gates",
        independent_gates=True, independent_spam=True,
        connected_highweight_errors=True,
        verbosity=0):
    """
    Create a "cloud crosstalk" model based on maximum error weights and hops along the processor's qubit graph.

    This function provides a convenient way to construct cloud crosstalk models whose gate errors
    consist of Pauli elementary error generators (i.e. that correspond to Lindblad error coefficients)
    that are limited in weight (number of non-identity Paulis) and support (which qubits have non-trivial
    Paulis on them).  Errors are taken to be approximately local, meaning they are concentrated near the
    target qubits of a gate, with the notion of locality taken from the processor specification's qubit graph.
    The caller provides maximum-weight, maximum-hop (a "hop" is the movement along a single graph edge), and
    gate type arguments to specify the set of possible errors on a gate.

    - The global idle gate (corresponding to an empty circuit layer) has errors that are limited only by
      a maximum weight, `max_idle_weight`.
    - State preparation and POVM errors are constructed similarly, with a global-idle-like error following
      or preceding the preparation or measurement, respectively.
    - Gate errors are placed on all the qubits that can be reached with at most `maxhops` hops from (any of)
      the gate's target qubits.  Elementary error generators up to weight `W`, where `W` equals the number
      of target qubits (e.g., 2 for a CNOT gate) plus `extra_gate_weight` are allowed.  Weight-1 terms
      are a special case, and the `extra_weight_1_hops` argument adds to the usual `maxhops` in this case
      to allow weight-1 errors on a possibly larger region of qubits around the target qubits.

    Parameters
    ----------
    processor_spec : ProcessorSpec
        The processor specification to create a model for.  This object specifies the
        gate names and unitaries for the processor, and their availability on the
        processor.

    custom_gates : dict
        A dictionary that associates with gate labels
        :class:`LinearOperator`, :class:`OpFactory`, or `numpy.ndarray`
        objects.  These objects describe the full action of the gate or
        primitive-layer they're labeled by (so if the model represents
        states by density matrices these objects are superoperators, not
        unitaries), and override any standard construction based on builtin
        gate names or `nonstd_gate_unitaries`.  Keys of this dictionary must
        be string-type gate *names* -- they cannot include state space labels
        -- and they must be *static* (have zero parameters) because they
        represent only the ideal behavior of each gate -- the cloudnoise
        operations represent the parameterized noise.  To fine-tune how this
        noise is parameterized, call the :class:`CloudNoiseModel` constructor
        directly.

    max_idle_weight : int, optional
        The maximum-weight for errors on the global idle gate.

    max_spam_weight : int, optional
        The maximum-weight for state preparation and measurement (SPAM) errors.

    maxhops : int
        The locality constraint: for a gate, errors (of weight up to the
        maximum weight for the gate) are allowed to occur on the gate's
        target qubits and those reachable by hopping at most `maxhops` times
        from a target qubit along nearest-neighbor links (defined by the
        `geometry`).

    extra_weight_1_hops : int, optional
        Additional hops (adds to `maxhops`) for weight-1 errors.  A value > 0
        can be useful for allowing just weight-1 errors (of which there are
        relatively few) to be dispersed farther from a gate's target qubits.
        For example, a crosstalk-detecting model might use this.

    extra_gate_weight : int, optional
        Additional weight, beyond the number of target qubits (taken as a "base
        weight" - i.e. weight 2 for a 2Q gate), allowed for gate errors.  If
        this equals 1, for instance, then 1-qubit gates can have up to weight-2
        errors and 2-qubit gates can have up to weight-3 errors.

    simulator : ForwardSimulator or {"auto", "matrix", "map"}
        The circuit simulator used to compute any
        requested probabilities, e.g. from :method:`probs` or
        :method:`bulk_probs`.  Using `"auto"` selects `"matrix"` when there
        are 2 qubits or less, and otherwise selects `"map"`.

    evotype : Evotype or str, optional
        The evolution type of this model, describing how states are
        represented.  The special value `"default"` is equivalent
        to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.

    gate_type : str, optional
        The Lindblad-error parameterization type used for gate operations.  This
        may be expanded in the future, but currently the gate errors *must* be of
        the Lindblad error-generator coefficients type, and this argument specifies
        what elementary error-generator coefficients are initially allowed (and linked to
        model parameters), before maximum-weight and locality constraints are imposed.
        In addition to the usual Lindblad error types, (e.g. `"H"`, `"H+S"`) the special
        values `"none"` is allowed to indicate that there should be no errors on the gates
        (useful if you only want errors on the SPAM, for instance).

    spam_type : str, optional
        Similar to `gate_type` but for SPAM elements (state preparations
        and POVMs).  This specifies the Lindblad-error parameterization for the
        state preparation and POVM.

    implicit_idle_mode : {'none', 'add_global'}
        The way idle operations are added implicitly within the created model. `"none"`
        doesn't add any "extra" idle operations when there is a layer that contains some
        gates but not gates on all the qubits.  `"add_global"` adds the global idle operation,
        i.e., the operation for a global idle layer (zero gates - a completely empty layer),
        to every layer that is simulated, using the global idle as a background idle that always
        occurs regardless of the operation.

    errcomp_type : {"gates","errorgens"}
        How errors are composed when creating layer operations in the created
        model.  `"gates"` means that the errors on multiple gates in a single
        layer are composed as separate and subsequent processes.  Specifically,
        the layer operation has the form `Composed(target,idleErr,cloudErr)`
        where `target` is a composition of all the ideal gate operations in the
        layer, `idleErr` is the global idle error if `implicit_idle_mode == 'add_global'`,
        and `cloudErr` is the composition (ordered as layer-label) of cloud-
        noise contributions, i.e. a map that acts as the product of exponentiated
        error-generator matrices.  `"errorgens"` means that layer operations
        have the form `Composed(target, error)` where `target` is as above and
        `error` results from composing (summing) the idle and cloud-noise error
        *generators*, i.e. a map that acts as the exponentiated sum of error
        generators (ordering is irrelevant in this case).

    independent_gates : bool, optional
        Whether the noise added to a gate when it acts on one set of target
        qubits is independent of its noise on a different set of target qubits.
        If False, then all gates with the same name (e.g. "Gx") will be constrained
        to having the *same* noise on the cloud around the target qubits (even though
        the target qubits and cloud are different).  If True, then gate noise operations
        for different sets of target qubits are independent.

    independent_spam : bool, optional
        Similar to `independent_gates` but for state preparation and measurement operations.
        When `False`, the noise applied to each set (individual or pair or triple etc.) of
        qubits must be the same, e.g., if the state preparation is a perfect preparation followed
        by a single-qubit rotation then this rotation must be by the *same* angle on all of
        the qubits.

    connected_highweight_errors : bool, optional
        An additional constraint regarding high-weight errors.  When `True`, only high weight
        (weight 2+) elementary error generators whose non-trivial Paulis occupy a *connected*
        portion of the qubit graph are allowed.  For example, if the qubit graph is a 1D chain
        of 4 qubits, 1-2-3-4, and weight-2 errors are allowed on a single-qubit gate with
        target = qubit-2, then weight-2 errors on 1-2 and 2-3 would be allowed, but errors on
        1-3 would be forbidden.  When `False`, no constraint is imposed.

    verbosity : int or VerbosityPrinter, optional
        An integer >= 0 dictating how much output to send to stdout.

    Returns
    -------
    CloudNoiseModel
    """
    # construct noise specifications for the cloudnoise model
    modelnoise = {}
    all_qubit_labels = processor_spec.qubit_labels
    conn = connected_highweight_errors  # shorthand: whether high-weight errors must be connected on the graph
    global_idle_name = processor_spec.global_idle_gate_name

    # Global Idle: errors up to `max_idle_weight` on any qubits (max_hops=None -> no hop restriction)
    if max_idle_weight > 0:
        assert(global_idle_name is not None), \
            "`max_idle_weight` must equal 0 for processor specs without a global idle gate!"
        #printer.log("Creating Idle:")
        wt_maxhop_tuples = [(i, None) for i in range(1, max_idle_weight + 1)]
        modelnoise[global_idle_name] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples,
                                                                        gate_type, conn)

    # SPAM: global-idle-like errors up to `max_spam_weight` on prep and POVM
    if max_spam_weight > 0:
        wt_maxhop_tuples = [(i, None) for i in range(1, max_spam_weight + 1)]
        modelnoise['prep'] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples, spam_type, conn)
        modelnoise['povm'] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples, spam_type, conn)

    # Gates: weight-1 errors get extra hops; weights above the base (1Q or 2Q) use plain `maxhops`
    weight_maxhops_tuples_1Q = [(1, maxhops + extra_weight_1_hops)] + \
                               [(1 + x, maxhops) for x in range(1, extra_gate_weight + 1)]
    weight_maxhops_tuples_2Q = [(1, maxhops + extra_weight_1_hops), (2, maxhops)] + \
                               [(2 + x, maxhops) for x in range(1, extra_gate_weight + 1)]
    for gatenm, gate_unitary in processor_spec.gate_unitaries.items():
        if gatenm == global_idle_name: continue  # processed above
        gate_nQubits = int(gate_unitary) if isinstance(gate_unitary, (int, _np.int64)) \
            else int(round(_np.log2(gate_unitary.shape[0])))  # NOTE: integer gate_unitary => idle on n qubits
        if gate_nQubits not in (1, 2):
            raise ValueError("Only 1- and 2-qubit gates are supported. %s acts on %d qubits!"
                             % (str(gatenm), gate_nQubits))
        weight_maxhops_tuples = weight_maxhops_tuples_1Q if gate_nQubits == 1 else weight_maxhops_tuples_2Q
        # '@<n>' labels are stencil placeholders that index a gate's target qubits
        target_sslbls = ('@0',) if gate_nQubits == 1 else ('@0', '@1')
        modelnoise[gatenm] = _build_weight_maxhops_modelnoise(target_sslbls, weight_maxhops_tuples,
                                                              gate_type, conn)
    return _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates,
                                         evotype, simulator, independent_gates, independent_spam,
                                         errcomp_type, implicit_idle_mode, verbosity)
def _iter_basis_inds(weight):
""" Iterate over product of `weight` non-identity Pauli 1Q basis indices """
basisIndList = [[1, 2, 3]] * weight # assume pauli 1Q basis, and only iterate over non-identity els
for basisInds in _itertools.product(*basisIndList):
yield basisInds
def _pauli_product_matrix(sigma_inds):
    """
    Construct the tensor (Kronecker) product of normalized Pauli matrices.

    Parameters
    ----------
    sigma_inds : iterable
        A sequence of integers in the range [0,3] corresponding to the
        I, X, Y, Z Pauli basis matrices.

    Returns
    -------
    numpy.ndarray or scipy.sparse.csr_matrix
    """
    # Each factor is divided by sqrt(2), matching the module's normalization convention.
    normalized_paulis = (id2x2 / sqrt2, sigmax / sqrt2, sigmay / sqrt2, sigmaz / sqrt2)
    product = _np.identity(1, 'complex')
    for ind in sigma_inds:
        product = _np.kron(product, normalized_paulis[ind])
    return product
def _construct_restricted_weight_pauli_basis(wt, sparse=False):
    """Build an :class:`ExplicitBasis` of weight-`wt` Pauli products: the identity plus every non-identity combo."""
    # Identity element first, labeled 'I' (regardless of weight)
    elements = [_pauli_product_matrix(_np.zeros(wt, _np.int64))]
    labels = ['I']
    for inds in _iter_basis_inds(wt):
        elements.append(_pauli_product_matrix(_np.array(inds, _np.int64)))  # len(inds) == wt
        labels.append(''.join(["IXYZ"[i] for i in inds]))
    #printer.log("Error on qubits %s -> error basis of length %d" % (err_qubit_inds, len(errbasis)), 3)
    return _ExplicitBasis(elements, labels, real=True, sparse=sparse)
def _build_weight_maxhops_modelnoise(target_sslbls, weight_maxhops_tuples, lnd_parameterization, connected=True):
    """
    Build a model-noise dictionary mapping stencil labels to Lindblad noise objects.

    For each `(weight, max_hops)` pair a stencil label is created that places a
    weight-`weight` Pauli-basis Lindblad error generator on every qualifying
    combination of qubits around `target_sslbls`: all combinations when
    `max_hops` is None or 0, otherwise only those within `max_hops` graph hops.
    When `connected` is True, high-weight combinations must be connected on the
    qubit graph.  Returns an empty dict when `lnd_parameterization` is 'none'/None.
    """
    if lnd_parameterization == 'none' or lnd_parameterization is None:
        return {}  # special case when we don't want any error parameterization

    noise_by_stencil = {}
    for wt, max_hops in weight_maxhops_tuples:
        # Choose the stencil type: "all combos" ignores hop distance; otherwise restrict by radius.
        if max_hops is None or max_hops == 0:  # Note: max_hops not used in this case
            stencil_lbl = _stencil.StencilLabelAllCombos(target_sslbls, wt, connected)
        else:
            stencil_lbl = _stencil.StencilLabelRadiusCombos(target_sslbls, max_hops, wt, connected)

        local_state_space = _statespace.default_space_for_num_qubits(wt)
        noise_by_stencil[stencil_lbl] = _LindbladNoise.from_basis_coefficients(
            lnd_parameterization, _construct_restricted_weight_pauli_basis(wt),
            local_state_space)
    return noise_by_stencil
def _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,
                                depolarization_parameterization, stochastic_parameterization, lindblad_parameterization,
                                allow_nonlocal):
    """
    Construct a :class:`ComposedOpModelNoise` from the common noise-specification arguments.

    Each of the three specification dictionaries (depolarization strengths, stochastic
    error probabilities, and Lindblad error coefficients) that is not None is translated
    into an :class:`OpModelPerOpNoise` object; the composition of these is returned.

    Parameters
    ----------
    depolarization_strengths : dict or None
        Maps operation labels to depolarization strengths (floats), or - when
        `allow_nonlocal` is True - to dicts of state-space-labels -> strengths.

    stochastic_error_probs : dict or None
        Maps operation labels to tuples of Pauli-stochastic rates, or - when
        `allow_nonlocal` is True - to dicts of state-space-labels -> rate tuples.

    lindblad_error_coeffs : dict or None
        Maps operation labels to dicts of Lindblad error coefficients.  When
        `allow_nonlocal` is True, coefficient keys may embed state space labels,
        e.g. `('H', 'XX:0,1')` or `'HXX:0,1'`.

    depolarization_parameterization, stochastic_parameterization, lindblad_parameterization : str
        Parameterization types passed through to the corresponding noise constructors.

    allow_nonlocal : bool
        Whether noise specifications may target qubits beyond an operation's own targets.

    Returns
    -------
    ComposedOpModelNoise
    """
    modelnoises = []

    if depolarization_strengths is not None:
        noise_dict = {}
        for lbl, val in depolarization_strengths.items():
            if isinstance(val, dict):  # then value is actually a dictionary of sslbls -> noise specifications
                if not allow_nonlocal: raise ValueError("Nonlocal depolarization strengths not allowed!")
                noise_dict[lbl] = {k: _DepolarizationNoise(v, depolarization_parameterization) for k, v in val.items()}
            else:
                noise_dict[lbl] = _DepolarizationNoise(val, depolarization_parameterization)
        modelnoises.append(_OpModelPerOpNoise(noise_dict))

    if stochastic_error_probs is not None:
        noise_dict = {}
        for lbl, val in stochastic_error_probs.items():
            if isinstance(val, dict):  # then value is actually a dictionary of sslbls -> noise specifications
                if not allow_nonlocal: raise ValueError("Nonlocal stochastic error probs not allowed!")
                noise_dict[lbl] = {k: _StochasticNoise(v, stochastic_parameterization) for k, v in val.items()}
            else:
                noise_dict[lbl] = _StochasticNoise(val, stochastic_parameterization)
        modelnoises.append(_OpModelPerOpNoise(noise_dict))

    if lindblad_error_coeffs is not None:

        if not allow_nonlocal:  # the easy case
            modelnoises.append(_OpModelPerOpNoise({lbl: _LindbladNoise(val, lindblad_parameterization)
                                                   for lbl, val in lindblad_error_coeffs.items()}))
        else:  # then need to process labels like ('H', 'XX:0,1') or 'HXX:0,1'

            def process_stencil_labels(flat_lindblad_errs):
                """Group flat coefficient specs by the state-space labels they act on."""
                nonlocal_errors = _collections.OrderedDict()
                local_errors = _collections.OrderedDict()

                for nm, val in flat_lindblad_errs.items():
                    if isinstance(nm, str): nm = (nm[0], nm[1:])  # e.g. "HXX" => ('H','XX')
                    err_typ, basisEls = nm[0], nm[1:]
                    sslbls = None
                    local_nm = [err_typ]
                    for bel in basisEls:  # e.g. bel could be "X:Q0" or "XX:Q0,Q1"
                        # OR "X:<n>" where n indexes a target qubit or "X:<dir>" where dir indicates
                        # a graph *direction*, e.g. "up"
                        if ':' in bel:
                            bel_name, bel_sslbls = bel.split(':')  # should have form <name>:<comma-separated-sslbls>
                            bel_sslbls = bel_sslbls.split(',')  # e.g. ('Q0','Q1')
                            integerized_sslbls = []
                            for ssl in bel_sslbls:
                                # qubit labels are stored as ints when possible, strings otherwise
                                try: integerized_sslbls.append(int(ssl))
                                except ValueError: integerized_sslbls.append(ssl)  # was a bare `except:` - only int() parse failures should fall through
                            bel_sslbls = tuple(integerized_sslbls)
                        else:
                            bel_name = bel
                            bel_sslbls = None

                        if sslbls is None:
                            sslbls = bel_sslbls
                        else:
                            #Note: sslbls should always be the same if there are multiple basisEls,
                            # i.e for nm == ('S',bel1,bel2)
                            assert(sslbls is bel_sslbls or sslbls == bel_sslbls), \
                                "All basis elements of the same error term must operate on the *same* state!"
                        local_nm.append(bel_name)  # drop the state space labels, e.g. "XY:Q0,Q1" => "XY"

                    # keep track of errors by the qubits they act on, as only each such
                    # set will have it's own LindbladErrorgen
                    local_nm = tuple(local_nm)  # so it's hashable
                    if sslbls is not None:
                        sslbls = tuple(sorted(sslbls))
                        if sslbls not in nonlocal_errors:
                            nonlocal_errors[sslbls] = _collections.OrderedDict()
                        if local_nm in nonlocal_errors[sslbls]:
                            nonlocal_errors[sslbls][local_nm] += val
                        else:
                            nonlocal_errors[sslbls][local_nm] = val
                    else:
                        if local_nm in local_errors:
                            local_errors[local_nm] += val
                        else:
                            local_errors[local_nm] = val

                if len(nonlocal_errors) == 0:
                    return _LindbladNoise(local_errors, lindblad_parameterization)
                else:
                    all_errors = []
                    if len(local_errors) > 0:
                        all_errors.append((None, _LindbladNoise(local_errors, lindblad_parameterization)))
                    for sslbls, errdict in nonlocal_errors.items():
                        all_errors.append((sslbls, _LindbladNoise(errdict, lindblad_parameterization)))
                    return _collections.OrderedDict(all_errors)

            modelnoises.append(_OpModelPerOpNoise({lbl: process_stencil_labels(val)
                                                   for lbl, val in lindblad_error_coeffs.items()}))

    return _ComposedOpModelNoise(modelnoises)
@_deprecated_fn("This function is overly specific and will be removed soon.")
def _nparams_xycnot_cloudnoise_model(num_qubits, geometry="line", max_idle_weight=1, maxhops=0,
                                     extra_weight_1_hops=0, extra_gate_weight=0, require_connected=False,
                                     independent_1q_gates=True, zz_only=False, bidirectional_cnots=True, verbosity=0):
    """
    Compute the number of parameters in a particular :class:`CloudNoiseModel`.
    Returns the number of parameters in the :class:`CloudNoiseModel` containing
    X(pi/2), Y(pi/2) and CNOT gates using the specified arguments without
    actually constructing the model (useful for considering parameter-count
    scaling).
    Parameters
    ----------
    num_qubits : int
        The total number of qubits.
    geometry : {"line","ring","grid","torus"} or QubitGraph
        The type of connectivity among the qubits, specifying a
        graph used to define neighbor relationships. Alternatively,
        a :class:`QubitGraph` object may be passed directly.
    max_idle_weight : int, optional
        The maximum-weight for errors on the global idle gate.
    maxhops : int
        The locality constraint: for a gate, errors (of weight up to the
        maximum weight for the gate) are allowed to occur on the gate's
        target qubits and those reachable by hopping at most `maxhops` times
        from a target qubit along nearest-neighbor links (defined by the
        `geometry`).
    extra_weight_1_hops : int, optional
        Additional hops (adds to `maxhops`) for weight-1 errors. A value > 0
        can be useful for allowing just weight-1 errors (of which there are
        relatively few) to be dispersed farther from a gate's target qubits.
        For example, a crosstalk-detecting model might use this.
    extra_gate_weight : int, optional
        Addtional weight, beyond the number of target qubits (taken as a "base
        weight" - i.e. weight 2 for a 2Q gate), allowed for gate errors. If
        this equals 1, for instance, then 1-qubit gates can have up to weight-2
        errors and 2-qubit gates can have up to weight-3 errors.
    require_connected : bool, optional
        If True, then high-weight errors only occur on connected (via `geometry`) qubits.
        For example in a line of qubits there would not be weight-2 errors on qubits 1 and 3.
    independent_1q_gates : bool, optional
        If True, 1Q gates on different qubits have separate (distinct) parameters. If
        False, the 1Q gates of each type (e.g. an pi/2 X gate) for different qubits share
        the same set of parameters.
    zz_only : bool, optional
        If True, the only high-weight errors allowed are of "Z^n" type.
    bidirectional_cnots : bool
        Whether CNOT gates can be performed in either direction (and each direction should
        be treated as an indepedent gate)
    verbosity : int, optional
        An integer >= 0 dictating how much output to send to stdout.
    Returns
    -------
    nParams : collections.OrderedDict
        Maps each operation label (gates, 'rho0', 'Mdefault') to its parameter count.
    total : int
        The sum of all per-operation parameter counts.
    """
    # noise can be either a seed or a random array that is long enough to use
    printer = _VerbosityPrinter.create_printer(verbosity)
    printer.log("Computing parameters for a %d-qubit %s model" % (num_qubits, geometry))
    qubitGraph = _QubitGraph.common_graph(num_qubits, geometry, directed=True, all_directions=True)
    #printer.log("Created qubit graph:\n"+str(qubitGraph))
    def idle_count_nparams(max_weight):
        """Parameter count of a `build_nqn_global_idle`-constructed gate"""
        ret = 0
        # Idle errors may occur on any qubit, so consider every qubit index.
        possible_err_qubit_inds = _np.arange(num_qubits)
        for wt in range(1, max_weight + 1):
            nErrTargetLocations = qubitGraph.connected_combos(possible_err_qubit_inds, wt)
            if zz_only and wt > 1: basisSizeWoutId = 1**wt  # ( == 1)
            else: basisSizeWoutId = 3**wt  # (X,Y,Z)^wt
            nErrParams = 2 * basisSizeWoutId  # H+S terms
            ret += nErrTargetLocations * nErrParams
        return ret
    def op_count_nparams(target_qubit_inds, weight_maxhops_tuples, debug=False):
        """Parameter count of a `build_nqn_composed_gate`-constructed gate"""
        ret = 0
        #Note: no contrib from idle noise (already parameterized)
        for wt, maxHops in weight_maxhops_tuples:
            # All qubits within `maxHops` hops of any target qubit may hold this error.
            possible_err_qubit_inds = _np.array(qubitGraph.radius(target_qubit_inds, maxHops), _np.int64)
            if require_connected:
                nErrTargetLocations = qubitGraph.connected_combos(possible_err_qubit_inds, wt)
            else:
                nErrTargetLocations = _scipy.special.comb(len(possible_err_qubit_inds), wt)
            if zz_only and wt > 1: basisSizeWoutId = 1**wt  # ( == 1)
            else: basisSizeWoutId = 3**wt  # (X,Y,Z)^wt
            nErrParams = 2 * basisSizeWoutId  # H+S terms
            if debug:
                print(" -- wt%d, hops%d: inds=%s locs = %d, eparams=%d, total contrib = %d" %
                      (wt, maxHops, str(possible_err_qubit_inds), nErrTargetLocations,
                       nErrParams, nErrTargetLocations * nErrParams))
            ret += nErrTargetLocations * nErrParams
        return ret
    nParams = _collections.OrderedDict()
    printer.log("Creating Idle:")
    nParams[_label.Label('Gi')] = idle_count_nparams(max_idle_weight)
    #1Q gates: X(pi/2) & Y(pi/2) on each qubit
    weight_maxhops_tuples_1Q = [(1, maxhops + extra_weight_1_hops)] + \
                               [(1 + x, maxhops) for x in range(1, extra_gate_weight + 1)]
    if independent_1q_gates:
        for i in range(num_qubits):
            printer.log("Creating 1Q X(pi/2) and Y(pi/2) gates on qubit %d!!" % i)
            nParams[_label.Label("Gx", i)] = op_count_nparams((i,), weight_maxhops_tuples_1Q)
            nParams[_label.Label("Gy", i)] = op_count_nparams((i,), weight_maxhops_tuples_1Q)
    else:
        printer.log("Creating common 1Q X(pi/2) and Y(pi/2) gates")
        # Shared-parameter case: count once using a representative (middle) qubit.
        rep = int(num_qubits / 2)
        nParams[_label.Label("Gxrep")] = op_count_nparams((rep,), weight_maxhops_tuples_1Q)
        nParams[_label.Label("Gyrep")] = op_count_nparams((rep,), weight_maxhops_tuples_1Q)
    #2Q gates: CNOT gates along each graph edge
    weight_maxhops_tuples_2Q = [(1, maxhops + extra_weight_1_hops), (2, maxhops)] + \
                               [(2 + x, maxhops) for x in range(1, extra_gate_weight + 1)]
    seen_pairs = set()
    for i, j in qubitGraph.edges():  # note: all edges have i<j so "control" of CNOT is always lower index (arbitrary)
        if bidirectional_cnots is False:
            # Count each undirected pair only once when CNOTs are one-directional.
            ordered_tup = (i, j) if i <= j else (j, i)
            if ordered_tup in seen_pairs: continue
            else: seen_pairs.add(ordered_tup)
        printer.log("Creating CNOT gate between qubits %d and %d!!" % (i, j))
        nParams[_label.Label("Gcnot", (i, j))] = op_count_nparams((i, j), weight_maxhops_tuples_2Q)
    #SPAM
    nPOVM_1Q = 4  # params for a single 1Q POVM
    nParams[_label.Label('rho0')] = 3 * num_qubits  # 3 b/c each component is TP
    nParams[_label.Label('Mdefault')] = nPOVM_1Q * num_qubits  # num_qubits 1Q-POVMs
    return nParams, sum(nParams.values())
| 53.577069 | 120 | 0.64688 |
import collections as _collections
import itertools as _itertools
from os import stat
from pygsti.modelmembers.instruments.instrument import Instrument
import numpy as _np
import scipy as _scipy
import scipy.linalg as _spl
from pygsti.evotypes import Evotype as _Evotype
from pygsti.modelmembers import operations as _op
from pygsti.modelmembers import povms as _povm
from pygsti.modelmembers import states as _state
from pygsti.modelmembers import instruments as _instrument
from pygsti.modelmembers.operations import opfactory as _opfactory
from pygsti.models import stencillabel as _stencil
from pygsti.models.modelnoise import OpModelNoise as _OpModelNoise
from pygsti.models.modelnoise import OpModelPerOpNoise as _OpModelPerOpNoise
from pygsti.models.modelnoise import ComposedOpModelNoise as _ComposedOpModelNoise
from pygsti.models.modelnoise import LindbladNoise as _LindbladNoise
from pygsti.models.modelnoise import StochasticNoise as _StochasticNoise
from pygsti.models.modelnoise import DepolarizationNoise as _DepolarizationNoise
from pygsti.models import explicitmodel as _emdl
from pygsti.models import gaugegroup as _gg
from pygsti.models.localnoisemodel import LocalNoiseModel as _LocalNoiseModel
from pygsti.models.cloudnoisemodel import CloudNoiseModel as _CloudNoiseModel
from pygsti.baseobjs import label as _label
from pygsti.baseobjs import statespace as _statespace
from pygsti.baseobjs.basis import Basis as _Basis
from pygsti.baseobjs.basis import ExplicitBasis as _ExplicitBasis
from pygsti.baseobjs.basis import DirectSumBasis as _DirectSumBasis
from pygsti.baseobjs.qubitgraph import QubitGraph as _QubitGraph
from pygsti.tools import basistools as _bt
from pygsti.tools import internalgates as _itgs
from pygsti.tools import optools as _ot
from pygsti.tools import listtools as _lt
from pygsti.baseobjs.basisconstructors import sqrt2, id2x2, sigmax, sigmay, sigmaz
from pygsti.baseobjs.verbosityprinter import VerbosityPrinter as _VerbosityPrinter
from pygsti.tools.legacytools import deprecate as _deprecated_fn
# ---------------------------------------------------------------------------------------
# -- Helper Functions ----------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
def to_label(lbl):
    """Convert `lbl` to an integer index if possible, else to a stripped string label.

    Labels like ``"2"`` become the int ``2``; anything that cannot be parsed as
    an integer (e.g. ``"Q0"``) is returned as a whitespace-stripped string.
    """
    try:
        return int(lbl)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt propagate.
        # int() raises ValueError for non-numeric strings, TypeError for
        # non-convertible objects; anything else falls back to a string label.
        return lbl.strip()
def to_labels(lbls):
    """Apply :func:`to_label` to every element of `lbls`, returning a list."""
    return list(map(to_label, lbls))
# ------------------------------------------------------------------------------------------------------------------
# -- End Helper Functions ------------------------------------------------------------------------------------------
# ------------------------------------------------------------------------------------------------------------------
#FUTURE?: type_preferences = ('static standard', 'static clifford', 'static unitary')
build_evotype = 'default'
superop_mxs_in_basis = []
exprTerms = op_expr.split(':')
for exprTerm in exprTerms:
l = exprTerm.index('('); r = exprTerm.rindex(')')
opName = exprTerm[0:l]
argsStr = exprTerm[l + 1:r]
args = argsStr.split(',')
if opName == "I":
# qubit labels (TODO: what about 'L' labels? -- not sure if they work with this...)
labels = to_labels(args)
stateSpaceUDim = int(_np.product([state_space.label_udimension(l) for l in labels]))
# a complex 2x2 mx unitary for the identity in Pauli-product basis
Uop = _op.StaticUnitaryOp(_np.identity(stateSpaceUDim, 'complex'), 'pp', build_evotype)
#FUTURE?:
# stdname = 'Gi' if (stateSpaceUDim == 2) else None
# Uop = _op.create_from_unitary_mx(_np.identity(stateSpaceUDim, complex), type_preferences, 'pp',
# stdname=stdname, evotype=evotype)
# a complex 2*num_qubits x 2*num_qubits mx unitary on full space in Pauli-product basis
Uop_embed = _op.EmbeddedOp(state_space, labels, Uop)
# a real 4*num_qubits x 4*num_qubits mx superoperator in final basis
superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')
# a real 4*num_qubits x 4*num_qubits mx superoperator in final basis
superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)
elif opName == "D":
# like 'I', but only parameterize the diagonal elements - so can be a depolarization-type map
raise NotImplementedError("Removed temporarily - need to update using embedded gates")
# # qubit labels (TODO: what about 'L' labels? -- not sure if they work with this...)
# labels = to_labels(args)
# stateSpaceDim = sslbls.product_dim(labels)
# if parameterization not in ("linear","linearTP"):
# raise ValueError("'D' gate only makes sense to use when and parameterization == 'linear'")
# if defaultI2P == "TP":
# # parameterize only the diagonals els after the first
# indicesToParameterize = [ (i,i) for i in range(1,stateSpaceDim**2) ]
# else:
# # parameterize only the diagonals els
# indicesToParameterize = [ (i,i) for i in range(0,stateSpaceDim**2) ]
# # *real* 4x4 mx in Pauli-product basis -- still just the identity!
# pp_opMx = _np.identity(stateSpaceDim**2, 'd')
# # pp_opMx assumed to be in the Pauli-product basis
# opTermInFinalBasis = embed_operation(pp_opMx, tuple(labels), indicesToParameterize)
elif opName in ('X', 'Y', 'Z'): # single-qubit gate names
assert(len(args) == 2) # theta, qubit-index
theta = eval(args[0], {"__builtins__": None}, {'pi': _np.pi})
label = to_label(args[1])
assert(state_space.label_dimension(label) == 4), "%s gate must act on qubits!" % opName
if opName == 'X': ex = -1j * theta * sigmax / 2
elif opName == 'Y': ex = -1j * theta * sigmay / 2
elif opName == 'Z': ex = -1j * theta * sigmaz / 2
# complex 2x2 unitary matrix operating on single qubit in Pauli-product basis
Uop = _op.StaticUnitaryOp(_spl.expm(ex), 'pp', build_evotype)
#FUTURE?:
#stdname = None
#if _np.isclose(theta, _np.pi): stdname = 'G%spi' % opName.lower()
#elif _np.isclose(theta, _np.pi/2): stdname = 'G%spi2' % opName.lower()
# Uop = _op.create_from_unitary_mx(_spl.expm(ex), type_preferences, 'pp', stdname=stdname, evotype=evotype)
# a complex 2*num_qubits x 2*num_qubits mx unitary on full space in Pauli-product basis
Uop_embed = _op.EmbeddedOp(state_space, (label,), Uop)
# a real 4*num_qubits x 4*num_qubits mx superoperator in Pauli-product basis
superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')
# a real 4*num_qubits x 4*num_qubits mx superoperator in final basis
superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)
elif opName == 'N': # more general single-qubit gate
assert(len(args) == 5) # theta, sigmaX-coeff, sigmaY-coeff, sigmaZ-coeff, qubit-index
theta = eval(args[0], {"__builtins__": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})
sxCoeff = eval(args[1], {"__builtins__": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})
syCoeff = eval(args[2], {"__builtins__": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})
szCoeff = eval(args[3], {"__builtins__": None}, {'pi': _np.pi, 'sqrt': _np.sqrt})
label = to_label(args[4])
assert(state_space.label_dimension(label) == 4), "%s gate must act on qubits!" % opName
ex = -1j * theta * (sxCoeff * sigmax / 2. + syCoeff * sigmay / 2. + szCoeff * sigmaz / 2.)
# complex 2x2 unitary matrix operating on single qubit in Pauli-product basis
Uop = _op.StaticUnitaryOp(_spl.expm(ex), 'pp', evotype=build_evotype)
#FUTURE?: Uop = _op.create_from_unitary_mx(_spl.expm(ex), type_preferences, 'pp', evotype=evotype)
# a complex 2*num_qubits x 2*num_qubits mx unitary on full space in Pauli-product basis
Uop_embed = _op.EmbeddedOp(state_space, (label,), Uop)
# a real 4*num_qubits x 4*num_qubits mx superoperator in Pauli-product basis
superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')
# a real 4*num_qubits x 4*num_qubits mx superoperator in final basis
superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)
elif opName in ('CX', 'CY', 'CZ', 'CNOT', 'CPHASE'): # two-qubit gate names
if opName in ('CX', 'CY', 'CZ'):
assert(len(args) == 3) # theta, qubit-label1, qubit-label2
theta = eval(args[0], {"__builtins__": None}, {'pi': _np.pi})
label1 = to_label(args[1]); label2 = to_label(args[2])
if opName == 'CX': ex = -1j * theta * sigmax / 2
elif opName == 'CY': ex = -1j * theta * sigmay / 2
elif opName == 'CZ': ex = -1j * theta * sigmaz / 2
Utarget = _spl.expm(ex) # 2x2 unitary matrix operating on target qubit
else: # opName in ('CNOT','CPHASE')
assert(len(args) == 2) # qubit-label1, qubit-label2
label1 = to_label(args[0]); label2 = to_label(args[1])
if opName == 'CNOT':
Utarget = _np.array([[0, 1],
[1, 0]], 'd')
elif opName == 'CPHASE':
Utarget = _np.array([[1, 0],
[0, -1]], 'd')
# 4x4 unitary matrix operating on isolated two-qubit space
U = _np.identity(4, 'complex'); U[2:, 2:] = Utarget
assert(state_space.label_dimension(label1) == 4 and state_space.label_dimension(label2) == 4), \
"%s gate must act on qubits!" % opName
# complex 4x4 unitary matrix operating on two-qubit in Pauli-product basis
Uop = _op.StaticUnitaryOp(U, 'pp', build_evotype)
#FUTURE?:
# if opName == "CNOT": stdname = "Gcnot"
# elif opName == "CPHASE": stdname = "Gcphase"
# else: stdname = None
# Uop = _op.create_from_unitary_mx(U, type_preferences, 'pp', stdname=stdname, evotype=evotype)
# a complex 2*num_qubits x 2*num_qubits mx unitary on full space
Uop_embed = _op.EmbeddedOp(state_space, [label1, label2], Uop)
# a real 4*num_qubits x 4*num_qubits mx superoperator in Pauli-product basis
superop_mx_pp = Uop_embed.to_dense(on_space='HilbertSchmidt')
# a real 4*num_qubits x 4*num_qubits mx superoperator in final basis
superop_mx_in_basis = _bt.change_basis(superop_mx_pp, 'pp', basis)
elif opName == "LX": # TODO - better way to describe leakage?
assert(len(args) == 3) # theta, dmIndex1, dmIndex2 - X rotation between any two density matrix basis states
theta = eval(args[0], {"__builtins__": None}, {'pi': _np.pi})
i1 = int(args[1]) # row/column index of a single *state* within the density matrix
i2 = int(args[2]) # row/column index of a single *state* within the density matrix
ex = -1j * theta * sigmax / 2
Uop = _spl.expm(ex) # 2x2 unitary matrix operating on the i1-th and i2-th states of the state space basis
opDim = basis.dim
dmDim = int(_np.sqrt(basis.elsize)) # matrix dim of the "embedding space"
if isinstance(basis, _DirectSumBasis):
blockDims = [c.dim for c in basis.component_bases]
else: blockDims = [opDim]
Utot = _np.identity(dmDim, 'complex')
Utot[i1, i1] = Uop[0, 0]
Utot[i1, i2] = Uop[0, 1]
Utot[i2, i1] = Uop[1, 0]
Utot[i2, i2] = Uop[1, 1]
# dmDim^2 x dmDim^2 mx operating on vectorized total densty matrix
opTermInStdBasis = _ot.unitary_to_process_mx(Utot)
# contract [3] to [2, 1]
embedded_std_basis = _Basis.cast('std', 9) # [2]
std_basis = _Basis.cast('std', blockDims) # std basis w/blockdim structure, i.e. [4,1]
opTermInReducedStdBasis = _bt.resize_std_mx(opTermInStdBasis, 'contract',
embedded_std_basis, std_basis)
superop_mx_in_basis = _bt.change_basis(opTermInReducedStdBasis, std_basis, basis)
else: raise ValueError("Invalid gate name: %s" % opName)
superop_mxs_in_basis.append(superop_mx_in_basis)
#Note: expressions are listed in "matrix composition order"
final_superop_mx = superop_mxs_in_basis[0]
for mx in superop_mxs_in_basis[1:]:
final_superop_mx = _np.dot(final_superop_mx, mx)
if basis.real:
assert(_np.linalg.norm(final_superop_mx.imag) < 1e-6), "Operation matrix should be real but isn't!"
final_superop_mx = _np.real(final_superop_mx)
return _op.create_from_superop_mx(final_superop_mx, parameterization, basis,
evotype=evotype, state_space=state_space)
def _create_explicit_model_from_expressions(state_space, basis,
                                            op_labels, op_expressions,
                                            prep_labels=('rho0',), prep_expressions=('0',),
                                            effect_labels='standard', effect_expressions='standard',
                                            povm_labels='Mdefault', gate_type="full", prep_type="auto",
                                            povm_type="auto", instrument_type="auto", evotype='default'):
    """
    Build an :class:`ExplicitOpModel` from expression strings (internal worker).

    Populates the returned model's `preps`, `povms` and `operations` members
    from `prep_expressions`, `effect_expressions` and `op_expressions`
    respectively (via `create_spam_vector` / `create_operation`), then sets a
    default gauge group chosen by `gate_type`.  "auto" prep/povm/instrument
    types are derived from `gate_type`.
    """
    state_space = _statespace.StateSpace.cast(state_space)
    ret = _emdl.ExplicitOpModel(state_space, basis.copy(), default_gate_type=gate_type,
                                default_prep_type=prep_type, default_povm_type=povm_type,
                                default_instrument_type=instrument_type, evotype=evotype)
    # Resolve "auto" member types from the gate parameterization type.
    if prep_type == "auto":
        prep_type = _state.state_type_from_op_type(gate_type)
    if povm_type == "auto":
        povm_type = _povm.povm_type_from_op_type(gate_type)
    if instrument_type == "auto":
        instrument_type = _instrument.instrument_type_from_op_type(gate_type)
    for label, rhoExpr in zip(prep_labels, prep_expressions):
        vec = create_spam_vector(rhoExpr, state_space, basis)
        ret.preps[label] = _state.create_from_dmvec(vec, prep_type, basis, evotype, state_space)
    # A single POVM label implies single (nested) effect label/expression lists.
    if isinstance(povm_labels, str):
        povm_labels = [povm_labels]
        effect_labels = [effect_labels]
        effect_expressions = [effect_expressions]
    dmDim = int(_np.sqrt(basis.dim))  # density-matrix dimension (basis.dim == dmDim**2)
    for povmLbl, ELbls, EExprs in zip(povm_labels,
                                      effect_labels, effect_expressions):
        effect_vecs = {}
        if ELbls == "standard":
            qubit_dim = 4
            # All-qubit state spaces get bitstring effect labels ('00', '01', ...);
            # otherwise fall back to plain integer-string labels.
            if state_space.num_tensor_product_blocks == 1 and \
               all([ldim == qubit_dim for ldim in state_space.tensor_product_block_dimensions(0)]):
                nQubits = len(state_space.tensor_product_block_dimensions(0))
                ELbls = [''.join(t) for t in _itertools.product(('0', '1'), repeat=nQubits)]
            else:
                ELbls = list(map(str, range(dmDim)))
        if EExprs == "standard":
            EExprs = list(map(str, range(dmDim)))
        effect_vecs = {label: create_spam_vector(expr, state_space, basis)
                       for label, expr in zip(ELbls, EExprs)}
        if len(effect_vecs) > 0:
            ret.povms[povmLbl] = _povm.create_from_dmvecs(effect_vecs, povm_type, basis, evotype, state_space)
    for (opLabel, opExpr) in zip(op_labels, op_expressions):
        ret.operations[opLabel] = create_operation(opExpr, state_space, basis, gate_type, evotype)
    # Default gauge group matches the gate parameterization's freedom.
    if gate_type == "full":
        ret.default_gauge_group = _gg.FullGaugeGroup(ret.state_space, evotype)
    elif gate_type == "full TP":
        ret.default_gauge_group = _gg.TPGaugeGroup(ret.state_space, evotype)
    elif gate_type == 'CPTP':
        ret.default_gauge_group = _gg.UnitaryGaugeGroup(ret.state_space, basis, evotype)
    else:
        ret.default_gauge_group = _gg.TrivialGaugeGroup(ret.state_space)
    ret._clean_paramvec()
    return ret
def create_explicit_model_from_expressions(state_space,
                                           op_labels, op_expressions,
                                           prep_labels=('rho0',), prep_expressions=('0',),
                                           effect_labels='standard', effect_expressions='standard',
                                           povm_labels='Mdefault', basis="auto", gate_type="full",
                                           prep_type="auto", povm_type="auto", instrument_type="auto",
                                           evotype='default'):
    """
    Build an :class:`ExplicitOpModel` from expression strings.

    A thin public wrapper around :func:`_create_explicit_model_from_expressions`
    that additionally resolves ``basis="auto"`` from the state-space dimension.
    """
    #Note: so far, all allowed `parameterization` values => densitymx evotype
    state_space = _statespace.StateSpace.cast(state_space)
    stateSpaceDim = state_space.dim
    # Note: what about state_space_labels.tpb_dims?
    if basis == "auto":
        half_log2_dim = _np.log2(stateSpaceDim) / 2
        if _np.isclose(half_log2_dim, round(half_log2_dim)):
            basis = "pp"  # dimension is a power of 4 => qubit-like => Pauli-product
        elif stateSpaceDim == 9:
            basis = "qt"  # qutrit basis
        else:
            basis = "gm"  # general fallback: Gell-Mann basis
    return _create_explicit_model_from_expressions(state_space,
                                                   _Basis.cast(basis, state_space),
                                                   op_labels, op_expressions,
                                                   prep_labels, prep_expressions,
                                                   effect_labels, effect_expressions,
                                                   povm_labels, gate_type=gate_type,
                                                   prep_type=prep_type, povm_type=povm_type,
                                                   instrument_type=instrument_type, evotype=evotype)
def create_explicit_alias_model(mdl_primitives, alias_dict):
    """
    Create a model whose operations are compositions of "primitive" operations.

    The returned model is a copy of `mdl_primitives` whose operations are
    replaced by one (static) operation per alias: the product, under
    `mdl_primitives`'s simulator, of the primitives named by each alias circuit.
    """
    aliased_model = mdl_primitives.copy()
    # Strip out every primitive gate; only the aliased products will remain.
    for op_label in mdl_primitives.operations.keys():
        del aliased_model.operations[op_label]
    for alias_label, alias_circuit in alias_dict.items():
        # Product of the primitive ops along the alias circuit.
        aliased_model.operations[alias_label] = mdl_primitives.sim.product(alias_circuit)
    aliased_model._clean_paramvec()
    return aliased_model
def create_explicit_model(processor_spec, custom_gates=None,
                          depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,
                          depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',
                          lindblad_parameterization='auto',
                          evotype="default", simulator="auto",
                          ideal_gate_type='auto', ideal_spam_type='computational',
                          embed_gates=False, basis='pp'):
    """
    Create an explicit model from a processor specification and noise arguments.

    Bundles the depolarization/stochastic/Lindblad noise arguments into a
    model-noise object and delegates model construction to
    :func:`_create_explicit_model`, using `ideal_spam_type` for both the
    state preparation and POVM types.
    """
    noise_spec = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs,
                                             lindblad_error_coeffs, depolarization_parameterization,
                                             stochastic_parameterization, lindblad_parameterization,
                                             allow_nonlocal=True)
    return _create_explicit_model(processor_spec, noise_spec, custom_gates, evotype, simulator,
                                  ideal_gate_type, ideal_prep_type=ideal_spam_type,
                                  ideal_povm_type=ideal_spam_type, embed_gates=embed_gates, basis=basis)
def _create_explicit_model(processor_spec, modelnoise, custom_gates=None, evotype="default", simulator="auto",
                           ideal_gate_type='auto', ideal_prep_type='auto', ideal_povm_type='auto',
                           embed_gates=False, basis='pp'):
    """
    Internal worker: build an :class:`ExplicitOpModel` from a processor spec and model noise.

    For each gate in `processor_spec`, constructs either an operation or an op
    factory (depending on the gate's availability and whether its "unitary" is
    callable), composes it with any error map supplied by `modelnoise`, and
    stores it in the returned model.  SPAM members come from
    `_create_spam_layers`.  Entries in `custom_gates` (keyed by Label)
    override the constructed gate/factory for that key.
    """
    qubit_labels = processor_spec.qubit_labels
    state_space = _statespace.QubitSpace(qubit_labels)
    evotype = _Evotype.cast(evotype)
    modelnoise = _OpModelNoise.cast(modelnoise)
    modelnoise.reset_access_counters()
    if custom_gates is None:
        custom_gates = {}
    if ideal_gate_type == "auto":
        ideal_gate_type = ('static standard', 'static clifford', 'static unitary')
    if ideal_prep_type == "auto":
        ideal_prep_type = _state.state_type_from_op_type(ideal_gate_type)
    if ideal_povm_type == "auto":
        ideal_povm_type = _povm.povm_type_from_op_type(ideal_gate_type)
    def _embed_unitary(statespace, target_labels, unitary):
        # Expand a local unitary to the full state space by embedding it
        # (as a dense Hilbert-space matrix) on `target_labels`.
        dummyop = _op.EmbeddedOp(statespace, target_labels,
                                 _op.StaticUnitaryOp(unitary, basis='pp', evotype="statevec_slow"))  # basis hardcode?
        return dummyop.to_dense("Hilbert")
    local_gates = _setup_local_gates(processor_spec, evotype, None, {}, ideal_gate_type)  # no custom *local* gates
    ret = _emdl.ExplicitOpModel(state_space, basis, default_gate_type=ideal_gate_type, evotype=evotype,
                                simulator=simulator)
    # Special rule: when initializng an explicit model, if the processor spec has an implied global idle
    # gate (e.g. "(idle)", then the created model instead has a empty-tuple Label as the key for this op.
    global_idle_name = processor_spec.global_idle_gate_name
    if (global_idle_name is not None) and global_idle_name.startswith('(') and global_idle_name.endswith(')'):
        gn_to_make_emptytup = global_idle_name
    else:
        gn_to_make_emptytup = None
    for gn, gate_unitary in processor_spec.gate_unitaries.items():
        gate_is_factory = callable(gate_unitary)  # callable "unitaries" denote op factories
        resolved_avail = processor_spec.resolved_availability(gn)
        if callable(resolved_avail) or resolved_avail == '*':
            # Availability is unrestricted (or rule-based): build an embedding factory.
            assert (embed_gates), "Cannot create factories with `embed_gates=False` yet!"
            key = _label.Label(gn) if (gn != gn_to_make_emptytup) else _label.Label(())
            allowed_sslbls_fn = resolved_avail if callable(resolved_avail) else None
            gate_nQubits = processor_spec.gate_num_qubits(gn)
            ideal_factory = _opfactory.EmbeddingOpFactory(
                state_space, local_gates[gn], num_target_labels=gate_nQubits, allowed_sslbls_fn=allowed_sslbls_fn)
            noiseop = modelnoise.create_errormap(key, evotype, state_space)  # No target indices... just local errs?
            factory = ideal_factory if (noiseop is None) else _op.ComposedOpFactory([ideal_factory, noiseop])
            ret.factories[key] = factory
        else:  # resolved_avail is a list/tuple of available sslbls for the current gate/factory
            for inds in resolved_avail:  # inds are target qubit labels
                key = _label.Label(()) if (inds is None and gn == gn_to_make_emptytup) else _label.Label(gn, inds)
                if key in custom_gates:  # allow custom_gates to specify gate elements directly
                    if isinstance(custom_gates[key], _opfactory.OpFactory):
                        ret.factories[key] = custom_gates[key]
                    elif isinstance(custom_gates[key], _op.LinearOperator):
                        ret.operations[key] = custom_gates[key]
                    else:  # presumably a numpy array or something like it.
                        ret.operations[key] = _op.StaticArbitraryOp(custom_gates[key], evotype,
                                                                    state_space)  # static gates by default
                    continue
                if gate_is_factory:
                    assert(embed_gates), "Cannot create factories with `embed_gates=False` yet!"
                    # TODO: check for modelnoise on *local* factory, i.e. create_errormap(gn, ...)??
                    if inds is None or inds == tuple(qubit_labels):  # then no need to embed
                        ideal_factory = local_gates[gn]
                    else:
                        ideal_factory = _opfactory.EmbeddedOpFactory(state_space, inds, local_gates[gn])
                    noiseop = modelnoise.create_errormap(key, evotype, state_space, target_labels=inds)
                    factory = ideal_factory if (noiseop is None) else _op.ComposedOpFactory([ideal_factory, noiseop])
                    ret.factories[key] = factory
                else:
                    if inds is None or inds == tuple(qubit_labels):  # then no need to embed
                        if isinstance(gate_unitary, (int, _np.int64)):  # interpret gate_unitary as identity
                            assert(gate_unitary == len(qubit_labels)), \
                                "Idle unitary as int should be on all qubits for %s" % (str(gn))
                            ideal_gate = _op.ComposedOp([], evotype, state_space)  # (identity gate on *all* qubits)
                        else:
                            ideal_gate = _op.create_from_unitary_mx(gate_unitary, ideal_gate_type, 'pp',
                                                                    None, evotype, state_space)
                    else:
                        if embed_gates:
                            # Reuse the local gate and wrap it in an EmbeddedOp.
                            ideal_gate = local_gates[gn]
                            ideal_gate = _op.EmbeddedOp(state_space, inds, ideal_gate)
                        else:
                            # Embed at the dense-unitary level instead of via EmbeddedOp.
                            if isinstance(gate_unitary, (int, _np.int64)):  # interpret gate_unitary as identity
                                gate_unitary = _np.identity(2**gate_unitary, 'd')  # turn into explicit identity op
                            if gate_unitary.shape[0] == state_space.udim:  # no need to embed!
                                embedded_unitary = gate_unitary
                            else:
                                embedded_unitary = _embed_unitary(state_space, inds, gate_unitary)
                            ideal_gate = _op.create_from_unitary_mx(embedded_unitary, ideal_gate_type, 'pp',
                                                                    None, evotype, state_space)
                    #TODO: check for modelnoise on *local* gate, i.e. create_errormap(gn, ...)??
                    noiseop = modelnoise.create_errormap(key, evotype, state_space, target_labels=inds)
                    layer = _op.ComposedOp([ideal_gate, noiseop]) if (noiseop is not None) else ideal_gate
                    ret.operations[key] = layer
    # SPAM:
    local_noise = False; independent_gates = True; independent_spam = True
    prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, local_noise,
                                                   ideal_prep_type, ideal_povm_type, evotype,
                                                   state_space, independent_gates, independent_spam)
    for k, v in prep_layers.items():
        ret.preps[k] = v
    for k, v in povm_layers.items():
        ret.povms[k] = v
    modelnoise.warn_about_zero_counters()
    ret._clean_paramvec()
    return ret
def _create_spam_layers(processor_spec, modelnoise, local_noise,
                        ideal_prep_type, ideal_povm_type, evotype, state_space, independent_gates, independent_spam):
    """
    Build the state-preparation and POVM layers of a model.

    Returns `(prep_layers, povm_layers)` — dicts holding a `'rho0'` state and a
    `'Mdefault'` POVM, each an "ideal" member (of type `ideal_prep_type` /
    `ideal_povm_type`) optionally composed with noise from `modelnoise`.  When
    `local_noise` is True, `modelnoise` is assumed to specify 1-qubit errors
    that get embedded on each qubit; otherwise it specifies n-qubit errors.
    """
    qubit_labels = processor_spec.qubit_labels
    num_qubits = processor_spec.num_qubits
    singleQ_state_space = _statespace.default_space_for_udim(2)  # single qubit state space
    # Step 1 -- get the ideal prep and POVM, created as the types we want
    # Step 2 -- add noise, by composing ideal with a noise operation (if desired)
    prep_layers = {}
    povm_layers = {}
    def _add_prep_noise(prep_ops):
        """Append any 'prep' noise from `modelnoise` to the `prep_ops` list (in place)."""
        if local_noise:  # then assume modelnoise specifies 1Q errors
            prep_noiseop1Q = modelnoise.create_errormap('prep', evotype, singleQ_state_space, target_labels=None)
            if prep_noiseop1Q is not None:
                err_gates = [prep_noiseop1Q.copy() for i in range(num_qubits)] \
                    if independent_gates else [prep_noiseop1Q] * num_qubits
                prep_ops.extend([_op.EmbeddedOp(state_space, [qubit_labels[i]], err_gates[i])
                                 for i in range(num_qubits)])
        else:  # use modelnoise to construct n-qubit noise
            prepNoiseMap = modelnoise.create_errormap('prep', evotype, state_space, target_labels=None,
                                                      qubit_graph=processor_spec.qubit_graph)
            if prepNoiseMap is not None: prep_ops.append(prepNoiseMap)
    def _add_povm_noise(povm_ops):
        """Append any 'povm' noise from `modelnoise` to the `povm_ops` list (in place)."""
        if local_noise:  # then assume modelnoise specifies 1Q errors
            povm_noiseop1Q = modelnoise.create_errormap('povm', evotype, singleQ_state_space, target_labels=None)
            if povm_noiseop1Q is not None:
                err_gates = [povm_noiseop1Q.copy() for i in range(num_qubits)] \
                    if independent_gates else [povm_noiseop1Q] * num_qubits
                povm_ops.extend([_op.EmbeddedOp(state_space, [qubit_labels[i]], err_gates[i])
                                 for i in range(num_qubits)])
        else:  # use modelnoise to construct n-qubit noise
            povmNoiseMap = modelnoise.create_errormap('povm', evotype, state_space, target_labels=None,
                                                      qubit_graph=processor_spec.qubit_graph)
            if povmNoiseMap is not None: povm_ops.append(povmNoiseMap)
    def _add_to_prep_layers(ideal_prep, prep_ops):
        """Set prep_layers['rho0'] to `ideal_prep` composed with the ops in `prep_ops`."""
        # BUGFIX: length checks now use the `prep_ops` parameter.  Previously they
        # read the closed-over `prep_ops_to_compose`, which only worked because
        # every call site happened to pass that same list.
        if len(prep_ops) == 0:
            prep_layers['rho0'] = ideal_prep
        elif len(prep_ops) == 1:
            prep_layers['rho0'] = _state.ComposedState(ideal_prep, prep_ops[0])
        else:
            prep_layers['rho0'] = _state.ComposedState(ideal_prep, _op.ComposedOp(prep_ops))
    def _add_to_povm_layers(ideal_povm, povm_ops):
        """Set povm_layers['Mdefault'] to `ideal_povm` composed with the ops in `povm_ops`."""
        # BUGFIX: length checks now use the `povm_ops` parameter (see _add_to_prep_layers).
        if len(povm_ops) == 0:
            povm_layers['Mdefault'] = ideal_povm
        elif len(povm_ops) == 1:
            povm_layers['Mdefault'] = _povm.ComposedPOVM(povm_ops[0], ideal_povm, 'pp')
        else:
            povm_layers['Mdefault'] = _povm.ComposedPOVM(_op.ComposedOp(povm_ops), ideal_povm, 'pp')
    def _create_nq_noise(lndtype):
        """Build an exp(errorgen) noise op of Lindblad type `lndtype` spanning all qubits."""
        if local_noise:
            # create a 1-qubit exp(errorgen) that is applied to each qubit independently
            errgen_1Q = _op.LindbladErrorgen.from_error_generator(singleQ_state_space.dim, lndtype, 'pp', 'pp',
                                                                  truncate=True, evotype=evotype, state_space=None)
            err_gateNQ = _op.ComposedOp([_op.EmbeddedOp(state_space, [qubit_labels[i]],
                                                        _op.ExpErrorgenOp(errgen_1Q.copy()))
                                         for i in range(num_qubits)], evotype, state_space)
        else:
            # create an n-qubit exp(errorgen)
            errgen_NQ = _op.LindbladErrorgen.from_error_generator(state_space.dim, lndtype, 'pp', 'pp',
                                                                  truncate=True, evotype=evotype,
                                                                  state_space=state_space)
            err_gateNQ = _op.ExpErrorgenOp(errgen_NQ)
        return err_gateNQ
    # Here's where the actual logic starts.  The above functions avoid repeated blocks within the different
    # cases below.
    if isinstance(ideal_prep_type, (tuple, list)): ideal_prep_type = ideal_prep_type[0]
    if ideal_prep_type == 'computational' or ideal_prep_type.startswith('lindblad '):
        # Ideal prep is the all-zeros computational basis state, possibly with
        # a Lindblad exp(errorgen) factor baked in.
        ideal_prep = _state.ComputationalBasisState([0] * num_qubits, 'pp', evotype, state_space)
        prep_ops_to_compose = []
        if ideal_prep_type.startswith('lindblad '):
            lndtype = ideal_prep_type[len('lindblad '):]
            err_gateNQ = _create_nq_noise(lndtype)
            prep_ops_to_compose.append(err_gateNQ)
        _add_prep_noise(prep_ops_to_compose)
        _add_to_prep_layers(ideal_prep, prep_ops_to_compose)
    elif ideal_prep_type.startswith('tensor product '):
        # Ideal prep is a tensor product of 1-qubit |0> states.
        vectype = ideal_prep_type[len('tensor product '):]
        v0, v1 = _np.array([1, 0], 'd'), _np.array([0, 1], 'd')
        ideal_prep1Q = _state.create_from_pure_vector(v0, vectype, 'pp', evotype, state_space=None)
        prep_factors = [ideal_prep1Q.copy() for i in range(num_qubits)]
        prep_noiseop1Q = modelnoise.create_errormap('prep', evotype, singleQ_state_space, target_labels=None)
        if prep_noiseop1Q is not None:
            prep_factors = [_state.ComposedState(
                factor, (prep_noiseop1Q.copy() if independent_spam else prep_noiseop1Q)) for factor in prep_factors]
        prep_layers['rho0'] = _state.TensorProductState(prep_factors, state_space)
    else:
        # Ideal prep is a dense pure-state vector of the requested type.
        vectype = ideal_prep_type
        vecs = []
        for i in range(2**num_qubits):
            v = _np.zeros(2**num_qubits, 'd'); v[i] = 1.0
            vecs.append(v)
        ideal_prep = _state.create_from_pure_vector(vecs[0], vectype, 'pp', evotype, state_space=state_space)
        prep_ops_to_compose = []
        _add_prep_noise(prep_ops_to_compose)
        _add_to_prep_layers(ideal_prep, prep_ops_to_compose)
    if isinstance(ideal_povm_type, (tuple, list)): ideal_povm_type = ideal_povm_type[0]
    if ideal_povm_type == 'computational' or ideal_povm_type.startswith('lindblad '):
        ideal_povm = _povm.ComputationalBasisPOVM(num_qubits, evotype, state_space=state_space)
        povm_ops_to_compose = []
        if ideal_povm_type.startswith('lindblad '):
            lndtype = ideal_povm_type[len('lindblad '):]
            err_gateNQ = _create_nq_noise(lndtype)
            povm_ops_to_compose.append(err_gateNQ.copy())  # .copy() => POVM errors independent of prep errors
        _add_povm_noise(povm_ops_to_compose)
        # When noise ops exist, pass None so ComposedPOVM uses its default
        # (computational) base POVM rather than wrapping `ideal_povm` again.
        effective_ideal_povm = None if len(povm_ops_to_compose) > 0 else ideal_povm
        _add_to_povm_layers(effective_ideal_povm, povm_ops_to_compose)
    elif ideal_povm_type.startswith('tensor product '):
        vectype = ideal_povm_type[len('tensor product '):]
        v0, v1 = _np.array([1, 0], 'd'), _np.array([0, 1], 'd')
        ideal_povm1Q = _povm.create_from_pure_vectors([('0', v0), ('1', v1)], vectype, 'pp',
                                                      evotype, state_space=None)
        povm_factors = [ideal_povm1Q.copy() for i in range(num_qubits)]
        povm_noiseop1Q = modelnoise.create_errormap('povm', evotype, singleQ_state_space, target_labels=None)
        if povm_noiseop1Q is not None:
            povm_factors = [_povm.ComposedPOVM(
                (povm_noiseop1Q.copy() if independent_spam else povm_noiseop1Q), factor, 'pp')
                for factor in povm_factors]
        povm_layers['Mdefault'] = _povm.TensorProductPOVM(povm_factors, evotype, state_space)
    else:
        # Dense POVM with one effect per computational basis state, labeled by bitstrings.
        vectype = ideal_povm_type
        vecs = []
        for i in range(2**num_qubits):
            v = _np.zeros(2**num_qubits, 'd'); v[i] = 1.0
            vecs.append(v)
        ideal_povm = _povm.create_from_pure_vectors(
            [(format(i, 'b').zfill(num_qubits), v) for i, v in enumerate(vecs)],
            vectype, 'pp', evotype, state_space=state_space)
        povm_ops_to_compose = []
        _add_povm_noise(povm_ops_to_compose)
        _add_to_povm_layers(ideal_povm, povm_ops_to_compose)
    return prep_layers, povm_layers
def _setup_local_gates(processor_spec, evotype, modelnoise=None, custom_gates=None,
                       ideal_gate_type=('static standard', 'static clifford', 'static unitary')):
    """
    Construct an ordered dictionary of gate operations for a local-noise model.

    Keys are the union of the processor spec's gate names, `custom_gates` keys and
    `modelnoise` keys (duplicates removed).  'prep'/'povm' keys are skipped here
    because SPAM is built elsewhere.  Each ideal gate is built once per gate *name*
    and cached, then composed with any per-key noise map from
    `modelnoise.create_errormap`.  Returns an OrderedDict mapping key -> operation
    (or op *factory* when the unitary is a callable).
    """
    std_gate_unitaries = _itgs.standard_gatename_unitaries()
    if custom_gates is None: custom_gates = {}
    if modelnoise is None: modelnoise = _OpModelPerOpNoise({})
    # Union of every key that may need a gate object; insertion order preserved.
    all_keys = _lt.remove_duplicates(list(processor_spec.gate_names)
                                     + list(custom_gates.keys())
                                     + list(modelnoise.keys()))
    ideal_gates = {}  # cache: gate *name* -> ideal gate object (built at most once)
    ideal_factories = {}  # cache: gate *name* -> ideal op factory
    gatedict = _collections.OrderedDict()
    for key in all_keys:
        # Custom gates are used verbatim and take precedence over everything else.
        if key in custom_gates:
            gatedict[key] = custom_gates[key]
            continue
        if key in ['prep', 'povm']:
            continue
        label = _label.Label(key)
        name = label.name
        U = processor_spec.gate_unitaries[name]
        # `stdname` is set when this gate's unitary matches the standard gate of the
        # same name (shape and values), enabling standard-gate construction paths.
        if ((name not in processor_spec.nonstd_gate_unitaries)
                or (not callable(processor_spec.nonstd_gate_unitaries[name]) and (name in std_gate_unitaries)
                    and processor_spec.nonstd_gate_unitaries[name].shape == std_gate_unitaries[name].shape
                    and _np.allclose(processor_spec.nonstd_gate_unitaries[name], std_gate_unitaries[name]))):
            stdname = name
        else:
            stdname = None
        if isinstance(U, (int, _np.int64)):
            # An integer "unitary" denotes an identity/idle on U qubits: the gate is
            # just the noise map, or an empty composition when there is no noise.
            ideal_gate_state_space = _statespace.default_space_for_num_qubits(U)
            noiseop = modelnoise.create_errormap(key, evotype, ideal_gate_state_space, target_labels=None)
            if noiseop is not None:
                gatedict[key] = noiseop
            else:
                gatedict[key] = _op.ComposedOp([], evotype, ideal_gate_state_space)
        elif not callable(U):
            # A concrete unitary matrix: build (and cache) the ideal gate by name.
            ideal_gate = ideal_gates.get(name, None)
            if ideal_gate is None:
                ideal_gate = _op.create_from_unitary_mx(U, ideal_gate_type, 'pp', stdname, evotype, state_space=None)
                ideal_gates[name] = ideal_gate
            noiseop = modelnoise.create_errormap(key, evotype, ideal_gate.state_space, target_labels=None)
            # This means it will fail to create error maps with a given (non-local/stencil) set of sslbls, as desired
            if noiseop is None:
                gatedict[key] = ideal_gate
            else:
                if isinstance(noiseop, _op.ComposedOp):  # avoid additional nested ComposedOp if we already have one
                    noiseop.insert(0, ideal_gate)
                    gatedict[key] = noiseop
                else:
                    gatedict[key] = _op.ComposedOp([ideal_gate, noiseop])
        else:  # a factory, given by the unitary-valued function U: args -> unitary
            ideal_factory = ideal_factories.get(name, None)
            if ideal_factory is None:
                local_state_space = _statespace.default_space_for_udim(U.shape[0])  # factory *function* SHAPE
                ideal_factory = _opfactory.UnitaryOpFactory(U, local_state_space, 'pp', evotype)
                ideal_factories[name] = ideal_factory
            noiseop = modelnoise.create_errormap(key, evotype, ideal_factory.state_space, target_labels=None)
            gatedict[key] = _opfactory.ComposedOpFactory([ideal_factory, noiseop]) \
                if (noiseop is not None) else ideal_factory
    return gatedict
def create_crosstalk_free_model(processor_spec, custom_gates=None,
                                depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,
                                depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',
                                lindblad_parameterization='auto',
                                evotype="default", simulator="auto", on_construction_error='raise',
                                independent_gates=False, independent_spam=True, ensure_composed_gates=False,
                                ideal_gate_type='auto', ideal_spam_type='computational', implicit_idle_mode='none'):
    """
    Create a "crosstalk-free" model where each gate's noise acts only locally.

    The depolarization / stochastic / Lindblad error arguments are first folded
    into a single model-noise specification (nonlocal error labels disallowed),
    which is then handed to the internal builder.  Note that `ideal_spam_type`
    is used for both the state preparation and the POVM.
    """
    # Combine the three per-channel noise specifications into one noise object.
    noise_spec = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,
                                             depolarization_parameterization, stochastic_parameterization,
                                             lindblad_parameterization, allow_nonlocal=False)
    model = _create_crosstalk_free_model(processor_spec, noise_spec, custom_gates, evotype,
                                         simulator, on_construction_error, independent_gates, independent_spam,
                                         ensure_composed_gates, ideal_gate_type, ideal_spam_type, ideal_spam_type,
                                         implicit_idle_mode)
    return model
def _create_crosstalk_free_model(processor_spec, modelnoise, custom_gates=None, evotype="default", simulator="auto",
                                 on_construction_error='raise', independent_gates=False, independent_spam=True,
                                 ensure_composed_gates=False, ideal_gate_type='auto', ideal_prep_type='auto',
                                 ideal_povm_type='auto', implicit_idle_mode='none'):
    """
    Internal builder for crosstalk-free (local-noise) models.

    Resolves 'auto' ideal-operation types, builds the gate dictionary and SPAM
    layers with *local* noise only, and wraps everything in a _LocalNoiseModel.
    """
    qubit_labels = processor_spec.qubit_labels
    state_space = _statespace.QubitSpace(qubit_labels)
    evotype = _Evotype.cast(evotype)
    modelnoise = _OpModelNoise.cast(modelnoise)
    modelnoise.reset_access_counters()  # so never-accessed noise entries can be detected below
    if ideal_gate_type == "auto":
        ideal_gate_type = ('static standard', 'static clifford', 'static unitary')
    if ideal_prep_type == "auto":
        ideal_prep_type = _state.state_type_from_op_type(ideal_gate_type)
    if ideal_povm_type == "auto":
        ideal_povm_type = _povm.povm_type_from_op_type(ideal_gate_type)
    gatedict = _setup_local_gates(processor_spec, evotype, modelnoise, custom_gates, ideal_gate_type)
    # (Note: global idle is now handled through processor-spec processing)
    # SPAM:
    local_noise = True
    prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, local_noise,
                                                   ideal_prep_type, ideal_povm_type, evotype,
                                                   state_space, independent_gates, independent_spam)
    modelnoise.warn_about_zero_counters()  # warn about noise entries whose access counters are still zero
    return _LocalNoiseModel(processor_spec, gatedict, prep_layers, povm_layers,
                            evotype, simulator, on_construction_error,
                            independent_gates, ensure_composed_gates,
                            implicit_idle_mode)
def create_cloud_crosstalk_model(processor_spec, custom_gates=None,
                                 depolarization_strengths=None, stochastic_error_probs=None, lindblad_error_coeffs=None,
                                 depolarization_parameterization='depolarize', stochastic_parameterization='stochastic',
                                 lindblad_parameterization='auto', evotype="default", simulator="auto",
                                 independent_gates=False, independent_spam=True, errcomp_type="gates",
                                 implicit_idle_mode="none", verbosity=0):
    """
    Create a "cloud-crosstalk" model, where gates may carry errors on qubits
    beyond their targets.

    The per-channel error arguments are folded into one model-noise specification
    (nonlocal/stencil error labels are permitted here, unlike the crosstalk-free
    case) before delegating to the internal builder.
    """
    noise_spec = _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,
                                             depolarization_parameterization, stochastic_parameterization,
                                             lindblad_parameterization, allow_nonlocal=True)
    model = _create_cloud_crosstalk_model(processor_spec, noise_spec, custom_gates, evotype,
                                          simulator, independent_gates, independent_spam, errcomp_type,
                                          implicit_idle_mode, verbosity)
    return model
def _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates=None,
                                  evotype="default", simulator="auto", independent_gates=False,
                                  independent_spam=True, errcomp_type="errorgens",
                                  implicit_idle_mode="none", verbosity=0):
    """
    Internal builder for cloud-crosstalk models.

    Ideal gates are built *without* noise; `modelnoise` is instead applied via
    "stencils" that place errors on clouds of qubits around each gate's targets.
    `errcomp_type` selects whether noise is composed as error maps ('gates') or
    accumulated as error generators ('errorgens').
    """
    qubit_labels = processor_spec.qubit_labels
    state_space = _statespace.QubitSpace(qubit_labels)  # FUTURE: allow other types of state spaces somehow?
    evotype = _Evotype.cast(evotype)
    modelnoise = _OpModelNoise.cast(modelnoise)
    modelnoise.reset_access_counters()
    printer = _VerbosityPrinter.create_printer(verbosity)
    #Create static ideal gates without any noise (we use `modelnoise` further down)
    gatedict = _setup_local_gates(processor_spec, evotype, None, custom_gates,
                                  ideal_gate_type=('static standard', 'static clifford', 'static unitary'))
    stencils = _collections.OrderedDict()  # cache: gate name -> noise stencil, shared by the closures below
    # (Note: global idle is now processed with other processorspec gates)
    # SPAM
    local_noise = False
    prep_layers, povm_layers = _create_spam_layers(processor_spec, modelnoise, local_noise,
                                                   'computational', 'computational', evotype, state_space,
                                                   independent_gates, independent_spam)
    if errcomp_type == 'gates':
        create_stencil_fn = modelnoise.create_errormap_stencil
        apply_stencil_fn = modelnoise.apply_errormap_stencil
    elif errcomp_type == 'errorgens':
        create_stencil_fn = modelnoise.create_errorgen_stencil
        apply_stencil_fn = modelnoise.apply_errorgen_stencil
    else:
        raise ValueError("Invalid `errcomp_type` value: %s" % str(errcomp_type))
    def build_cloudnoise_fn(lbl):
        """Return the cloud-noise operation for primitive layer label `lbl`, or None."""
        # lbl will be for a particular gate and target qubits. If we have error rates for this specific gate
        # and target qubits (i.e this primitive layer op) then we should build it directly (and independently,
        # regardless of the value of `independent_gates`) using these rates. Otherwise, if we have a stencil
        # for this gate, then we should use it to construct the output, using a copy when gates are independent
        # and a reference to the *same* stencil operations when `independent_gates==False`.
        num_sslbls = len(lbl.sslbls) if (lbl.sslbls is not None) else None
        if lbl in modelnoise:
            stencil = create_stencil_fn(lbl, evotype, state_space, num_target_labels=num_sslbls)
        elif lbl.name in stencils:
            stencil = stencils[lbl.name]
        elif lbl.name in modelnoise:
            stencils[lbl.name] = create_stencil_fn(lbl.name, evotype, state_space, num_target_labels=num_sslbls)
            stencil = stencils[lbl.name]
        else:
            return None  # no cloudnoise error for this label
        return apply_stencil_fn(stencil, evotype, state_space, target_labels=lbl.sslbls,
                                qubit_graph=processor_spec.qubit_graph,
                                copy=independent_gates and (lbl not in modelnoise))  # no need to copy if first case
    def build_cloudkey_fn(lbl):
        """Return a hashable "cloud key": (target sslbls, sorted cloud sslbls) for `lbl`."""
        num_sslbls = len(lbl.sslbls) if (lbl.sslbls is not None) else None
        if lbl in modelnoise:
            stencil = create_stencil_fn(lbl, evotype, state_space, num_target_labels=num_sslbls)
        elif lbl.name in stencils:
            stencil = stencils[lbl.name]
        elif lbl.name in modelnoise:
            stencils[lbl.name] = create_stencil_fn(lbl.name, evotype, state_space, num_target_labels=num_sslbls)
            stencil = stencils[lbl.name]
        else:
            # simple cloud-key when there is no cloud noise
            return tuple(lbl.sslbls) if (lbl.sslbls is not None) else qubit_labels
        #Otherwise, process stencil to get a list of all the qubit labels `lbl`'s cloudnoise error
        cloud_sslbls = modelnoise.compute_stencil_absolute_sslbls(stencil, state_space, lbl.sslbls,
                                                                  processor_spec.qubit_graph)
        hashable_sslbls = tuple(lbl.sslbls) if (lbl.sslbls is not None) else qubit_labels
        cloud_key = (hashable_sslbls, tuple(sorted(cloud_sslbls)))
        return cloud_key
    ret = _CloudNoiseModel(processor_spec, gatedict, prep_layers, povm_layers,
                           build_cloudnoise_fn, build_cloudkey_fn,
                           simulator, evotype, errcomp_type,
                           implicit_idle_mode, printer)
    modelnoise.warn_about_zero_counters()  # warn about noise entries whose access counters are still zero
    return ret
def create_cloud_crosstalk_model_from_hops_and_weights(
        processor_spec, custom_gates=None,
        max_idle_weight=1, max_spam_weight=1,
        maxhops=0, extra_weight_1_hops=0, extra_gate_weight=0,
        simulator="auto", evotype='default',
        gate_type="H+S", spam_type="H+S",
        implicit_idle_mode="none", errcomp_type="gates",
        independent_gates=True, independent_spam=True,
        connected_highweight_errors=True,
        verbosity=0):
    """
    Create a cloud-crosstalk model from maximum error weights and graph-hop counts.

    Builds a model-noise specification of Lindblad errors restricted by Pauli
    weight (`max_*_weight`, `extra_gate_weight`) and by graph distance from each
    gate's targets (`maxhops`, `extra_weight_1_hops`), then delegates to
    `_create_cloud_crosstalk_model`.  Only 1- and 2-qubit gates are supported;
    raises ValueError otherwise.
    """
    modelnoise = {}
    all_qubit_labels = processor_spec.qubit_labels
    conn = connected_highweight_errors  # shorthand: require high-weight errors on connected qubit sets
    global_idle_name = processor_spec.global_idle_gate_name
    # Global-idle errors: weights 1..max_idle_weight anywhere on the device.
    if max_idle_weight > 0:
        assert(global_idle_name is not None), \
            "`max_idle_weight` must equal 0 for processor specs without a global idle gate!"
        wt_maxhop_tuples = [(i, None) for i in range(1, max_idle_weight + 1)]
        modelnoise[global_idle_name] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples,
                                                                        gate_type, conn)
    # SPAM errors: weights 1..max_spam_weight for both prep and POVM.
    if max_spam_weight > 0:
        wt_maxhop_tuples = [(i, None) for i in range(1, max_spam_weight + 1)]
        modelnoise['prep'] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples, spam_type, conn)
        modelnoise['povm'] = _build_weight_maxhops_modelnoise(all_qubit_labels, wt_maxhop_tuples, spam_type, conn)
    weight_maxhops_tuples_1Q = [(1, maxhops + extra_weight_1_hops)] + \
                               [(1 + x, maxhops) for x in range(1, extra_gate_weight + 1)]
    weight_maxhops_tuples_2Q = [(1, maxhops + extra_weight_1_hops), (2, maxhops)] + \
                               [(2 + x, maxhops) for x in range(1, extra_gate_weight + 1)]
    for gatenm, gate_unitary in processor_spec.gate_unitaries.items():
        if gatenm == global_idle_name: continue
        gate_nQubits = int(gate_unitary) if isinstance(gate_unitary, (int, _np.int64)) \
            else int(round(_np.log2(gate_unitary.shape[0])))  # integer "unitary" == idle on that many qubits
        if gate_nQubits not in (1, 2):
            raise ValueError("Only 1- and 2-qubit gates are supported. %s acts on %d qubits!"
                             % (str(gatenm), gate_nQubits))
        weight_maxhops_tuples = weight_maxhops_tuples_1Q if gate_nQubits == 1 else weight_maxhops_tuples_2Q
        # '@0'/'@1' are relative stencil labels — presumably resolved to the gate's
        # actual target qubits when the stencil is applied (TODO confirm).
        target_sslbls = ('@0',) if gate_nQubits == 1 else ('@0', '@1')
        modelnoise[gatenm] = _build_weight_maxhops_modelnoise(target_sslbls, weight_maxhops_tuples,
                                                              gate_type, conn)
    return _create_cloud_crosstalk_model(processor_spec, modelnoise, custom_gates,
                                         evotype, simulator, independent_gates, independent_spam,
                                         errcomp_type, implicit_idle_mode, verbosity)
def _iter_basis_inds(weight):
basisIndList = [[1, 2, 3]] * weight
for basisInds in _itertools.product(*basisIndList):
yield basisInds
def _pauli_product_matrix(sigma_inds):
    """
    Return the Kronecker product of 1/sqrt(2)-normalized Pauli matrices.

    Each entry of `sigma_inds` selects a factor: 0 -> I, 1 -> X, 2 -> Y, 3 -> Z
    (each divided by sqrt(2)); factors are tensored in order.
    """
    normalized_paulis = (id2x2 / sqrt2, sigmax / sqrt2, sigmay / sqrt2, sigmaz / sqrt2)
    product = _np.identity(1, 'complex')
    for ind in sigma_inds:
        product = _np.kron(product, normalized_paulis[ind])
    return product
def _construct_restricted_weight_pauli_basis(wt, sparse=False):
    """
    Build an explicit Pauli basis on `wt` qubits: the identity plus every
    non-identity-per-factor Pauli product (labels like 'XYZ').

    The identity element is labeled simply 'I'; remaining elements are labeled
    by joining one of 'X','Y','Z' per tensor factor.
    """
    identity_element = _pauli_product_matrix(_np.zeros(wt, _np.int64))
    elements = [identity_element]
    labels = ['I']
    for inds in _iter_basis_inds(wt):
        elements.append(_pauli_product_matrix(_np.array(inds, _np.int64)))
        labels.append(''.join("IXYZ"[i] for i in inds))
    return _ExplicitBasis(elements, labels, real=True, sparse=sparse)
def _build_weight_maxhops_modelnoise(target_sslbls, weight_maxhops_tuples, lnd_parameterization, connected=True):
    """
    Build a dict mapping stencil labels to weight-restricted Lindblad noise.

    Each (weight, max_hops) pair yields one stencil label: all combinations of
    the given weight when `max_hops` is None or 0, otherwise combinations within
    `max_hops` graph hops of `target_sslbls`.  Returns {} when
    `lnd_parameterization` is 'none' or None.
    """
    modelnoise_dict = {}
    if lnd_parameterization == 'none' or lnd_parameterization is None:
        return {}
    for wt, max_hops in weight_maxhops_tuples:
        if max_hops is None or max_hops == 0:  # Note: maxHops not used in this case
            stencil_lbl = _stencil.StencilLabelAllCombos(target_sslbls, wt, connected)
        else:
            stencil_lbl = _stencil.StencilLabelRadiusCombos(target_sslbls, max_hops, wt, connected)
        # Noise for a weight-wt error lives on a wt-qubit local state space.
        local_state_space = _statespace.default_space_for_num_qubits(wt)
        modelnoise_dict[stencil_lbl] = _LindbladNoise.from_basis_coefficients(
            lnd_parameterization, _construct_restricted_weight_pauli_basis(wt),
            local_state_space)
    return modelnoise_dict
def _build_modelnoise_from_args(depolarization_strengths, stochastic_error_probs, lindblad_error_coeffs,
                                depolarization_parameterization, stochastic_parameterization, lindblad_parameterization,
                                allow_nonlocal):
    """
    Fold separate depolarization / stochastic / Lindblad error specifications
    into a single composed model-noise object.

    Each non-None argument contributes one _OpModelPerOpNoise layer.  When
    `allow_nonlocal` is True, depolarization/stochastic values may themselves be
    dicts keyed by state-space labels, and Lindblad coefficient labels may embed
    state-space labels (e.g. ('H', 'XX:0,1') or the flat string 'HXX:0,1');
    otherwise such nonlocal forms raise ValueError.
    """
    modelnoises = []
    if depolarization_strengths is not None:
        noise_dict = {}
        for lbl, val in depolarization_strengths.items():
            if isinstance(val, dict):  # then value is actually a dictionary of sslbls -> noise specifications
                if not allow_nonlocal: raise ValueError("Nonlocal depolarization strengths not allowed!")
                noise_dict[lbl] = {k: _DepolarizationNoise(v, depolarization_parameterization) for k, v in val.items()}
            else:
                noise_dict[lbl] = _DepolarizationNoise(val, depolarization_parameterization)
        modelnoises.append(_OpModelPerOpNoise(noise_dict))
    if stochastic_error_probs is not None:
        noise_dict = {}
        for lbl, val in stochastic_error_probs.items():
            if isinstance(val, dict):  # then value is actually a dictionary of sslbls -> noise specifications
                if not allow_nonlocal: raise ValueError("Nonlocal stochastic error probs not allowed!")
                noise_dict[lbl] = {k: _StochasticNoise(v, stochastic_parameterization) for k, v in val.items()}
            else:
                noise_dict[lbl] = _StochasticNoise(val, stochastic_parameterization)
        modelnoises.append(_OpModelPerOpNoise(noise_dict))
    if lindblad_error_coeffs is not None:
        if not allow_nonlocal:  # the easy case
            modelnoises.append(_OpModelPerOpNoise({lbl: _LindbladNoise(val, lindblad_parameterization)
                                                   for lbl, val in lindblad_error_coeffs.items()}))
        else:  # then need to process labels like ('H', 'XX:0,1') or 'HXX:0,1'
            def process_stencil_labels(flat_lindblad_errs):
                """Split one gate's flat Lindblad coefficient dict into local and per-sslbls noise."""
                nonlocal_errors = _collections.OrderedDict()
                local_errors = _collections.OrderedDict()
                for nm, val in flat_lindblad_errs.items():
                    if isinstance(nm, str): nm = (nm[0], nm[1:])  # e.g. "HXX" => ('H','XX')
                    err_typ, basisEls = nm[0], nm[1:]
                    sslbls = None
                    local_nm = [err_typ]
                    for bel in basisEls:  # e.g. bel could be "X:Q0" or "XX:Q0,Q1"
                        # OR "X:<n>" where n indexes a target qubit or "X:<dir>" where dir indicates
                        # a graph *direction*, e.g. "up"
                        if ':' in bel:
                            bel_name, bel_sslbls = bel.split(':')  # should have form <name>:<comma-separated-sslbls>
                            bel_sslbls = bel_sslbls.split(',')  # e.g. ('Q0','Q1')
                            integerized_sslbls = []
                            for ssl in bel_sslbls:
                                try: integerized_sslbls.append(int(ssl))
                                except: integerized_sslbls.append(ssl)  # NOTE(review): bare except — non-integer labels kept as strings; `except ValueError` would be safer
                            bel_sslbls = tuple(integerized_sslbls)
                        else:
                            bel_name = bel
                            bel_sslbls = None
                        if sslbls is None:
                            sslbls = bel_sslbls
                        else:
                            #Note: sslbls should always be the same if there are multiple basisEls,
                            #  i.e for nm == ('S',bel1,bel2)
                            assert(sslbls is bel_sslbls or sslbls == bel_sslbls), \
                                "All basis elements of the same error term must operate on the *same* state!"
                        local_nm.append(bel_name)  # drop the state space labels, e.g. "XY:Q0,Q1" => "XY"
                    # keep track of errors by the qubits they act on, as only each such
                    # set will have it's own LindbladErrorgen
                    local_nm = tuple(local_nm)
                    if sslbls is not None:
                        sslbls = tuple(sorted(sslbls))
                        if sslbls not in nonlocal_errors:
                            nonlocal_errors[sslbls] = _collections.OrderedDict()
                        if local_nm in nonlocal_errors[sslbls]:
                            nonlocal_errors[sslbls][local_nm] += val
                        else:
                            nonlocal_errors[sslbls][local_nm] = val
                    else:
                        if local_nm in local_errors:
                            local_errors[local_nm] += val
                        else:
                            local_errors[local_nm] = val
                if len(nonlocal_errors) == 0:
                    return _LindbladNoise(local_errors, lindblad_parameterization)
                else:
                    all_errors = []
                    if len(local_errors) > 0:
                        all_errors.append((None, _LindbladNoise(local_errors, lindblad_parameterization)))
                    for sslbls, errdict in nonlocal_errors.items():
                        all_errors.append((sslbls, _LindbladNoise(errdict, lindblad_parameterization)))
                    return _collections.OrderedDict(all_errors)
            modelnoises.append(_OpModelPerOpNoise({lbl: process_stencil_labels(val)
                                                   for lbl, val in lindblad_error_coeffs.items()}))
    return _ComposedOpModelNoise(modelnoises)
@_deprecated_fn("This function is overly specific and will be removed soon.")
def _nparams_xycnot_cloudnoise_model(num_qubits, geometry="line", max_idle_weight=1, maxhops=0,
                                     extra_weight_1_hops=0, extra_gate_weight=0, require_connected=False,
                                     independent_1q_gates=True, zz_only=False, bidirectional_cnots=True, verbosity=0):
    """
    Count the parameters of a hypothetical X(pi/2)/Y(pi/2)/CNOT cloud-noise model.

    Returns (nParams, total) where `nParams` is an OrderedDict of per-operation
    H+S parameter counts (idle, 1Q gates, CNOTs along graph edges, SPAM) and
    `total` is their sum.  Deprecated — see decorator message.
    """
    # noise can be either a seed or a random array that is long enough to use
    printer = _VerbosityPrinter.create_printer(verbosity)
    printer.log("Computing parameters for a %d-qubit %s model" % (num_qubits, geometry))
    qubitGraph = _QubitGraph.common_graph(num_qubits, geometry, directed=True, all_directions=True)
    #printer.log("Created qubit graph:\n"+str(qubitGraph))
    def idle_count_nparams(max_weight):
        """Count H+S parameters of the global idle, summed over error weights 1..max_weight."""
        ret = 0
        possible_err_qubit_inds = _np.arange(num_qubits)
        for wt in range(1, max_weight + 1):
            nErrTargetLocations = qubitGraph.connected_combos(possible_err_qubit_inds, wt)
            if zz_only and wt > 1: basisSizeWoutId = 1**wt  # ( == 1)
            else: basisSizeWoutId = 3**wt  # (X,Y,Z)^wt
            nErrParams = 2 * basisSizeWoutId  # H+S terms
            ret += nErrTargetLocations * nErrParams
        return ret
    def op_count_nparams(target_qubit_inds, weight_maxhops_tuples, debug=False):
        """Count H+S parameters of one gate from its (weight, maxhops) error specs."""
        ret = 0
        #Note: no contrib from idle noise (already parameterized)
        for wt, maxHops in weight_maxhops_tuples:
            possible_err_qubit_inds = _np.array(qubitGraph.radius(target_qubit_inds, maxHops), _np.int64)
            if require_connected:
                nErrTargetLocations = qubitGraph.connected_combos(possible_err_qubit_inds, wt)
            else:
                nErrTargetLocations = _scipy.special.comb(len(possible_err_qubit_inds), wt)
            if zz_only and wt > 1: basisSizeWoutId = 1**wt  # ( == 1)
            else: basisSizeWoutId = 3**wt  # (X,Y,Z)^wt
            nErrParams = 2 * basisSizeWoutId  # H+S terms
            if debug:
                print(" -- wt%d, hops%d: inds=%s locs = %d, eparams=%d, total contrib = %d" %
                      (wt, maxHops, str(possible_err_qubit_inds), nErrTargetLocations,
                       nErrParams, nErrTargetLocations * nErrParams))
            ret += nErrTargetLocations * nErrParams
        return ret
    nParams = _collections.OrderedDict()
    printer.log("Creating Idle:")
    nParams[_label.Label('Gi')] = idle_count_nparams(max_idle_weight)
    #1Q gates: X(pi/2) & Y(pi/2) on each qubit
    weight_maxhops_tuples_1Q = [(1, maxhops + extra_weight_1_hops)] + \
                               [(1 + x, maxhops) for x in range(1, extra_gate_weight + 1)]
    if independent_1q_gates:
        for i in range(num_qubits):
            printer.log("Creating 1Q X(pi/2) and Y(pi/2) gates on qubit %d!!" % i)
            nParams[_label.Label("Gx", i)] = op_count_nparams((i,), weight_maxhops_tuples_1Q)
            nParams[_label.Label("Gy", i)] = op_count_nparams((i,), weight_maxhops_tuples_1Q)
    else:
        printer.log("Creating common 1Q X(pi/2) and Y(pi/2) gates")
        rep = int(num_qubits / 2)  # a representative (middle) qubit stands in for all of them
        nParams[_label.Label("Gxrep")] = op_count_nparams((rep,), weight_maxhops_tuples_1Q)
        nParams[_label.Label("Gyrep")] = op_count_nparams((rep,), weight_maxhops_tuples_1Q)
    #2Q gates: CNOT gates along each graph edge
    weight_maxhops_tuples_2Q = [(1, maxhops + extra_weight_1_hops), (2, maxhops)] + \
                               [(2 + x, maxhops) for x in range(1, extra_gate_weight + 1)]
    seen_pairs = set()
    for i, j in qubitGraph.edges():  # note: all edges have i<j so "control" of CNOT is always lower index (arbitrary)
        if bidirectional_cnots is False:
            ordered_tup = (i, j) if i <= j else (j, i)
            if ordered_tup in seen_pairs: continue
            else: seen_pairs.add(ordered_tup)
        printer.log("Creating CNOT gate between qubits %d and %d!!" % (i, j))
        nParams[_label.Label("Gcnot", (i, j))] = op_count_nparams((i, j), weight_maxhops_tuples_2Q)
    #SPAM
    nPOVM_1Q = 4  # params for a single 1Q POVM
    nParams[_label.Label('rho0')] = 3 * num_qubits  # 3 b/c each component is TP
    nParams[_label.Label('Mdefault')] = nPOVM_1Q * num_qubits  # num_qubits 1Q-POVMs
    return nParams, sum(nParams.values())
| true | true |
f723bc9754ddb86c41340d88ffa3f486b80a42f3 | 258 | py | Python | weight_converter/utils.py | adamriaz/weight-converter | 1ac82ef2935e76ec6c78f322e995fbce6454b6c8 | [
"MIT"
] | null | null | null | weight_converter/utils.py | adamriaz/weight-converter | 1ac82ef2935e76ec6c78f322e995fbce6454b6c8 | [
"MIT"
] | null | null | null | weight_converter/utils.py | adamriaz/weight-converter | 1ac82ef2935e76ec6c78f322e995fbce6454b6c8 | [
"MIT"
] | null | null | null | def divide_by_zero_check(func):
"""
Decorator for checking division by zero from user input
"""
def inner(value):
if value.value == 0:
raise ValueError('Cannot divide by zero!')
return func(value)
return inner
| 25.8 | 59 | 0.616279 | def divide_by_zero_check(func):
def inner(value):
if value.value == 0:
raise ValueError('Cannot divide by zero!')
return func(value)
return inner
| true | true |
f723be6ac409c60b2a0d167791d7a99d3f39abbb | 2,575 | py | Python | external_libs/spglib-1.9.9/python/test/test_hall_number_from_symmetry.py | shunsuke-sato/octopus | dcf68a185cdb13708395546b1557ca46aed969f6 | [
"Apache-2.0"
] | 4 | 2016-11-17T09:03:11.000Z | 2019-10-17T06:31:08.000Z | external_libs/spglib-1.9.9/python/test/test_hall_number_from_symmetry.py | shunsuke-sato/octopus | dcf68a185cdb13708395546b1557ca46aed969f6 | [
"Apache-2.0"
] | 1 | 2020-08-11T19:14:06.000Z | 2020-08-11T19:14:06.000Z | external_libs/spglib-1.9.9/python/test/test_hall_number_from_symmetry.py | shunsuke-sato/octopus | dcf68a185cdb13708395546b1557ca46aed969f6 | [
"Apache-2.0"
] | 5 | 2016-11-22T20:30:46.000Z | 2020-05-29T23:24:51.000Z | import unittest
import numpy as np
from spglib import get_symmetry_dataset, get_hall_number_from_symmetry
from vasp import read_vasp
from os import listdir
dirnames = ('cubic',
'hexagonal',
'monoclinic',
'orthorhombic',
'tetragonal',
'triclinic',
'trigonal',
'distorted',
'virtual_structure')
class TestGetHallNumberFromSymmetry(unittest.TestCase):
    """Check that spglib's get_hall_number_from_symmetry agrees with the
    hall number reported by get_symmetry_dataset for every test structure."""
    def setUp(self):
        """Collect '<dirname>/<filename>' paths for every VASP file under ./data."""
        self._filenames = []
        for d in dirnames:
            self._filenames += ["%s/%s" % (d, fname)
                                for fname in listdir("./data/%s" % d)]
    def tearDown(self):
        pass
    def test_get_hall_number_from_symmetry(self):
        """Compare hall numbers; 'distorted' structures use a loose symprec and
        only print mismatches (retrying with the refined cell) instead of failing."""
        for fname in self._filenames:
            # Space-group number encoded in the filename (currently unused here).
            spgnum = int(fname.split('-')[1])
            cell = read_vasp("./data/%s" % fname)
            if 'distorted' in fname:
                dataset = get_symmetry_dataset(cell, symprec=1e-1)
                hall_number = get_hall_number_from_symmetry(
                    dataset['rotations'],
                    dataset['translations'],
                    symprec=1e-1)
                if hall_number != dataset['hall_number']:
                    print("%d != %d in %s" %
                          (hall_number, dataset['hall_number'], fname))
                    # Retry with the standardized (refined) cell at tight tolerance.
                    ref_cell = (dataset['std_lattice'],
                                dataset['std_positions'],
                                dataset['std_types'])
                    dataset = get_symmetry_dataset(ref_cell, symprec=1e-5)
                    hall_number = get_hall_number_from_symmetry(
                        dataset['rotations'],
                        dataset['translations'],
                        symprec=1e-5)
                    print("Using refinced cell: %d, %d in %s" %
                          (hall_number, dataset['hall_number'], fname))
            else:
                dataset = get_symmetry_dataset(cell, symprec=1e-5)
                hall_number = get_hall_number_from_symmetry(
                    dataset['rotations'],
                    dataset['translations'],
                    symprec=1e-5)
                self.assertEqual(hall_number, dataset['hall_number'],
                                 msg=("%d != %d in %s" %
                                      (hall_number, dataset['hall_number'], fname)))
if __name__ == '__main__':
    # Run only this test case with verbose output (instead of unittest.main()).
    suite = unittest.TestLoader().loadTestsFromTestCase(
        TestGetHallNumberFromSymmetry)
    unittest.TextTestRunner(verbosity=2).run(suite)
    # unittest.main()
| 39.015152 | 80 | 0.514951 | import unittest
import numpy as np
from spglib import get_symmetry_dataset, get_hall_number_from_symmetry
from vasp import read_vasp
from os import listdir
dirnames = ('cubic',
'hexagonal',
'monoclinic',
'orthorhombic',
'tetragonal',
'triclinic',
'trigonal',
'distorted',
'virtual_structure')
class TestGetHallNumberFromSymmetry(unittest.TestCase):
def setUp(self):
self._filenames = []
for d in dirnames:
self._filenames += ["%s/%s" % (d, fname)
for fname in listdir("./data/%s" % d)]
def tearDown(self):
pass
def test_get_hall_number_from_symmetry(self):
for fname in self._filenames:
spgnum = int(fname.split('-')[1])
cell = read_vasp("./data/%s" % fname)
if 'distorted' in fname:
dataset = get_symmetry_dataset(cell, symprec=1e-1)
hall_number = get_hall_number_from_symmetry(
dataset['rotations'],
dataset['translations'],
symprec=1e-1)
if hall_number != dataset['hall_number']:
print("%d != %d in %s" %
(hall_number, dataset['hall_number'], fname))
ref_cell = (dataset['std_lattice'],
dataset['std_positions'],
dataset['std_types'])
dataset = get_symmetry_dataset(ref_cell, symprec=1e-5)
hall_number = get_hall_number_from_symmetry(
dataset['rotations'],
dataset['translations'],
symprec=1e-5)
print("Using refinced cell: %d, %d in %s" %
(hall_number, dataset['hall_number'], fname))
else:
dataset = get_symmetry_dataset(cell, symprec=1e-5)
hall_number = get_hall_number_from_symmetry(
dataset['rotations'],
dataset['translations'],
symprec=1e-5)
self.assertEqual(hall_number, dataset['hall_number'],
msg=("%d != %d in %s" %
(hall_number, dataset['hall_number'], fname)))
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(
TestGetHallNumberFromSymmetry)
unittest.TextTestRunner(verbosity=2).run(suite)
| true | true |
f723be8c97c6be2454140fb16008806dc90a7f1d | 2,740 | py | Python | data/get_data.py | OmarJabri7/SAIA | f45f1d8073d4b56f5bed6f378f791102b067317c | [
"MIT"
] | 1 | 2022-03-22T19:11:48.000Z | 2022-03-22T19:11:48.000Z | data/get_data.py | OmarJabri7/Disaster-Tweets-Kaggle | 54dfee4684dbfd5bf6cb58cc3974abc051022022 | [
"MIT"
] | null | null | null | data/get_data.py | OmarJabri7/Disaster-Tweets-Kaggle | 54dfee4684dbfd5bf6cb58cc3974abc051022022 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from nltk.corpus import words
import nltk
import re
import string
from data_processing import DisasterProcessor
# Build a cleaned sentence/emotion dataset from the tweet-emotions CSV and
# append it to emotion_data/add_data.txt as ';'-separated "Sentence;Emotion" rows.
X = pd.read_csv("emotion_data/tweet_emotions.csv")
stop_wrds = nltk.corpus.stopwords.words("english")
columns = X.columns  # NOTE(review): immediately overwritten below — only 'content' is cleaned
columns = ["content"]
preprocessor = DisasterProcessor()
eng_words = set(words.words())  # English vocabulary used to drop non-dictionary tokens
for column in columns:
    # Strip $ @ & # characters from lowercased tokens.
    X[column] = X[column].apply(
        lambda x: ' '.join([re.sub("[$@&#]","",w) for w in x.lower().split(" ") if w]))
    # Remove punctuation and keep alphabetic tokens only.
    table = str.maketrans('', '', string.punctuation)
    X[column] = X[column].apply(
        lambda x: ' '.join([w.translate(table) for w in x.split(" ") if w.isalpha()]))
    # Lemmatize (no stemming) and remove English stopwords.
    X[column] = X[column].apply(
        lambda x: preprocessor.utils_preprocess_text(x, flg_stemm=False, flg_lemm=True, lst_stopwords=stop_wrds))
    # Drop 1-character tokens.
    X[column] = X[column].apply(
        lambda x: ' '.join([w for w in x.split(" ") if len(w) >= 2]))
# Keep only tokens found in the English dictionary.
X["content"] = X["content"].apply(
    lambda x: ' '.join(([w for w in x.split(" ") if w in eng_words]))
)
unique_words = list(X['content'].str.split(' ', expand=True).stack().unique())  # NOTE(review): computed but unused
# X.Sentence = X.Sentence.apply(lambda x: x if len(x) > 2 else np.nan)
# X["clean_content"] = X["content"].str.replace('[#,@,&,=,[,http://]', '')
print(np.unique(X["sentiment"]))
X = X.loc[X['sentiment'].isin(['sadness','happiness','love','hate','fun','enthusiasm','relief','fear','anger',
                               'surprise', 'worry'])]
# X = X["sentiment" in ['sadness','happiness','love','hate','fun','enthusiasm','relief','fear','anger']]
X = X[['sentiment','content']]
# Per-emotion sentence arrays.  Only sadness/anger/fear are exported below;
# happy/love/surprise are currently unused leftovers of earlier experiments.
# happy = X.loc[X['sentiment'].isin(['happiness','fun','enthusiasm','relief']), 'content'].values
happy = X.loc[X['sentiment'].isin(['happiness']), 'content'].values
love = X.loc[X['sentiment'].isin(['love']),'content'].values
# sadness = X.loc[X['sentiment'].isin(['sadness','worry']), 'content'].values
sadness = X.loc[X['sentiment'].isin(['sadness']), 'content'].values
# angry = X.loc[X['sentiment'].isin(['hate','anger']), 'content'].values
angry = X.loc[X['sentiment'].isin(['anger']), 'content'].values
surprise = X.loc[X['sentiment'].isin(['surprise']), 'content'].values
fear = X.loc[X['sentiment'].isin(['fear']),'content'].values
# emotions = dict(Emotion = ['happy','love','sadness','angry','surprise','fear'])
# data = {"Sentence" : [happy, love, sadness, angry, surprise, fear],
#         "Emotion" : ['joy','love','sadness','anger','surprise','fear'],}
#
data = {"Sentence" : [sadness, angry, fear],
        "Emotion" : ['sadness','anger','fear'],}
new_df = pd.DataFrame(data)
# explode() turns each array of sentences into one row per sentence.
new_df = new_df.explode('Sentence', ignore_index=True)
new_df.to_csv('emotion_data/add_data.txt', header=None, index=None, sep=';')
| 37.027027 | 113 | 0.630657 | import pandas as pd
import numpy as np
from nltk.corpus import words
import nltk
import re
import string
from data_processing import DisasterProcessor
X = pd.read_csv("emotion_data/tweet_emotions.csv")
stop_wrds = nltk.corpus.stopwords.words("english")
columns = X.columns
columns = ["content"]
preprocessor = DisasterProcessor()
eng_words = set(words.words())
for column in columns:
X[column] = X[column].apply(
lambda x: ' '.join([re.sub("[$@&#]","",w) for w in x.lower().split(" ") if w]))
table = str.maketrans('', '', string.punctuation)
X[column] = X[column].apply(
lambda x: ' '.join([w.translate(table) for w in x.split(" ") if w.isalpha()]))
X[column] = X[column].apply(
lambda x: preprocessor.utils_preprocess_text(x, flg_stemm=False, flg_lemm=True, lst_stopwords=stop_wrds))
X[column] = X[column].apply(
lambda x: ' '.join([w for w in x.split(" ") if len(w) >= 2]))
X["content"] = X["content"].apply(
lambda x: ' '.join(([w for w in x.split(" ") if w in eng_words]))
)
unique_words = list(X['content'].str.split(' ', expand=True).stack().unique())
print(np.unique(X["sentiment"]))
X = X.loc[X['sentiment'].isin(['sadness','happiness','love','hate','fun','enthusiasm','relief','fear','anger',
'surprise', 'worry'])]
X = X[['sentiment','content']]
happy = X.loc[X['sentiment'].isin(['happiness']), 'content'].values
love = X.loc[X['sentiment'].isin(['love']),'content'].values
sadness = X.loc[X['sentiment'].isin(['sadness']), 'content'].values
angry = X.loc[X['sentiment'].isin(['anger']), 'content'].values
surprise = X.loc[X['sentiment'].isin(['surprise']), 'content'].values
fear = X.loc[X['sentiment'].isin(['fear']),'content'].values
data = {"Sentence" : [sadness, angry, fear],
"Emotion" : ['sadness','anger','fear'],}
new_df = pd.DataFrame(data)
new_df = new_df.explode('Sentence', ignore_index=True)
new_df.to_csv('emotion_data/add_data.txt', header=None, index=None, sep=';')
| true | true |
f723c10b490155fd4a2bd2ac7ff767f6f0cf72e9 | 8,494 | py | Python | crosshair/opcode_intercept.py | samuelchassot/CrossHair | 4eac7a23e470567cc23e6d0916ce6dd6820eacd8 | [
"MIT"
] | null | null | null | crosshair/opcode_intercept.py | samuelchassot/CrossHair | 4eac7a23e470567cc23e6d0916ce6dd6820eacd8 | [
"MIT"
] | null | null | null | crosshair/opcode_intercept.py | samuelchassot/CrossHair | 4eac7a23e470567cc23e6d0916ce6dd6820eacd8 | [
"MIT"
] | null | null | null | from collections.abc import MutableMapping, Set
import dis
from types import CodeType
from types import FrameType
from sys import version_info
from crosshair.core import CrossHairValue
from crosshair.core import register_opcode_patch
from crosshair.libimpl.builtinslib import SymbolicInt
from crosshair.libimpl.builtinslib import AnySymbolicStr
from crosshair.libimpl.builtinslib import LazyIntSymbolicStr
from crosshair.simplestructs import LinearSet
from crosshair.simplestructs import ShellMutableSequence
from crosshair.simplestructs import ShellMutableSet
from crosshair.simplestructs import SimpleDict
from crosshair.simplestructs import SliceView
from crosshair.tracers import COMPOSITE_TRACER
from crosshair.tracers import TracingModule
from crosshair.tracers import frame_stack_read
from crosshair.tracers import frame_stack_write
from crosshair.util import CrosshairInternal
# Opcode numbers are looked up by name so this module keeps working across
# CPython versions where the numeric values differ.
BINARY_SUBSCR = dis.opmap["BINARY_SUBSCR"]
BUILD_STRING = dis.opmap["BUILD_STRING"]
COMPARE_OP = dis.opmap["COMPARE_OP"]
# CONTAINS_OP does not exist on older interpreters; fall back to a fixed
# number (118 — presumably its value on the versions that have it; confirm)
# so the lookup does not raise there.
CONTAINS_OP = dis.opmap.get("CONTAINS_OP", 118)
FORMAT_VALUE = dis.opmap["FORMAT_VALUE"]
MAP_ADD = dis.opmap["MAP_ADD"]
SET_ADD = dis.opmap["SET_ADD"]
def frame_op_arg(frame):
    """Return the one-byte argument of the opcode at *frame*'s last instruction."""
    bytecode = frame.f_code.co_code
    return bytecode[frame.f_lasti + 1]
class SymbolicSubscriptInterceptor(TracingModule):
    """Swap concrete containers for symbolic-aware ones ahead of BINARY_SUBSCR.

    When the subscript key is not a plain int/float/str (i.e. likely
    symbolic), indexing a native dict/list would force it to a concrete
    value; instead the container on the stack is replaced with a wrapper
    that can handle symbolic keys.
    """

    opcodes_wanted = frozenset([BINARY_SUBSCR])

    def trace_op(self, frame, codeobj, codenum):
        # Note that because this is called from inside a Python trace handler, tracing
        # is automatically disabled, so there's no need for a `with NoTracing():` guard.
        # Stack layout at BINARY_SUBSCR: ..., container, key (key on top).
        key = frame_stack_read(frame, -1)
        if isinstance(key, (int, float, str)):
            # Concrete key: the native lookup is fine; nothing to do.
            return
        # If we got this far, the index is likely symbolic (or perhaps a slice object)
        container = frame_stack_read(frame, -2)
        container_type = type(container)
        if container_type is dict:
            # SimpleDict won't hash the keys it's given!
            wrapped_dict = SimpleDict(list(container.items()))
            frame_stack_write(frame, -2, wrapped_dict)
        elif container_type is list:
            if isinstance(key, slice):
                if key.step not in (1, None):
                    # Stepped slices are left to the native implementation.
                    return
                start, stop = key.start, key.stop
                if isinstance(start, SymbolicInt) or isinstance(stop, SymbolicInt):
                    # Wrap the list so a symbolic-bounded slice stays symbolic.
                    view_wrapper = SliceView(container, 0, len(container))
                    frame_stack_write(frame, -2, ShellMutableSequence(view_wrapper))
            else:
                pass
                # Nothing useful to do with concrete list and symbolic numeric index.
# Indices into dis.cmp_op for the "in" / "not in" comparison kinds, used to
# recognize containment tests expressed via COMPARE_OP.  On interpreters
# where containment has its own CONTAINS_OP opcode, dis.cmp_op no longer
# lists them, so an empty tuple is also legitimate.
_CONTAINMENT_OP_TYPES = tuple(
    i for (i, name) in enumerate(dis.cmp_op) if name in ("in", "not in")
)
assert len(_CONTAINMENT_OP_TYPES) in (0, 2)
class ContainmentInterceptor(TracingModule):
    """Make `item in container` symbolic-aware for str and set containers.

    Handles both the legacy COMPARE_OP form and the dedicated CONTAINS_OP
    opcode; when the tested item is a CrossHair value, the concrete
    container on the stack is replaced with a symbolic wrapper so the
    membership test does not force realization.
    """

    opcodes_wanted = frozenset(
        [
            COMPARE_OP,
            CONTAINS_OP,
        ]
    )

    def trace_op(self, frame, codeobj, codenum):
        if codenum == COMPARE_OP:
            # Only the "in" / "not in" comparison variants are of interest.
            compare_type = frame_op_arg(frame)
            if compare_type not in _CONTAINMENT_OP_TYPES:
                return
        # Stack layout: ..., item, container (container on top).
        item = frame_stack_read(frame, -2)
        if not isinstance(item, CrossHairValue):
            return
        container = frame_stack_read(frame, -1)
        containertype = type(container)
        new_container = None
        if containertype is str:
            # Re-express the concrete string symbolically via its code points.
            new_container = LazyIntSymbolicStr([ord(c) for c in container])
        elif containertype is set:
            new_container = ShellMutableSet(LinearSet(container))
        if new_container is not None:
            frame_stack_write(frame, -1, new_container)
class BuildStringInterceptor(TracingModule):
    """
    Adds symbolic handling for the BUILD_STRING opcode (used by f-strings).

    BUILD_STRING concatenates strings from the stack in a fast, but unforgiving way:
    it requires all the substrings to be real Python strings.
    We work around this by replacing the substrings with empty strings, computing the
    concatenation ourselves, and swapping our result in after the opcode completes.
    """

    opcodes_wanted = frozenset([BUILD_STRING])

    def trace_op(self, frame, codeobj, codenum):
        # The opcode argument is the number of stack entries to concatenate.
        count = frame_op_arg(frame)
        real_result = ""
        for offset in range(-(count), 0):
            substr = frame_stack_read(frame, offset)
            if not isinstance(substr, (str, AnySymbolicStr)):
                raise CrosshairInternal
            # Because we know these are all symbolic or concrete strings, it's ok to
            # not have tracing on when we do the concatenation here:
            real_result += substr
            # Neutralize the slot so the interpreter's BUILD_STRING only ever
            # sees real (empty) strings and cannot fail.
            frame_stack_write(frame, offset, "")

        def post_op():
            # Replace the interpreter's (empty) result with our concatenation.
            frame_stack_write(frame, -1, real_result)

        COMPOSITE_TRACER.set_postop_callback(codeobj, post_op)
class FormatValueInterceptor(TracingModule):
    """Avoid realization during FORMAT_VALUE (used by f-strings)."""

    opcodes_wanted = frozenset([FORMAT_VALUE])

    def trace_op(self, frame, codeobj, codenum):
        flags = frame_op_arg(frame)
        # Only the plain-value (0x00) and str-conversion (0x01) cases are
        # handled; other flag values carry conversions/format specs that we
        # cannot shortcut.
        if flags not in (0x00, 0x01):
            return  # formatting spec is present
        orig_obj = frame_stack_read(frame, -1)
        if not isinstance(orig_obj, AnySymbolicStr):
            return
        # Format a dummy empty string, and swap the original back in:
        frame_stack_write(frame, -1, "")

        def post_op():
            frame_stack_write(frame, -1, orig_obj)

        COMPOSITE_TRACER.set_postop_callback(codeobj, post_op)
class MapAddInterceptor(TracingModule):
    """De-optimize MAP_ADD over symbolics (used in dict comprehensions)."""

    opcodes_wanted = frozenset([MAP_ADD])

    def trace_op(self, frame: FrameType, codeobj: CodeType, codenum: int) -> None:
        # The opcode argument locates the comprehension's dict beneath the
        # key/value pair on the stack.
        dict_offset = -(frame_op_arg(frame) + 2)
        dict_obj = frame_stack_read(frame, dict_offset)
        if not isinstance(dict_obj, (dict, MutableMapping)):
            raise CrosshairInternal
        top, second = frame_stack_read(frame, -1), frame_stack_read(frame, -2)
        # Key and value were swapped in Python 3.8
        key, value = (second, top) if version_info >= (3, 8) else (top, second)
        if isinstance(dict_obj, dict):
            if isinstance(key, CrossHairValue):
                # Symbolic key: switch to SimpleDict, which never hashes keys.
                dict_obj = SimpleDict(list(dict_obj.items()))
            else:
                # Key and dict are concrete; continue as normal.
                return
        # Have the interpreter do a fake assignment, namely `{}[1] = 1`
        frame_stack_write(frame, dict_offset, {})
        frame_stack_write(frame, -1, 1)
        frame_stack_write(frame, -2, 1)
        # And do our own assignment separately:
        dict_obj[key] = value

        # Later, overwrite the interpreter's result with ours:
        def post_op():
            # dict_offset shifts by 2 because MAP_ADD popped the key/value.
            frame_stack_write(frame, dict_offset + 2, dict_obj)

        COMPOSITE_TRACER.set_postop_callback(codeobj, post_op)
class SetAddInterceptor(TracingModule):
    """De-optimize SET_ADD over symbolics (used in set comprehensions)."""

    opcodes_wanted = frozenset([SET_ADD])

    def trace_op(self, frame: FrameType, codeobj: CodeType, codenum: int) -> None:
        # The opcode argument locates the comprehension's set beneath the item.
        set_offset = -(frame_op_arg(frame) + 1)
        set_obj = frame_stack_read(frame, set_offset)
        if not isinstance(set_obj, Set):
            raise CrosshairInternal(type(set_obj))
        item = frame_stack_read(frame, -1)
        if isinstance(set_obj, set):
            if isinstance(item, CrossHairValue):
                # Symbolic item: switch to a wrapper that avoids hashing it.
                set_obj = ShellMutableSet(set_obj)
            else:
                # Set and value are concrete; continue as normal.
                return
        # Have the interpreter do a fake addition, namely `set().add(1)`
        frame_stack_write(frame, set_offset, set())
        frame_stack_write(frame, -1, 1)
        # And do our own addition separately:
        set_obj.add(item)

        # Later, overwrite the interpreter's result with ours:
        def post_op():
            # set_offset shifts by 1 because SET_ADD popped the item.
            frame_stack_write(frame, set_offset + 1, set_obj)

        COMPOSITE_TRACER.set_postop_callback(codeobj, post_op)
def make_registrations():
    """Install every opcode interceptor into the tracing machinery."""
    interceptor_classes = (
        SymbolicSubscriptInterceptor,
        ContainmentInterceptor,
        BuildStringInterceptor,
        FormatValueInterceptor,
        MapAddInterceptor,
        SetAddInterceptor,
    )
    for interceptor_class in interceptor_classes:
        register_opcode_patch(interceptor_class())
| 38.089686 | 88 | 0.674005 | from collections.abc import MutableMapping, Set
import dis
from types import CodeType
from types import FrameType
from sys import version_info
from crosshair.core import CrossHairValue
from crosshair.core import register_opcode_patch
from crosshair.libimpl.builtinslib import SymbolicInt
from crosshair.libimpl.builtinslib import AnySymbolicStr
from crosshair.libimpl.builtinslib import LazyIntSymbolicStr
from crosshair.simplestructs import LinearSet
from crosshair.simplestructs import ShellMutableSequence
from crosshair.simplestructs import ShellMutableSet
from crosshair.simplestructs import SimpleDict
from crosshair.simplestructs import SliceView
from crosshair.tracers import COMPOSITE_TRACER
from crosshair.tracers import TracingModule
from crosshair.tracers import frame_stack_read
from crosshair.tracers import frame_stack_write
from crosshair.util import CrosshairInternal
BINARY_SUBSCR = dis.opmap["BINARY_SUBSCR"]
BUILD_STRING = dis.opmap["BUILD_STRING"]
COMPARE_OP = dis.opmap["COMPARE_OP"]
CONTAINS_OP = dis.opmap.get("CONTAINS_OP", 118)
FORMAT_VALUE = dis.opmap["FORMAT_VALUE"]
MAP_ADD = dis.opmap["MAP_ADD"]
SET_ADD = dis.opmap["SET_ADD"]
def frame_op_arg(frame):
return frame.f_code.co_code[frame.f_lasti + 1]
class SymbolicSubscriptInterceptor(TracingModule):
opcodes_wanted = frozenset([BINARY_SUBSCR])
def trace_op(self, frame, codeobj, codenum):
key = frame_stack_read(frame, -1)
if isinstance(key, (int, float, str)):
return
# If we got this far, the index is likely symbolic (or perhaps a slice object)
container = frame_stack_read(frame, -2)
container_type = type(container)
if container_type is dict:
# SimpleDict won't hash the keys it's given!
wrapped_dict = SimpleDict(list(container.items()))
frame_stack_write(frame, -2, wrapped_dict)
elif container_type is list:
if isinstance(key, slice):
if key.step not in (1, None):
return
start, stop = key.start, key.stop
if isinstance(start, SymbolicInt) or isinstance(stop, SymbolicInt):
view_wrapper = SliceView(container, 0, len(container))
frame_stack_write(frame, -2, ShellMutableSequence(view_wrapper))
else:
pass
# Nothing useful to do with concrete list and symbolic numeric index.
_CONTAINMENT_OP_TYPES = tuple(
i for (i, name) in enumerate(dis.cmp_op) if name in ("in", "not in")
)
assert len(_CONTAINMENT_OP_TYPES) in (0, 2)
class ContainmentInterceptor(TracingModule):
opcodes_wanted = frozenset(
[
COMPARE_OP,
CONTAINS_OP,
]
)
def trace_op(self, frame, codeobj, codenum):
if codenum == COMPARE_OP:
compare_type = frame_op_arg(frame)
if compare_type not in _CONTAINMENT_OP_TYPES:
return
item = frame_stack_read(frame, -2)
if not isinstance(item, CrossHairValue):
return
container = frame_stack_read(frame, -1)
containertype = type(container)
new_container = None
if containertype is str:
new_container = LazyIntSymbolicStr([ord(c) for c in container])
elif containertype is set:
new_container = ShellMutableSet(LinearSet(container))
if new_container is not None:
frame_stack_write(frame, -1, new_container)
class BuildStringInterceptor(TracingModule):
opcodes_wanted = frozenset([BUILD_STRING])
def trace_op(self, frame, codeobj, codenum):
count = frame_op_arg(frame)
real_result = ""
for offset in range(-(count), 0):
substr = frame_stack_read(frame, offset)
if not isinstance(substr, (str, AnySymbolicStr)):
raise CrosshairInternal
# Because we know these are all symbolic or concrete strings, it's ok to
real_result += substr
frame_stack_write(frame, offset, "")
def post_op():
frame_stack_write(frame, -1, real_result)
COMPOSITE_TRACER.set_postop_callback(codeobj, post_op)
class FormatValueInterceptor(TracingModule):
opcodes_wanted = frozenset([FORMAT_VALUE])
def trace_op(self, frame, codeobj, codenum):
flags = frame_op_arg(frame)
if flags not in (0x00, 0x01):
return
orig_obj = frame_stack_read(frame, -1)
if not isinstance(orig_obj, AnySymbolicStr):
return
frame_stack_write(frame, -1, "")
def post_op():
frame_stack_write(frame, -1, orig_obj)
COMPOSITE_TRACER.set_postop_callback(codeobj, post_op)
class MapAddInterceptor(TracingModule):
opcodes_wanted = frozenset([MAP_ADD])
def trace_op(self, frame: FrameType, codeobj: CodeType, codenum: int) -> None:
dict_offset = -(frame_op_arg(frame) + 2)
dict_obj = frame_stack_read(frame, dict_offset)
if not isinstance(dict_obj, (dict, MutableMapping)):
raise CrosshairInternal
top, second = frame_stack_read(frame, -1), frame_stack_read(frame, -2)
key, value = (second, top) if version_info >= (3, 8) else (top, second)
if isinstance(dict_obj, dict):
if isinstance(key, CrossHairValue):
dict_obj = SimpleDict(list(dict_obj.items()))
else:
return
frame_stack_write(frame, dict_offset, {})
frame_stack_write(frame, -1, 1)
frame_stack_write(frame, -2, 1)
dict_obj[key] = value
def post_op():
frame_stack_write(frame, dict_offset + 2, dict_obj)
COMPOSITE_TRACER.set_postop_callback(codeobj, post_op)
class SetAddInterceptor(TracingModule):
opcodes_wanted = frozenset([SET_ADD])
def trace_op(self, frame: FrameType, codeobj: CodeType, codenum: int) -> None:
set_offset = -(frame_op_arg(frame) + 1)
set_obj = frame_stack_read(frame, set_offset)
if not isinstance(set_obj, Set):
raise CrosshairInternal(type(set_obj))
item = frame_stack_read(frame, -1)
if isinstance(set_obj, set):
if isinstance(item, CrossHairValue):
set_obj = ShellMutableSet(set_obj)
else:
# Set and value are concrete; continue as normal.
return
# Have the interpreter do a fake addition, namely `set().add(1)`
frame_stack_write(frame, set_offset, set())
frame_stack_write(frame, -1, 1)
# And do our own addition separately:
set_obj.add(item)
# Later, overwrite the interpreter's result with ours:
def post_op():
frame_stack_write(frame, set_offset + 1, set_obj)
COMPOSITE_TRACER.set_postop_callback(codeobj, post_op)
def make_registrations():
register_opcode_patch(SymbolicSubscriptInterceptor())
register_opcode_patch(ContainmentInterceptor())
register_opcode_patch(BuildStringInterceptor())
register_opcode_patch(FormatValueInterceptor())
register_opcode_patch(MapAddInterceptor())
register_opcode_patch(SetAddInterceptor())
| true | true |
f723c12709c80e71332f0ad4c801db953c05c2c8 | 1,652 | py | Python | linptech/packet.py | yangguozhanzhao/linptech | 92ee1538d11baf473535cd0ed6b879adcee66e70 | [
"MIT"
] | 1 | 2020-07-26T05:37:43.000Z | 2020-07-26T05:37:43.000Z | linptech/packet.py | yangguozhanzhao/linptech | 92ee1538d11baf473535cd0ed6b879adcee66e70 | [
"MIT"
] | null | null | null | linptech/packet.py | yangguozhanzhao/linptech | 92ee1538d11baf473535cd0ed6b879adcee66e70 | [
"MIT"
] | 2 | 2018-04-03T04:17:12.000Z | 2018-10-12T09:44:20.000Z | from linptech.crc8 import crc8
import logging
class Packet(object):
    '''
    Base class for Packet.

    Mainly used for Packet.create(data, optional) to build an outgoing
    message and Packet.parse(packet) to parse a received one.
    Packets are hex strings: "55" + 4-byte header + header CRC8
    + data + optional + data CRC8.
    '''

    def __init__(self, data=None, optional="00"*7):
        # data/optional are hex-encoded payload strings.
        if data is None:
            logging.warning('Packet.data is None')
        else:
            self.data = data
        if optional is None:
            logging.info('Packet.optional is None.')
        else:
            self.optional = optional

    @staticmethod
    def check(packet):
        """Return True when *packet* has the sync prefix and both CRC8s match."""
        return (packet.startswith("550")
                and crc8(packet[2:10]) == packet[10:12]
                and crc8(packet[12:-2]) == packet[-2:])

    @staticmethod
    def parse(packet):
        """
        Parse a received packet into (data, optional).

        Returns None (implicitly) when the packet fails the CRC check or
        slicing raises.
        """
        if Packet.check(packet):
            try:
                # Header bytes [4:6] hold the data length in bytes; each byte
                # is two hex characters in the string.
                data_len = int(packet[4:6], 16)
                data = packet[12:12 + data_len * 2]
                optional = packet[12 + data_len * 2:26 + data_len * 2]
                return data, optional
            except Exception as e:
                logging.error("parse packet wrong:%s", e)
                return
        else:
            logging.error("packet is invalid")
            return

    @staticmethod
    def create(data=None, optional="00"*7):
        """
        Create a packet (hex string) ready for sending from data and optional.
        """
        try:
            # Bug fix: the length byte must be ZERO-padded lowercase hex.
            # The previous format, "{0:>02}", right-aligned with a SPACE fill,
            # so any payload shorter than 16 bytes produced an invalid header
            # (e.g. a 9-byte payload yielded "00 90701").
            data_len = "{0:02x}".format(len(data) // 2)
            m1 = "00" + data_len + "0701"
            m2 = data + optional
            packet = "55" + m1 + crc8(m1) + m2 + crc8(m2)
            return packet
        except Exception as e:
            logging.error("create packet wrong:%s", e)
            return
if __name__ == "__main__":
    # Manual smoke test: build a packet from a sample 9-byte payload.
    logging.getLogger().setLevel(logging.INFO)
    sample_data = "1f8000004581020101"
    Packet.create(sample_data)
import logging
class Packet(object):
def __init__(self, data=None, optional="00"*7):
if data is None:
logging.warning('Packet.data is None')
else:
self.data = data
if optional is None:
logging.info('Packet.optional is None.')
else:
self.optional = optional
@staticmethod
def check(packet):
if packet.startswith("550") and \
crc8(packet[2:10])==packet[10:12] and \
crc8(packet[12:-2])==packet[-2:]:
return True
else:
return False
@staticmethod
def parse(packet):
if Packet.check(packet):
try:
data_len=int(packet[4:6],16)
data=packet[12:12+data_len*2]
optional=packet[12+data_len*2:26+data_len*2]
return data,optional
except Exception as e:
logging.error("parse packet wrong:%s",e)
return
else :
logging.error("packet is invalid")
return
@staticmethod
def create(data=None, optional="00"*7):
try:
data_len = "{0:>02}".format(hex(int(len(data)/2))[2:])
m1 = "00"+data_len+"0701"
m2 = data+optional
packet = "55"+m1+crc8(m1)+m2+crc8(m2)
return packet
except Exception as e:
logging.error("create packet wrong:%s",e)
return
if __name__ == "__main__":
logging.getLogger().setLevel(logging.INFO)
data="1f8000004581020101"
Packet.create(data) | true | true |
f723c13ed49b8c87b5f0275078a084b4c8f235a1 | 16,785 | py | Python | test/latency_position_test.py | AustinHellerRepo/GameManager | 2eee8e821f551b4683e59ea8cde7e61c26cf8878 | [
"MIT"
] | null | null | null | test/latency_position_test.py | AustinHellerRepo/GameManager | 2eee8e821f551b4683e59ea8cde7e61c26cf8878 | [
"MIT"
] | null | null | null | test/latency_position_test.py | AustinHellerRepo/GameManager | 2eee8e821f551b4683e59ea8cde7e61c26cf8878 | [
"MIT"
] | null | null | null | from __future__ import annotations
import unittest
import time
import matplotlib.pyplot as plt
import numpy as np
from typing import List, Tuple, Dict, Set, Callable, Type
class Dot():
    """A 2-D point mass whose trajectory is evaluated analytically.

    State is stored as (position, velocity, acceleration) relative to an
    internal t=0 reference, plus an optional constant jerk (the
    "acceleration delta") that is active until a fixed end time index.
    Queries at any ``time_index`` are computed in closed form, so sampling
    frequency never affects the trajectory.  ``__time_index_offset``
    rebases external time indices onto the internal t=0 reference whenever
    the state is replaced (bounce/reflect/set_state/merge).
    """

    def __init__(self, position: Tuple[float, float], velocity: Tuple[float, float], acceleration: Tuple[float, float]):
        self.__position = position
        self.__velocity = velocity
        self.__acceleration = acceleration
        self.__time_index_offset = 0
        # Constant jerk, active while (time_index + offset) is below the end
        # index; None when no ramp is active.
        self.__acceleration_delta = None  # type: Tuple[float, float]
        self.__acceleration_delta_end_time_index = None  # type: float
        # Acceleration captured when the ramp was installed; used as the base
        # acceleration for the post-ramp phase.
        self.__acceleration_delta_end_time_index_acceleration = None  # type: Tuple[float, float]

    def __rebase_time(self, *, time_index: float) -> None:
        """Make *time_index* the new internal t=0, shifting or clearing any jerk window."""
        calculated_time_index = time_index + self.__time_index_offset
        if self.__acceleration_delta_end_time_index is not None:
            self.__acceleration_delta_end_time_index -= calculated_time_index
            if self.__acceleration_delta_end_time_index <= 0:
                # Ramp already finished; drop it entirely.
                self.__acceleration_delta = None
                self.__acceleration_delta_end_time_index = None
                self.__acceleration_delta_end_time_index_acceleration = None
        self.__time_index_offset = -time_index

    def set_position(self, *, position: Tuple[float, float]):
        """Replace the stored reference position."""
        self.__position = position

    # Backward-compatible alias preserving the original (misspelled) name.
    set_positiion = set_position

    def set_velocity(self, *, velocity: Tuple[float, float]):
        """Replace the stored reference velocity."""
        self.__velocity = velocity

    def set_acceleration(self, *, acceleration: Tuple[float, float]):
        """Replace the stored reference acceleration."""
        self.__acceleration = acceleration

    def get_position(self, *, time_index: float) -> Tuple[float, float]:
        """Return the position at ``time_index``.

        Uses p0 + v*t + a*t^2/2, adding j*t^3/6 while the jerk ramp is
        active; after the ramp ends, the ramp-end acceleration drives the
        quadratic term for the remaining time.
        """
        calculated_time_index = time_index + self.__time_index_offset
        position = list(self.__position)
        for dimension_index in range(len(position)):
            position[dimension_index] += self.__velocity[dimension_index] * calculated_time_index
            if self.__acceleration_delta_end_time_index is None:
                position[dimension_index] += (self.__acceleration[dimension_index] * calculated_time_index ** 2) / 2.0
            else:
                if calculated_time_index < self.__acceleration_delta_end_time_index:
                    position[dimension_index] += (self.__acceleration[dimension_index] * calculated_time_index ** 2) / 2.0
                    position[dimension_index] += (self.__acceleration_delta[dimension_index] * calculated_time_index ** 3) / 6.0
                else:
                    # NOTE(review): this post-ramp expansion omits the
                    # (ramp-gained velocity) x (elapsed time) cross term, so
                    # it is not the exact integral of get_velocity — confirm
                    # whether that is intended before relying on it.
                    position[dimension_index] += (self.__acceleration[dimension_index] * self.__acceleration_delta_end_time_index ** 2) / 2.0
                    position[dimension_index] += (self.__acceleration_delta_end_time_index_acceleration[dimension_index] * (calculated_time_index - self.__acceleration_delta_end_time_index) ** 2) / 2.0
                    position[dimension_index] += (self.__acceleration_delta[dimension_index] * self.__acceleration_delta_end_time_index ** 3) / 6.0
        return tuple(position)

    def get_velocity(self, *, time_index: float) -> Tuple[float, float]:
        """Return the velocity at ``time_index`` (jerk ramp included)."""
        calculated_time_index = time_index + self.__time_index_offset
        velocity = list(self.__velocity)
        for dimension_index in range(len(velocity)):
            if self.__acceleration_delta_end_time_index is None:
                velocity[dimension_index] += self.__acceleration[dimension_index] * calculated_time_index
            else:
                if calculated_time_index < self.__acceleration_delta_end_time_index:
                    velocity[dimension_index] += self.__acceleration[dimension_index] * calculated_time_index
                    velocity[dimension_index] += (self.__acceleration_delta[dimension_index] * calculated_time_index ** 2) / 2.0
                else:
                    velocity[dimension_index] += self.__acceleration[dimension_index] * self.__acceleration_delta_end_time_index
                    velocity[dimension_index] += self.__acceleration_delta_end_time_index_acceleration[dimension_index] * (calculated_time_index - self.__acceleration_delta_end_time_index)
                    velocity[dimension_index] += (self.__acceleration_delta[dimension_index] * self.__acceleration_delta_end_time_index ** 2) / 2.0
        return tuple(velocity)

    def get_acceleration(self, *, time_index: float) -> Tuple[float, float]:
        """Return the acceleration at ``time_index`` (jerk ramp included).

        Bug fix: this previously returned the raw stored acceleration,
        discarding the ramp contribution it had just computed.
        """
        calculated_time_index = time_index + self.__time_index_offset
        acceleration = [0.0] * len(self.__position)
        for dimension_index in range(len(acceleration)):
            if self.__acceleration_delta_end_time_index is None:
                acceleration[dimension_index] += self.__acceleration[dimension_index]
            else:
                if calculated_time_index < self.__acceleration_delta_end_time_index:
                    acceleration[dimension_index] += self.__acceleration[dimension_index]
                    acceleration[dimension_index] += (self.__acceleration_delta[dimension_index] * calculated_time_index)
                else:
                    acceleration[dimension_index] += self.__acceleration_delta_end_time_index_acceleration[dimension_index]
                    acceleration[dimension_index] += (self.__acceleration_delta[dimension_index] * self.__acceleration_delta_end_time_index)
        return tuple(acceleration)

    def bounce(self, *, time_index: float):
        """Bounce off a horizontal wall: flip the y-velocity at ``time_index``."""
        bounce_position = self.get_position(
            time_index=time_index
        )
        bounce_velocity = self.get_velocity(
            time_index=time_index
        )
        bounce_acceleration = self.get_acceleration(
            time_index=time_index
        )
        self.__position = bounce_position
        self.__velocity = (bounce_velocity[0], -bounce_velocity[1])
        self.__acceleration = bounce_acceleration
        self.__rebase_time(time_index=time_index)

    def reflect(self, *, time_index: float):
        """Reflect off a vertical wall: flip the x-velocity at ``time_index``."""
        reflect_position = self.get_position(
            time_index=time_index
        )
        reflect_velocity = self.get_velocity(
            time_index=time_index
        )
        reflect_acceleration = self.get_acceleration(
            time_index=time_index
        )
        self.__position = reflect_position
        self.__velocity = (-reflect_velocity[0], reflect_velocity[1])
        self.__acceleration = reflect_acceleration
        self.__rebase_time(time_index=time_index)

    def set_state(self, *, position: Tuple[float, float], velocity: Tuple[float, float], acceleration: Tuple[float, float], time_index: float):
        """Replace the full kinematic state, effective at ``time_index``."""
        self.__position = position
        self.__velocity = velocity
        self.__acceleration = acceleration
        self.__rebase_time(time_index=time_index)

    def set_acceleration_delta(self, *, time_index: float, acceleration_delta: Tuple[float, float], end_time_index: float):
        """Apply a constant jerk from ``time_index`` for ``end_time_index`` time units."""
        time_index_position = self.get_position(
            time_index=time_index
        )
        time_index_velocity = self.get_velocity(
            time_index=time_index
        )
        time_index_acceleration = self.get_acceleration(
            time_index=time_index
        )
        self.__position = time_index_position
        self.__velocity = time_index_velocity
        self.__acceleration = time_index_acceleration
        self.__time_index_offset = -time_index
        self.__acceleration_delta = acceleration_delta
        self.__acceleration_delta_end_time_index = end_time_index
        # NOTE(review): here the field holds the ramp-START acceleration,
        # while merge() stores the ramp-END (destination) acceleration in it.
        # Confirm which convention the post-ramp formulas expect.
        self.__acceleration_delta_end_time_index_acceleration = time_index_acceleration

    def merge(self, *, dot: Dot, current_time_index: float, merge_time_index_offset: float):
        """Steer toward *dot* so both states coincide after ``merge_time_index_offset``.

        Solves (per dimension) for a constant jerk plus base acceleration
        that carries this dot from its current position/velocity to the
        target's predicted position/velocity at the merge time (a cubic
        Hermite-style fit).
        """
        self_position = self.get_position(
            time_index=current_time_index
        )
        self_velocity = self.get_velocity(
            time_index=current_time_index
        )
        destination_position = dot.get_position(
            time_index=current_time_index + merge_time_index_offset
        )
        destination_velocity = dot.get_velocity(
            time_index=current_time_index + merge_time_index_offset
        )
        destination_acceleration = dot.get_acceleration(
            time_index=current_time_index + merge_time_index_offset
        )
        acceleration_delta = []
        acceleration = []
        for dimension_index in range(len(self.__position)):
            temp_acceleration_delta = (-12 * destination_position[dimension_index] + 6 * destination_velocity[dimension_index] * merge_time_index_offset + 12 * self_position[dimension_index] + 6 * self_velocity[dimension_index] * merge_time_index_offset) / (merge_time_index_offset**3)
            temp_acceleration = (destination_velocity[dimension_index] - self_velocity[dimension_index]) / merge_time_index_offset - 0.5 * temp_acceleration_delta * merge_time_index_offset
            acceleration_delta.append(temp_acceleration_delta)
            acceleration.append(temp_acceleration)
        self.__position = self_position
        self.__velocity = self_velocity
        self.__acceleration = tuple(acceleration)
        self.__acceleration_delta = tuple(acceleration_delta)
        self.__acceleration_delta_end_time_index = merge_time_index_offset
        self.__acceleration_delta_end_time_index_acceleration = destination_acceleration
        self.__time_index_offset = -current_time_index
class DotPlotter():
    """Interactive matplotlib scatter view over a collection of Dot objects.

    Owns the plot bounds and applies wall-collision rules on each refresh:
    a dot below the bottom edge bounces (y-velocity flips) and a dot past
    the left/right edge reflects (x-velocity flips).
    """

    def __init__(self, minimum_position: Tuple[float, float], maximum_position: Tuple[float, float]):
        self.__minimum_position = minimum_position
        self.__maximum_position = maximum_position
        self.__dots = []  # type: List[Dot]
        # Current scatter coordinates; mutated in place by refresh().
        self.__x = []
        self.__y = []
        # Populated by show(); refresh() requires show() to have been called.
        self.__figure = None
        self.__scatter = None

    def add_dot(self, *, dot: Dot):
        """Register another dot to be drawn and collision-checked."""
        self.__dots.append(dot)

    def __get_scatter(self, *, time_index: float) -> Tuple[List[float], List[float]]:
        """Compute (xs, ys) at ``time_index``, applying bounce/reflect rules."""
        scatter = ([], [])
        for dot in self.__dots:
            position = dot.get_position(
                time_index=time_index
            )
            # NOTE(review): the collision is applied to the dot's state, but
            # the pre-collision position is still plotted for this frame.
            if position[1] < self.__minimum_position[1]:
                dot.bounce(
                    time_index=time_index
                )
            if position[0] < self.__minimum_position[0] or position[0] > self.__maximum_position[0]:
                dot.reflect(
                    time_index=time_index
                )
            scatter[0].append(position[0])
            scatter[1].append(position[1])
            # Debug trace: prints every dot on every frame.
            print(f"position: {position}")
        return scatter

    def show(self):
        """Open the interactive figure window (non-blocking, via plt.ion())."""
        plt.ion()
        self.__figure, ax = plt.subplots()
        self.__scatter = ax.scatter(self.__x, self.__y, facecolors="none", edgecolors=["black", "red"], s=10)
        plt.xlim(self.__minimum_position[0], self.__maximum_position[0])
        plt.ylim(self.__minimum_position[1], self.__maximum_position[1])
        plt.draw()

    def refresh(self, *, time_index: float):
        """Recompute dot positions for ``time_index`` and redraw the scatter."""
        x, y = self.__get_scatter(
            time_index=time_index
        )
        self.__x.clear()
        self.__x.extend(x)
        self.__y.clear()
        self.__y.extend(y)
        self.__scatter.set_offsets(np.c_[self.__x, self.__y])
        self.__figure.canvas.draw_idle()
        # Brief pause lets the GUI event loop process the redraw.
        plt.pause(0.01)
class LatencyPositionTest(unittest.TestCase):
    """Interactive demos of Dot kinematics under simulated latency.

    NOTE(review): apart from test_initialize, these are visual/manual
    tests — each animates for tens of seconds and blocks on
    plt.waitforbuttonpress(), so they are unsuited to headless CI.
    """

    def test_initialize(self):
        """A DotPlotter can be constructed with plot bounds."""
        dot_plotter = DotPlotter(
            minimum_position=(0, 0),
            maximum_position=(10, 10)
        )
        self.assertIsNotNone(dot_plotter)

    def test_move_dot_along_path(self):
        """Animate one dot under constant gravity, bouncing off the floor."""
        dot_plotter = DotPlotter(
            minimum_position=(0, 0),
            maximum_position=(10, 10)
        )
        dot = Dot(
            position=(1, 9),
            velocity=(1, 0),
            acceleration=(0, -1)
        )
        dot_plotter.add_dot(
            dot=dot
        )
        dot_plotter.show()
        print(f"refreshing")
        time_index = 0.0
        time_index_delta = 0.05
        while time_index < 20.0:
            dot_plotter.refresh(
                time_index=time_index
            )
            time_index += time_index_delta
        plt.waitforbuttonpress()

    def test_move_dot_along_path_in_separate_windows(self):
        """Animate identical dots in two independent plot windows."""
        dot_plotters_total = 2
        dot_plotters = []
        for dot_plotter_index in range(dot_plotters_total):
            dot_plotter = DotPlotter(
                minimum_position=(0, 0),
                maximum_position=(10, 10)
            )
            dot = Dot(
                position=(1, 9),
                velocity=(1, 0),
                acceleration=(0, -1)
            )
            dot_plotter.add_dot(
                dot=dot
            )
            dot_plotter.show()
            dot_plotters.append(dot_plotter)
        print(f"refreshing")
        time_index = 0.0
        time_index_delta = 0.05
        while time_index < 10.0:
            # Both windows are driven from the same time index.
            for dot_plotter in dot_plotters:
                dot_plotter.refresh(
                    time_index=time_index
                )
            time_index += time_index_delta
        plt.waitforbuttonpress()

    def test_move_dot_along_path_then_alter_state(self):
        """Replace the dot's full kinematic state halfway through the run."""
        dot_plotter = DotPlotter(
            minimum_position=(0, 0),
            maximum_position=(10, 10)
        )
        dot = Dot(
            position=(1, 9),
            velocity=(1, 0),
            acceleration=(0, -1)
        )

        def alter_dot(*, time_index: float):
            # Keep the current position but reverse course upward.
            nonlocal dot
            dot.set_state(
                position=dot.get_position(
                    time_index=time_index
                ),
                velocity=(-1, 1),
                acceleration=(0, -1),
                time_index=time_index
            )

        dot_plotter.add_dot(
            dot=dot
        )
        dot_plotter.show()
        print(f"refreshing")
        time_index = 0.0
        time_index_delta = 0.05
        maximum_time_index = 20.0
        is_altered = False
        while time_index < maximum_time_index:
            dot_plotter.refresh(
                time_index=time_index
            )
            time_index += time_index_delta
            if not is_altered and time_index > maximum_time_index / 2.0:
                alter_dot(
                    time_index=time_index
                )
                is_altered = True
        plt.waitforbuttonpress()

    def test_move_dot_along_path_then_set_acceleration_delta(self):
        """Apply a constant jerk (acceleration ramp) partway through the run."""
        dot_plotter = DotPlotter(
            minimum_position=(0, 0),
            maximum_position=(10, 10)
        )
        dot = Dot(
            position=(1, 9),
            velocity=(1, 0),
            acceleration=(0, -1)
        )

        def alter_dot(*, time_index: float):
            nonlocal dot
            dot.set_acceleration_delta(
                time_index=time_index,
                acceleration_delta=(0, 0.5),
                end_time_index=5.0
            )

        dot_plotter.add_dot(
            dot=dot
        )
        dot_plotter.show()
        print(f"refreshing")
        time_index = 0.0
        time_index_delta = 0.05
        maximum_time_index = 30.0
        alter_time_index = 10.0
        is_altered = False
        while time_index < maximum_time_index:
            dot_plotter.refresh(
                time_index=time_index
            )
            time_index += time_index_delta
            if not is_altered and time_index > alter_time_index:
                alter_dot(
                    time_index=time_index
                )
                is_altered = True
        plt.waitforbuttonpress()

    def test_move_two_dots_along_path_in_same_windows(self):
        """Animate two dots with different x-velocities in one window."""
        dots_total = 2
        dots = []
        dot_plotter = DotPlotter(
            minimum_position=(0, 0),
            maximum_position=(10, 10)
        )
        for dot_index in range(dots_total):
            dot = Dot(
                position=(1, 9),
                velocity=(dot_index + 1, 0),
                acceleration=(0, -1)
            )
            dot_plotter.add_dot(
                dot=dot
            )
            dots.append(dot)
        dot_plotter.show()
        print(f"refreshing")
        time_index = 0.0
        time_index_delta = 0.05
        maximum_time_index = 20.0
        while time_index < maximum_time_index:
            dot_plotter.refresh(
                time_index=time_index
            )
            time_index += time_index_delta
        plt.waitforbuttonpress()

    def test_move_two_dots_along_path_in_same_windows_but_first_gets_acceleration_delta(self):
        """Two initially identical dots; the first gets a jerk ramp mid-run."""
        dots_total = 2
        dots = []
        dot_plotter = DotPlotter(
            minimum_position=(0, 0),
            maximum_position=(10, 10)
        )
        for dot_index in range(dots_total):
            dot = Dot(
                position=(1, 9),
                velocity=(1, 0),
                acceleration=(0, -1)
            )
            dot_plotter.add_dot(
                dot=dot
            )
            dots.append(dot)
        dot_plotter.show()

        def alter_dot(*, time_index: float):
            nonlocal dots
            dots[0].set_acceleration_delta(
                time_index=time_index,
                acceleration_delta=(0, 0.5),
                end_time_index=5.0
            )

        print(f"refreshing")
        time_index = 0.0
        time_index_delta = 0.05
        maximum_time_index = 30.0
        alter_time_index = 10.0
        is_altered = False
        while time_index < maximum_time_index:
            dot_plotter.refresh(
                time_index=time_index
            )
            time_index += time_index_delta
            if not is_altered and time_index > alter_time_index:
                alter_dot(
                    time_index=time_index
                )
                is_altered = True
        plt.waitforbuttonpress()

    def test_move_two_dots_along_path_in_same_windows_second_merges_specific_time_index_after_first_altered(self):
        """Latency demo: alter dot 0, then have dot 1 merge to its state.

        The second dot simulates a delayed observer: one second after the
        first dot changes course, the second smoothly steers to coincide
        with the first one merge-window later.
        """
        dots_total = 2
        dots = []
        dot_plotter = DotPlotter(
            minimum_position=(0, 0),
            maximum_position=(10, 10)
        )
        for dot_index in range(dots_total):
            dot = Dot(
                position=(1, 9),
                velocity=(1, 0),
                acceleration=(0, -1)
            )
            dot_plotter.add_dot(
                dot=dot
            )
            dots.append(dot)
        dot_plotter.show()

        def alter_dot(*, time_index: float):
            nonlocal dots
            # Disabled variant kept for experimentation: alter via a jerk
            # ramp instead of an instantaneous velocity change.
            if False:
                dots[0].set_acceleration_delta(
                    time_index=time_index,
                    acceleration_delta=(0, 0.5),
                    end_time_index=1.0
                )
            else:
                dots[0].set_velocity(
                    velocity=(-1, 1)
                )

        def merge_dot(*, time_index: float):
            nonlocal dots
            dots[1].merge(
                dot=dots[0],
                current_time_index=time_index,
                merge_time_index_offset=1.0
            )

        print(f"refreshing")
        time_index = 0.0
        time_index_delta = 0.01
        maximum_time_index = 30.0
        alter_time_index = 10.0
        merge_time_index = 11.0
        is_altered = False
        is_merged = False
        while time_index < maximum_time_index:
            dot_plotter.refresh(
                time_index=time_index
            )
            time_index += time_index_delta
            if not is_altered and time_index > alter_time_index:
                alter_dot(
                    time_index=time_index
                )
                is_altered = True
            if not is_merged and time_index > merge_time_index:
                merge_dot(
                    time_index=time_index
                )
                is_merged = True
        plt.waitforbuttonpress()
| 28.305228 | 276 | 0.742985 | from __future__ import annotations
import unittest
import time
import matplotlib.pyplot as plt
import numpy as np
from typing import List, Tuple, Dict, Set, Callable, Type
class Dot():
    """A 2-D particle whose trajectory is evaluated analytically.

    The dot stores a reference state (position, velocity, acceleration)
    valid at internal time 0; every query maps the caller's ``time_index``
    onto the internal clock via ``__time_index_offset`` and evaluates the
    kinematic equations directly, so no per-frame integration is needed.

    Optionally a constant acceleration change per time unit (a "jerk",
    ``__acceleration_delta``) is active on the internal interval
    ``[0, __acceleration_delta_end_time_index)``; afterwards motion
    continues with ``__acceleration_delta_end_time_index_acceleration``.
    """

    def __init__(self, position: Tuple[float, float], velocity: Tuple[float, float], acceleration: Tuple[float, float]):
        self.__position = position
        self.__velocity = velocity
        self.__acceleration = acceleration
        # Added to every incoming time_index; lets bounce()/reflect()/
        # set_state() rebase the trajectory without touching the caller's clock.
        self.__time_index_offset = 0
        self.__acceleration_delta = None
        self.__acceleration_delta_end_time_index = None
        self.__acceleration_delta_end_time_index_acceleration = None

    def set_position(self, *, position: Tuple[float, float]):
        """Replace the reference position."""
        self.__position = position

    # Backward-compatible alias preserving the original (misspelled) name.
    set_positiion = set_position

    def set_velocity(self, *, velocity: Tuple[float, float]):
        """Replace the reference velocity."""
        self.__velocity = velocity

    def set_acceleration(self, *, acceleration: Tuple[float, float]):
        """Replace the reference acceleration."""
        self.__acceleration = acceleration

    def get_position(self, *, time_index: float) -> Tuple[float, float]:
        """Return the position at ``time_index`` (caller's clock)."""
        t = time_index + self.__time_index_offset
        end = self.__acceleration_delta_end_time_index
        position = list(self.__position)
        for i in range(len(position)):
            position[i] += self.__velocity[i] * t
            if end is None:
                position[i] += (self.__acceleration[i] * t ** 2) / 2.0
            elif t < end:
                # Jerk phase: x += a*t^2/2 + j*t^3/6
                position[i] += (self.__acceleration[i] * t ** 2) / 2.0
                position[i] += (self.__acceleration_delta[i] * t ** 3) / 6.0
            else:
                # Past the jerk phase: continue with the stored end
                # acceleration. NOTE(review): this reproduces the original
                # piecewise formula verbatim; the pieces do not appear to
                # join smoothly at t == end — confirm before relying on
                # post-jerk positions.
                position[i] += (self.__acceleration[i] * end ** 2) / 2.0
                position[i] += (self.__acceleration_delta_end_time_index_acceleration[i] * (t - end) ** 2) / 2.0
                position[i] += (self.__acceleration_delta[i] * end ** 3) / 6.0
        return tuple(position)

    def get_velocity(self, *, time_index: float) -> Tuple[float, float]:
        """Return the velocity at ``time_index`` (caller's clock)."""
        t = time_index + self.__time_index_offset
        end = self.__acceleration_delta_end_time_index
        velocity = list(self.__velocity)
        for i in range(len(velocity)):
            if end is None:
                velocity[i] += self.__acceleration[i] * t
            elif t < end:
                # Jerk phase: v += a*t + j*t^2/2
                velocity[i] += self.__acceleration[i] * t
                velocity[i] += (self.__acceleration_delta[i] * t ** 2) / 2.0
            else:
                velocity[i] += self.__acceleration[i] * end
                velocity[i] += self.__acceleration_delta_end_time_index_acceleration[i] * (t - end)
                velocity[i] += (self.__acceleration_delta[i] * end ** 2) / 2.0
        return tuple(velocity)

    def get_acceleration(self, *, time_index: float) -> Tuple[float, float]:
        """Return the effective acceleration at ``time_index``.

        Bug fix: the original computed the per-dimension effective
        acceleration (including any active jerk contribution) and then
        returned ``tuple(self.__acceleration)`` — the *base* acceleration —
        discarding the computed result.
        """
        t = time_index + self.__time_index_offset
        end = self.__acceleration_delta_end_time_index
        acceleration = [0] * len(self.__position)
        for i in range(len(acceleration)):
            if end is None:
                acceleration[i] += self.__acceleration[i]
            elif t < end:
                # Jerk phase: a(t) = a0 + j*t
                acceleration[i] += self.__acceleration[i]
                acceleration[i] += self.__acceleration_delta[i] * t
            else:
                acceleration[i] += self.__acceleration_delta_end_time_index_acceleration[i]
                acceleration[i] += self.__acceleration_delta[i] * end
        return tuple(acceleration)

    def __rebase(self, *, time_index: float):
        """Make ``time_index`` the new internal t=0.

        Shrinks any active jerk window by the elapsed internal time, drops
        the window once it has expired, then shifts the clock offset.
        (Shared by bounce(), reflect() and set_state().)
        """
        elapsed = time_index + self.__time_index_offset
        if self.__acceleration_delta_end_time_index is not None:
            self.__acceleration_delta_end_time_index -= elapsed
            if self.__acceleration_delta_end_time_index <= 0:
                self.__acceleration_delta = None
                self.__acceleration_delta_end_time_index = None
                self.__acceleration_delta_end_time_index_acceleration = None
        self.__time_index_offset = -time_index

    def bounce(self, *, time_index: float):
        """Reverse the vertical velocity at ``time_index`` (floor bounce)."""
        position = self.get_position(time_index=time_index)
        velocity = self.get_velocity(time_index=time_index)
        acceleration = self.get_acceleration(time_index=time_index)
        self.__position = position
        self.__velocity = (velocity[0], -velocity[1])
        self.__acceleration = acceleration
        self.__rebase(time_index=time_index)

    def reflect(self, *, time_index: float):
        """Reverse the horizontal velocity at ``time_index`` (wall hit)."""
        position = self.get_position(time_index=time_index)
        velocity = self.get_velocity(time_index=time_index)
        acceleration = self.get_acceleration(time_index=time_index)
        self.__position = position
        self.__velocity = (-velocity[0], velocity[1])
        self.__acceleration = acceleration
        self.__rebase(time_index=time_index)

    def set_state(self, *, position: Tuple[float, float], velocity: Tuple[float, float], acceleration: Tuple[float, float], time_index: float):
        """Install a new reference state, valid from ``time_index`` on."""
        self.__position = position
        self.__velocity = velocity
        self.__acceleration = acceleration
        self.__rebase(time_index=time_index)

    def set_acceleration_delta(self, *, time_index: float, acceleration_delta: Tuple[float, float], end_time_index: float):
        """Start a constant jerk phase at ``time_index``.

        ``acceleration_delta`` is applied per time unit for
        ``end_time_index`` time units (measured from ``time_index``);
        afterwards the acceleration captured here is used.
        """
        position = self.get_position(time_index=time_index)
        velocity = self.get_velocity(time_index=time_index)
        acceleration = self.get_acceleration(time_index=time_index)
        self.__position = position
        self.__velocity = velocity
        self.__acceleration = acceleration
        self.__time_index_offset = -time_index
        self.__acceleration_delta = acceleration_delta
        self.__acceleration_delta_end_time_index = end_time_index
        self.__acceleration_delta_end_time_index_acceleration = acceleration

    def merge(self, *, dot: "Dot", current_time_index: float, merge_time_index_offset: float):
        """Steer this dot so it meets ``dot``'s predicted state after
        ``merge_time_index_offset`` time units, via a jerk phase.

        NOTE(review): the coefficient formula is kept verbatim from the
        original; the signs on the position terms look unusual for a cubic
        blend — confirm against the intended derivation.
        """
        T = merge_time_index_offset
        self_position = self.get_position(time_index=current_time_index)
        self_velocity = self.get_velocity(time_index=current_time_index)
        destination_position = dot.get_position(time_index=current_time_index + T)
        destination_velocity = dot.get_velocity(time_index=current_time_index + T)
        destination_acceleration = dot.get_acceleration(time_index=current_time_index + T)
        acceleration_delta = []
        acceleration = []
        for i in range(len(self.__position)):
            jerk = (-12 * destination_position[i] + 6 * destination_velocity[i] * T + 12 * self_position[i] + 6 * self_velocity[i] * T) / (T ** 3)
            accel = (destination_velocity[i] - self_velocity[i]) / T - 0.5 * jerk * T
            acceleration_delta.append(jerk)
            acceleration.append(accel)
        self.__position = self_position
        self.__velocity = self_velocity
        self.__acceleration = tuple(acceleration)
        self.__acceleration_delta = tuple(acceleration_delta)
        self.__acceleration_delta_end_time_index = T
        self.__acceleration_delta_end_time_index_acceleration = destination_acceleration
        self.__time_index_offset = -current_time_index
class DotPlotter():
    """Animates Dot instances in an interactive matplotlib scatter plot.

    Dots crossing the bottom edge are bounced; dots crossing the left or
    right edge are reflected (see Dot.bounce / Dot.reflect).
    """
    def __init__(self, minimum_position: Tuple[float, float], maximum_position: Tuple[float, float]):
        # Axis-aligned bounding box of the plot area, as (x, y) corners.
        self.__minimum_position = minimum_position
        self.__maximum_position = maximum_position
        self.__dots = []
        # Current frame's coordinates, kept as the data behind the scatter.
        self.__x = []
        self.__y = []
        self.__figure = None
        self.__scatter = None
    def add_dot(self, *, dot: Dot):
        # Register a dot to be drawn on every refresh().
        self.__dots.append(dot)
    def __get_scatter(self, *, time_index: float) -> Tuple[List[float], List[float]]:
        # Returns ([x...], [y...]) for all dots at time_index.
        # Note: the position appended for this frame is the one computed
        # *before* any bounce()/reflect(), so a dot crossing an edge is
        # drawn slightly outside the box for one frame.
        scatter = ([], [])
        for dot in self.__dots:
            position = dot.get_position(
                time_index=time_index
            )
            # Bottom edge: flip vertical velocity.
            if position[1] < self.__minimum_position[1]:
                dot.bounce(
                    time_index=time_index
                )
            # Left/right edges: flip horizontal velocity.
            if position[0] < self.__minimum_position[0] or position[0] > self.__maximum_position[0]:
                dot.reflect(
                    time_index=time_index
                )
            scatter[0].append(position[0])
            scatter[1].append(position[1])
            print(f"position: {position}")
        return scatter
    def show(self):
        """Open the interactive figure window (non-blocking)."""
        plt.ion()
        self.__figure, ax = plt.subplots()
        self.__scatter = ax.scatter(self.__x, self.__y, facecolors="none", edgecolors=["black", "red"], s=10)
        plt.xlim(self.__minimum_position[0], self.__maximum_position[0])
        plt.ylim(self.__minimum_position[1], self.__maximum_position[1])
        plt.draw()
    def refresh(self, *, time_index: float):
        """Recompute all dot positions for time_index and redraw the plot."""
        x, y = self.__get_scatter(
            time_index=time_index
        )
        self.__x.clear()
        self.__x.extend(x)
        self.__y.clear()
        self.__y.extend(y)
        self.__scatter.set_offsets(np.c_[self.__x, self.__y])
        self.__figure.canvas.draw_idle()
        plt.pause(0.01)
class LatencyPositionTest(unittest.TestCase):
def test_initialize(self):
dot_plotter = DotPlotter(
minimum_position=(0, 0),
maximum_position=(10, 10)
)
self.assertIsNotNone(dot_plotter)
def test_move_dot_along_path(self):
dot_plotter = DotPlotter(
minimum_position=(0, 0),
maximum_position=(10, 10)
)
dot = Dot(
position=(1, 9),
velocity=(1, 0),
acceleration=(0, -1)
)
dot_plotter.add_dot(
dot=dot
)
dot_plotter.show()
print(f"refreshing")
time_index = 0.0
time_index_delta = 0.05
while time_index < 20.0:
dot_plotter.refresh(
time_index=time_index
)
time_index += time_index_delta
plt.waitforbuttonpress()
def test_move_dot_along_path_in_separate_windows(self):
dot_plotters_total = 2
dot_plotters = []
for dot_plotter_index in range(dot_plotters_total):
dot_plotter = DotPlotter(
minimum_position=(0, 0),
maximum_position=(10, 10)
)
dot = Dot(
position=(1, 9),
velocity=(1, 0),
acceleration=(0, -1)
)
dot_plotter.add_dot(
dot=dot
)
dot_plotter.show()
dot_plotters.append(dot_plotter)
print(f"refreshing")
time_index = 0.0
time_index_delta = 0.05
while time_index < 10.0:
for dot_plotter in dot_plotters:
dot_plotter.refresh(
time_index=time_index
)
time_index += time_index_delta
plt.waitforbuttonpress()
def test_move_dot_along_path_then_alter_state(self):
dot_plotter = DotPlotter(
minimum_position=(0, 0),
maximum_position=(10, 10)
)
dot = Dot(
position=(1, 9),
velocity=(1, 0),
acceleration=(0, -1)
)
def alter_dot(*, time_index: float):
nonlocal dot
dot.set_state(
position=dot.get_position(
time_index=time_index
),
velocity=(-1, 1),
acceleration=(0, -1),
time_index=time_index
)
dot_plotter.add_dot(
dot=dot
)
dot_plotter.show()
print(f"refreshing")
time_index = 0.0
time_index_delta = 0.05
maximum_time_index = 20.0
is_altered = False
while time_index < maximum_time_index:
dot_plotter.refresh(
time_index=time_index
)
time_index += time_index_delta
if not is_altered and time_index > maximum_time_index / 2.0:
alter_dot(
time_index=time_index
)
is_altered = True
plt.waitforbuttonpress()
def test_move_dot_along_path_then_set_acceleration_delta(self):
dot_plotter = DotPlotter(
minimum_position=(0, 0),
maximum_position=(10, 10)
)
dot = Dot(
position=(1, 9),
velocity=(1, 0),
acceleration=(0, -1)
)
def alter_dot(*, time_index: float):
nonlocal dot
dot.set_acceleration_delta(
time_index=time_index,
acceleration_delta=(0, 0.5),
end_time_index=5.0
)
dot_plotter.add_dot(
dot=dot
)
dot_plotter.show()
print(f"refreshing")
time_index = 0.0
time_index_delta = 0.05
maximum_time_index = 30.0
alter_time_index = 10.0
is_altered = False
while time_index < maximum_time_index:
dot_plotter.refresh(
time_index=time_index
)
time_index += time_index_delta
if not is_altered and time_index > alter_time_index:
alter_dot(
time_index=time_index
)
is_altered = True
plt.waitforbuttonpress()
def test_move_two_dots_along_path_in_same_windows(self):
dots_total = 2
dots = []
dot_plotter = DotPlotter(
minimum_position=(0, 0),
maximum_position=(10, 10)
)
for dot_index in range(dots_total):
dot = Dot(
position=(1, 9),
velocity=(dot_index + 1, 0),
acceleration=(0, -1)
)
dot_plotter.add_dot(
dot=dot
)
dots.append(dot)
dot_plotter.show()
print(f"refreshing")
time_index = 0.0
time_index_delta = 0.05
maximum_time_index = 20.0
while time_index < maximum_time_index:
dot_plotter.refresh(
time_index=time_index
)
time_index += time_index_delta
plt.waitforbuttonpress()
def test_move_two_dots_along_path_in_same_windows_but_first_gets_acceleration_delta(self):
dots_total = 2
dots = []
dot_plotter = DotPlotter(
minimum_position=(0, 0),
maximum_position=(10, 10)
)
for dot_index in range(dots_total):
dot = Dot(
position=(1, 9),
velocity=(1, 0),
acceleration=(0, -1)
)
dot_plotter.add_dot(
dot=dot
)
dots.append(dot)
dot_plotter.show()
def alter_dot(*, time_index: float):
nonlocal dots
dots[0].set_acceleration_delta(
time_index=time_index,
acceleration_delta=(0, 0.5),
end_time_index=5.0
)
print(f"refreshing")
time_index = 0.0
time_index_delta = 0.05
maximum_time_index = 30.0
alter_time_index = 10.0
is_altered = False
while time_index < maximum_time_index:
dot_plotter.refresh(
time_index=time_index
)
time_index += time_index_delta
if not is_altered and time_index > alter_time_index:
alter_dot(
time_index=time_index
)
is_altered = True
plt.waitforbuttonpress()
def test_move_two_dots_along_path_in_same_windows_second_merges_specific_time_index_after_first_altered(self):
dots_total = 2
dots = []
dot_plotter = DotPlotter(
minimum_position=(0, 0),
maximum_position=(10, 10)
)
for dot_index in range(dots_total):
dot = Dot(
position=(1, 9),
velocity=(1, 0),
acceleration=(0, -1)
)
dot_plotter.add_dot(
dot=dot
)
dots.append(dot)
dot_plotter.show()
def alter_dot(*, time_index: float):
nonlocal dots
if False:
dots[0].set_acceleration_delta(
time_index=time_index,
acceleration_delta=(0, 0.5),
end_time_index=1.0
)
else:
dots[0].set_velocity(
velocity=(-1, 1)
)
def merge_dot(*, time_index: float):
nonlocal dots
dots[1].merge(
dot=dots[0],
current_time_index=time_index,
merge_time_index_offset=1.0
)
print(f"refreshing")
time_index = 0.0
time_index_delta = 0.01
maximum_time_index = 30.0
alter_time_index = 10.0
merge_time_index = 11.0
is_altered = False
is_merged = False
while time_index < maximum_time_index:
dot_plotter.refresh(
time_index=time_index
)
time_index += time_index_delta
if not is_altered and time_index > alter_time_index:
alter_dot(
time_index=time_index
)
is_altered = True
if not is_merged and time_index > merge_time_index:
merge_dot(
time_index=time_index
)
is_merged = True
plt.waitforbuttonpress()
| true | true |
f723c14378cc4d16c4baa11917ffffcdd73ab43e | 190 | py | Python | mmdet3d/ops/paconv/__init__.py | maskjp/mmdetection3d | 98f332372b1a4c82bc2d57588a5d764f4176c869 | [
"Apache-2.0"
] | 1 | 2022-03-04T19:29:42.000Z | 2022-03-04T19:29:42.000Z | mmdet3d/ops/paconv/__init__.py | maskjp/mmdetection3d | 98f332372b1a4c82bc2d57588a5d764f4176c869 | [
"Apache-2.0"
] | null | null | null | mmdet3d/ops/paconv/__init__.py | maskjp/mmdetection3d | 98f332372b1a4c82bc2d57588a5d764f4176c869 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) OpenMMLab. All rights reserved.
from .assign_score import assign_score_withk
from .paconv import PAConv, PAConvCUDA
__all__ = ['assign_score_withk', 'PAConv', 'PAConvCUDA']
| 31.666667 | 56 | 0.784211 |
from .assign_score import assign_score_withk
from .paconv import PAConv, PAConvCUDA
__all__ = ['assign_score_withk', 'PAConv', 'PAConvCUDA']
| true | true |
f723c28d10fea756d280aba926ac651ddd9a5b0d | 354 | py | Python | wiser_care_theme/wiser_care_theme/doctype/wiser_website_settings/wiser_website_settings.py | MostafaFekry/wiser_care_theme | 9892442803dffeeb5e02136c87c2eb4cc9144b60 | [
"MIT"
] | null | null | null | wiser_care_theme/wiser_care_theme/doctype/wiser_website_settings/wiser_website_settings.py | MostafaFekry/wiser_care_theme | 9892442803dffeeb5e02136c87c2eb4cc9144b60 | [
"MIT"
] | null | null | null | wiser_care_theme/wiser_care_theme/doctype/wiser_website_settings/wiser_website_settings.py | MostafaFekry/wiser_care_theme | 9892442803dffeeb5e02136c87c2eb4cc9144b60 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Systematic and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class WiserWebsiteSettings(Document):
	"""Frappe DocType controller for the Wiser Care theme's website settings."""
	def on_update(self):
		# Invalidate the cached home page so saved settings take effect
		# on the next request.
		from frappe.website.render import clear_cache
		clear_cache("index")
| 27.230769 | 49 | 0.788136 |
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
class WiserWebsiteSettings(Document):
def on_update(self):
from frappe.website.render import clear_cache
clear_cache("index")
| true | true |
f723c4bdd9779dd352fdad7e5c8f3b6996c4c2ec | 982 | py | Python | data-pipline.py | helloxms/Disaster-Response-Pipline-Udacity | cd623ec339b766a7e91fe2e35de95062885c07de | [
"MIT"
] | null | null | null | data-pipline.py | helloxms/Disaster-Response-Pipline-Udacity | cd623ec339b766a7e91fe2e35de95062885c07de | [
"MIT"
] | null | null | null | data-pipline.py | helloxms/Disaster-Response-Pipline-Udacity | cd623ec339b766a7e91fe2e35de95062885c07de | [
"MIT"
] | null | null | null | # import packages
import sys
def load_data(data_file):
# read in file
# clean data
# load to database
# define features and label arrays
return X, y
def build_model():
# text processing and model pipeline
# define parameters for GridSearchCV
# create gridsearch object and return as final model pipeline
return model_pipeline
def train(X, y, model):
# train test split
# fit model
# output model test results
return model
def export_model(model):
    """Persist the trained model (template placeholder).

    Fix: the original left this function without any statement in its body
    (only a comment), which is a SyntaxError for the whole module.
    """
    # Export model as a pickle file
    pass
def run_pipeline(data_file):
X, y = load_data(data_file) # run ETL pipeline
model = build_model() # build model pipeline
model = train(X, y, model) # train model pipeline
export_model(model) # save model
if __name__ == '__main__':
data_file = sys.argv[1] # get filename of dataset
run_pipeline(data_file) # run data pipeline | 16.098361 | 66 | 0.62831 |
import sys
def load_data(data_file):
return X, y
def build_model():
return model_pipeline
def train(X, y, model):
return model
def export_model(model):
    # Fix: a `def` immediately followed by another `def` (empty body) is a
    # SyntaxError; keep the template stub valid with an explicit pass.
    pass
def run_pipeline(data_file):
X, y = load_data(data_file)
model = build_model()
model = train(X, y, model)
export_model(model)
if __name__ == '__main__':
data_file = sys.argv[1]
run_pipeline(data_file) | false | true |
f723c54aee85d58c016b328753e3fa153669b6d6 | 2,533 | py | Python | voctocore/tests/test_audiomix_multiple_sources.py | 0xflotus/voctomix | 3156f3546890e6ae8d379df17e5cc718eee14b15 | [
"MIT"
] | 521 | 2015-01-07T21:43:30.000Z | 2022-03-17T22:07:13.000Z | voctocore/tests/test_audiomix_multiple_sources.py | 0xflotus/voctomix | 3156f3546890e6ae8d379df17e5cc718eee14b15 | [
"MIT"
] | 241 | 2015-05-27T10:11:09.000Z | 2022-02-11T03:29:20.000Z | voctocore/tests/test_audiomix_multiple_sources.py | 0xflotus/voctomix | 3156f3546890e6ae8d379df17e5cc718eee14b15 | [
"MIT"
] | 111 | 2015-08-13T20:06:52.000Z | 2022-03-11T09:48:46.000Z | import unittest
from lib.errors.configuration_error import ConfigurationError
from tests.helper.voctomix_test import VoctomixTest
from lib.audiomix import AudioMix
from lib.config import Config
# noinspection PyUnusedLocal
class AudiomixMultipleSources(VoctomixTest):
    """AudioMix behavior with several sources (cam1, cam2, grabber).

    Covers default volume assignment, the `[mix] audiosource` option,
    per-source `volume` options, conflicting/invalid configurations, and
    the resulting `[audio] volumecontrol` UI flag.
    Assumes VoctomixTest resets Config between tests — confirm in helper.
    """
    def test_no_configured_audiosource_sets_first_to_full(self):
        # No config at all: the first source gets full volume, rest muted.
        audiomixer = AudioMix()
        self.assertListEqual(audiomixer.names, ["cam1", "cam2", "grabber"])
        self.assertListEqual(audiomixer.volumes, [1.0, 0.0, 0.0])
    def test_audiosource_sets_source_volume_to_full(self):
        Config.given("mix", "audiosource", "cam2")
        audiomixer = AudioMix()
        self.assertListEqual(audiomixer.names, ["cam1", "cam2", "grabber"])
        self.assertListEqual(audiomixer.volumes, [0.0, 1.0, 0.0])
    def test_per_source_volumes_set_volumes_to_configured_level(self):
        Config.given("source.cam1", "volume", "0.23")
        Config.given("source.cam2", "volume", "0.0")
        Config.given("source.grabber", "volume", "0.42")
        audiomixer = AudioMix()
        self.assertListEqual(audiomixer.names, ["cam1", "cam2", "grabber"])
        self.assertListEqual(audiomixer.volumes, [0.23, 0.0, 0.42])
    def test_audiosource_together_with_per_source_volumes_for_the_same_source_raises_an_error(self):
        # Mixing the global audiosource option with per-source volumes is
        # ambiguous and must be rejected.
        Config.given("mix", "audiosource", "cam1")
        Config.given("source.cam1", "volume", "0.23")
        with self.assertRaises(ConfigurationError):
            audiomixer = AudioMix()
    def test_audiosource_together_with_per_source_volumes_for_different_sources_raises_an_error(self):
        Config.given("mix", "audiosource", "cam2")
        Config.given("source.cam1", "volume", "0.23")
        with self.assertRaises(ConfigurationError):
            audiomixer = AudioMix()
    def test_invalid_audiosource_raises_an_error(self):
        Config.given("mix", "audiosource", "camInvalid")
        with self.assertRaises(ConfigurationError):
            audiomixer = AudioMix()
    def test_configuring_audiosource_disables_ui_audio_selector(self):
        Config.given("mix", "audiosource", "cam1")
        audiomixer = AudioMix()
        self.assertEqual(Config.getboolean('audio', 'volumecontrol'), False)
    def test_configuring_per_source_volumes_disables_ui_audio_selector(self):
        Config.given("source.cam1", "volume", "1.0")
        audiomixer = AudioMix()
        self.assertEqual(Config.getboolean('audio', 'volumecontrol'), False)
if __name__ == '__main__':
unittest.main()
| 36.185714 | 102 | 0.698776 | import unittest
from lib.errors.configuration_error import ConfigurationError
from tests.helper.voctomix_test import VoctomixTest
from lib.audiomix import AudioMix
from lib.config import Config
class AudiomixMultipleSources(VoctomixTest):
def test_no_configured_audiosource_sets_first_to_full(self):
audiomixer = AudioMix()
self.assertListEqual(audiomixer.names, ["cam1", "cam2", "grabber"])
self.assertListEqual(audiomixer.volumes, [1.0, 0.0, 0.0])
def test_audiosource_sets_source_volume_to_full(self):
Config.given("mix", "audiosource", "cam2")
audiomixer = AudioMix()
self.assertListEqual(audiomixer.names, ["cam1", "cam2", "grabber"])
self.assertListEqual(audiomixer.volumes, [0.0, 1.0, 0.0])
def test_per_source_volumes_set_volumes_to_configured_level(self):
Config.given("source.cam1", "volume", "0.23")
Config.given("source.cam2", "volume", "0.0")
Config.given("source.grabber", "volume", "0.42")
audiomixer = AudioMix()
self.assertListEqual(audiomixer.names, ["cam1", "cam2", "grabber"])
self.assertListEqual(audiomixer.volumes, [0.23, 0.0, 0.42])
def test_audiosource_together_with_per_source_volumes_for_the_same_source_raises_an_error(self):
Config.given("mix", "audiosource", "cam1")
Config.given("source.cam1", "volume", "0.23")
with self.assertRaises(ConfigurationError):
audiomixer = AudioMix()
def test_audiosource_together_with_per_source_volumes_for_different_sources_raises_an_error(self):
Config.given("mix", "audiosource", "cam2")
Config.given("source.cam1", "volume", "0.23")
with self.assertRaises(ConfigurationError):
audiomixer = AudioMix()
def test_invalid_audiosource_raises_an_error(self):
Config.given("mix", "audiosource", "camInvalid")
with self.assertRaises(ConfigurationError):
audiomixer = AudioMix()
def test_configuring_audiosource_disables_ui_audio_selector(self):
Config.given("mix", "audiosource", "cam1")
audiomixer = AudioMix()
self.assertEqual(Config.getboolean('audio', 'volumecontrol'), False)
def test_configuring_per_source_volumes_disables_ui_audio_selector(self):
Config.given("source.cam1", "volume", "1.0")
audiomixer = AudioMix()
self.assertEqual(Config.getboolean('audio', 'volumecontrol'), False)
if __name__ == '__main__':
unittest.main()
| true | true |
f723c5c9d3b222d7776eada4ac0bace2610445b6 | 450 | py | Python | V2RaycSpider1225/BusinessCentralLayer/middleware/work_io.py | kujie0121/V2RayCloudSpider | 32cd65f7811374679fe09cfae2fda805d42fe7ab | [
"MIT"
] | 1 | 2021-02-17T07:51:09.000Z | 2021-02-17T07:51:09.000Z | V2RaycSpider1225/BusinessCentralLayer/middleware/work_io.py | kujie0121/V2RayCloudSpider | 32cd65f7811374679fe09cfae2fda805d42fe7ab | [
"MIT"
] | null | null | null | V2RaycSpider1225/BusinessCentralLayer/middleware/work_io.py | kujie0121/V2RayCloudSpider | 32cd65f7811374679fe09cfae2fda805d42fe7ab | [
"MIT"
] | null | null | null | __all__ = ['Middleware']
from gevent.queue import Queue
# Working "stack": process-wide queues shared between components.
# (Comments translated to English from the original Chinese.)
class Middleware:
    """Shared in-memory containers used to pass work items between workers."""
    # cache of redis
    zeus = Queue()
    # Trash
    apollo = Queue()
    theseus = {}
    # Shared task queues
    poseidon = Queue()
    hera = Queue()
    # FIXME
    # Unexplained bug: a variable built with the dict(zip()) approach,
    # although apparently identical, produced different output after going
    # through the same function — hence the explicit literal below.
    cache_redis_queue = {'ssr': {}, 'v2ray': {}}
    # cache_redis_queue = dict(zip(CRAWLER_SEQUENCE, [{}] * CRAWLER_SEQUENCE.__len__()))
| 16.666667 | 88 | 0.6 | __all__ = ['Middleware']
from gevent.queue import Queue
class Middleware:
zeus = Queue()
apollo = Queue()
theseus = {}
poseidon = Queue()
hera = Queue()
cache_redis_queue = {'ssr': {}, 'v2ray': {}}
| true | true |
f723c61d42e207f1fe86c447483a8df191033920 | 2,661 | py | Python | tests/test_basics.py | JunyongYao/flask-backend-seed | 9d16f56a9f34ebb1ec32eaab800b7ad6b10d0c9d | [
"MIT"
] | 9 | 2017-10-20T09:26:09.000Z | 2021-01-28T02:54:43.000Z | tests/test_basics.py | JunyongYao/flask-backend-seed | 9d16f56a9f34ebb1ec32eaab800b7ad6b10d0c9d | [
"MIT"
] | 2 | 2018-03-06T06:27:53.000Z | 2018-04-19T01:47:38.000Z | tests/test_basics.py | JunyongYao/flask-backend-seed | 9d16f56a9f34ebb1ec32eaab800b7ad6b10d0c9d | [
"MIT"
] | 2 | 2019-07-18T22:32:28.000Z | 2020-06-15T14:10:29.000Z | # -*- coding: utf-8 -*-
import json
import random
import string
import unittest
from flask import current_app
from config import config
from app import create_app, db, redis, add_api_support
class BasicsTestCase(unittest.TestCase):
    """Base test case: fresh Flask test app, database and redis per test.

    Also provides thin HTTP helpers that each return a
    ``(status_code, parsed_body)`` tuple.
    """
    def setUp(self):
        # Build a testing app and start with a clean DB/redis for every test.
        test_app = create_app(config['testing'])
        test_app = add_api_support(test_app)
        self.assertTrue(current_app.config['TESTING'])
        self.app_context = test_app.app_context()
        self.app_context.push()
        self.test_client = test_app.test_client()
        db.drop_all()
        db.create_all()
        redis.flushall()
    def tearDown(self):
        db.session.remove()
        self.app_context.pop()
    @staticmethod
    def _parse_result(res_data):
        # Decode a response body; return parsed JSON, or the raw text when
        # the body is not valid JSON (e.g. a plain-text error page).
        data = str(res_data, "utf-8")
        try:
            ret_data = json.loads(data)
        except ValueError:
            ret_data = data
        return ret_data
    @staticmethod
    def generate_random_string(length):
        # Random lowercase-alphanumeric string of the given length
        # (test data only — not security-sensitive).
        return ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(length))
    def create_test_user(self, name, pwd):
        """Insert and return a UserInfo row with a hashed password."""
        from app.model.userModel import UserInfo
        stored_pwd = UserInfo.generate_sha_pwd(pwd)
        new_user = UserInfo(name=name,
                            sha_pwd=stored_pwd,
                            nickname=self.generate_random_string(5))
        db.session.add(new_user)
        db.session.commit()
        return new_user
    # Generic HTTP helpers: each returns (status_code, parsed_body).
    def get_request(self, url, data=None, header=None):
        response = self.test_client.get(url, data=data, headers=header)
        return response.status_code, self._parse_result(response.data)
    def put_request(self, url, data=None, header=None):
        response = self.test_client.put(url, data=data, headers=header)
        return response.status_code, self._parse_result(response.data)
    def post_request(self, url, data=None, header=None):
        response = self.test_client.post(url, data=data, headers=header)
        return response.status_code, self._parse_result(response.data)
    def delete_request(self, url, data=None, header=None):
        response = self.test_client.delete(url, data=data, headers=header)
        return response.status_code, self._parse_result(response.data)
    # Endpoint-specific shortcuts for the sample API.
    def post_login(self, data):
        url = "/api/sample/login"
        return self.post_request(url, data=data)
    def get_user_info(self, header):
        url = "/api/sample/user_info"
        return self.get_request(url, header=header)
    def put_user_info(self, data, header):
        url = "/api/sample/user_info"
        return self.put_request(url, data=data, header=header)
| 32.45122 | 100 | 0.661405 |
import json
import random
import string
import unittest
from flask import current_app
from config import config
from app import create_app, db, redis, add_api_support
class BasicsTestCase(unittest.TestCase):
def setUp(self):
test_app = create_app(config['testing'])
test_app = add_api_support(test_app)
self.assertTrue(current_app.config['TESTING'])
self.app_context = test_app.app_context()
self.app_context.push()
self.test_client = test_app.test_client()
db.drop_all()
db.create_all()
redis.flushall()
def tearDown(self):
db.session.remove()
self.app_context.pop()
@staticmethod
def _parse_result(res_data):
data = str(res_data, "utf-8")
try:
ret_data = json.loads(data)
except ValueError:
ret_data = data
return ret_data
@staticmethod
def generate_random_string(length):
return ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(length))
def create_test_user(self, name, pwd):
from app.model.userModel import UserInfo
stored_pwd = UserInfo.generate_sha_pwd(pwd)
new_user = UserInfo(name=name,
sha_pwd=stored_pwd,
nickname=self.generate_random_string(5))
db.session.add(new_user)
db.session.commit()
return new_user
def get_request(self, url, data=None, header=None):
response = self.test_client.get(url, data=data, headers=header)
return response.status_code, self._parse_result(response.data)
def put_request(self, url, data=None, header=None):
response = self.test_client.put(url, data=data, headers=header)
return response.status_code, self._parse_result(response.data)
def post_request(self, url, data=None, header=None):
response = self.test_client.post(url, data=data, headers=header)
return response.status_code, self._parse_result(response.data)
def delete_request(self, url, data=None, header=None):
response = self.test_client.delete(url, data=data, headers=header)
return response.status_code, self._parse_result(response.data)
def post_login(self, data):
url = "/api/sample/login"
return self.post_request(url, data=data)
def get_user_info(self, header):
url = "/api/sample/user_info"
return self.get_request(url, header=header)
def put_user_info(self, data, header):
url = "/api/sample/user_info"
return self.put_request(url, data=data, header=header)
| true | true |
f723c69ef811ef7571c423cdbc18fa2896c2bcf8 | 4,253 | py | Python | datagen/meerkat.py | tilleyd/point-proposal-net | 3731984046cd56101238de30a441d610b79bc8b0 | [
"MIT"
] | null | null | null | datagen/meerkat.py | tilleyd/point-proposal-net | 3731984046cd56101238de30a441d610b79bc8b0 | [
"MIT"
] | null | null | null | datagen/meerkat.py | tilleyd/point-proposal-net | 3731984046cd56101238de30a441d610b79bc8b0 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# Simulation parameters for the MeerKAT noise/point-source images.
IMAGE_SIZE = 1024
NUM_SOURCES = 200
NUM_IMAGES = 1
# Field-centre sampling ranges in degrees (used below as J2000 pointing).
RA_MIN, RA_MAX = (-50, 50)
DEC_MIN, DEC_MAX = (-30, 50)
OFFSET_RA = 1.1 # need to fine tune to fit sources into image
OFFSET_DEC = 0.7
#
# measure bg rms
#
from astropy.io import fits
import numpy as np
try:
    # if there is already a background image, use it
    bg = np.squeeze(fits.getdata('output/0-nosource-image.fits'))
    sigma = np.std(bg)
except Exception:
    # First run (no reference image yet): fall back to a hard-coded noise
    # level — presumably measured from an earlier run; verify if the
    # telescope/imaging setup changes.
    print('Warning: no background reference found, using default sigma')
    sigma = 0.000033921602153
#
# create sky models
#
# create an equal number of point sources with flux from 0.33sigma to 10sigma
# with bins of size 0.33
# NOTE(review): np.arange with a float step can drop/keep the endpoint
# unpredictably; the 10.1 upper bound is presumably chosen so the 10.0 bin
# is included — confirm.
bins = np.arange(1.0/3.0, 10.1, 1.0/3.0)
sources_per_bin = NUM_SOURCES // len(bins)
total_sources = sources_per_bin * len(bins)
if NUM_SOURCES != total_sources:
print('Warning: the number of sources is not divisible by the bins')
print(' images will have %d/%d sources' %(total_sources, NUM_SOURCES))
#
# create stimela recipe
#
import stimela
INPUT = 'input'
OUTPUT = 'output'
MSDIR = 'msdir'
MS = 'meerkat.ms'
recipe = stimela.Recipe(name='Make noise image populated with sources',
ms_dir=MSDIR,
JOB_TYPE='udocker')
for img in range(0, NUM_IMAGES):
# determine field centre
centre_ra = np.random.uniform(RA_MIN, RA_MAX)
centre_dec = np.random.uniform(DEC_MIN, DEC_MAX)
# create sky model
with open('input/%d-skymodel.txt' %(img), 'w') as f:
f.write('#format: ra_d dec_d i\n')
for flux in bins:
for i in range(0, sources_per_bin):
ra = np.random.uniform(centre_ra-OFFSET_RA, centre_ra+OFFSET_RA)
dec = np.random.uniform(centre_dec-OFFSET_DEC, centre_dec+OFFSET_DEC)
f.write(' %.2f %.2f %.15f\n' %(ra, dec, flux * sigma))
f.write('~\n')
# create empty measurement set (only once per set of images)
recipe.add('cab/simms',
'simms',
{
"msname" : MS,
"telescope" : "meerkat",
"synthesis" : 1, # exposure time, 1 hour
"dtime" : 60*5, # integration time, 5 minutes
"freq0" : '1400MHz', # starting frequency
"dfreq" : '2.0MHz', # channel width
"nchan" : 10, # number of channels
"direction" : 'J2000,%.1fdeg,%.1fdeg' %(centre_ra, centre_dec), # telescope pointing target
"feed" : 'perfect R L',
"pol" : 'RR RL LR LL',
},
input=INPUT,
output=OUTPUT,
label='simms:: Create empty MS')
for img in range(0, NUM_IMAGES):
# simulate noise into measurement set
recipe.add('cab/simulator',
'simsky',
{
"msname" : MS,
"addnoise" : True,
"sefd" : 450,
"threads" : 16,
"column" : 'DATA',
},
input=INPUT,
output=OUTPUT,
label='simsky:: Simulate sky')
# image the noise
recipe.add('cab/wsclean',
'image',
{
"msname" : MS,
"prefix" : "%d-nosource" %(img), # image prefix
"column" : "DATA",
"weight" : "briggs 1.5",
"cellsize" : 5, # pixel size in arcsec
"npix" : IMAGE_SIZE,
"trim" : 1000,
"niter" : 10000,
},
input=INPUT,
output=OUTPUT,
label='image:: Image data')
# add point sources
recipe.add('cab/tigger_restore',
'add_sources',
{
"input-image" : '%d-nosource-image.fits:output' %(img), # the noise image
"input-skymodel" : '%d-skymodel.txt' %(img), # the input skymodel
"output-image" : '%d-sky.fits' %(img), # output image name
"force" : True,
},
input=INPUT,
output=OUTPUT,
label='add_sources%d' %(img))
recipe.run()
| 30.818841 | 106 | 0.517987 |
# Simulation parameters: image size (pixels), requested sources per image,
# and number of images to generate.
IMAGE_SIZE = 1024
NUM_SOURCES = 200
NUM_IMAGES = 1
# Sky region (degrees) from which each pointing centre is drawn, and the
# half-extent of the box around it in which sources are scattered.
RA_MIN, RA_MAX = (-50, 50)
DEC_MIN, DEC_MAX = (-30, 50)
OFFSET_RA = 1.1
OFFSET_DEC = 0.7
from astropy.io import fits
import numpy as np
# Measure the background RMS from an existing noise-only image if present;
# otherwise fall back to a previously measured default noise level.
try:
    bg = np.squeeze(fits.getdata('output/0-nosource-image.fits'))
    sigma = np.std(bg)
except Exception:
    print('Warning: no background reference found, using default sigma')
    sigma = 0.000033921602153
# Flux bins: equal source counts per bin, from sigma/3 up to 10*sigma in
# steps of sigma/3. Integer division may drop a remainder (warned below).
bins = np.arange(1.0/3.0, 10.1, 1.0/3.0)
sources_per_bin = NUM_SOURCES // len(bins)
total_sources = sources_per_bin * len(bins)
if NUM_SOURCES != total_sources:
    print('Warning: the number of sources is not divisible by the bins')
    print(' images will have %d/%d sources' %(total_sources, NUM_SOURCES))
# Build the stimela recipe that simulates and images the data.
import stimela
INPUT = 'input'
OUTPUT = 'output'
MSDIR = 'msdir'
MS = 'meerkat.ms'
recipe = stimela.Recipe(name='Make noise image populated with sources',
                        ms_dir=MSDIR,
                        JOB_TYPE='udocker')
for img in range(0, NUM_IMAGES):
    # Pick a pointing centre, then write the sky model: one
    # " ra dec flux" row per source, terminated by '~'.
    centre_ra = np.random.uniform(RA_MIN, RA_MAX)
    centre_dec = np.random.uniform(DEC_MIN, DEC_MAX)
    with open('input/%d-skymodel.txt' %(img), 'w') as f:
        f.write('#format: ra_d dec_d i\n')
        for flux in bins:
            for i in range(0, sources_per_bin):
                ra = np.random.uniform(centre_ra-OFFSET_RA, centre_ra+OFFSET_RA)
                dec = np.random.uniform(centre_dec-OFFSET_DEC, centre_dec+OFFSET_DEC)
                f.write(' %.2f %.2f %.15f\n' %(ra, dec, flux * sigma))
        f.write('~\n')
# Create the empty measurement set once, pointed at the last drawn centre.
# NOTE(review): with NUM_IMAGES > 1 this reuses only the final loop's
# centre_ra/centre_dec -- confirm intended.
recipe.add('cab/simms',
           'simms',
           {
               "msname" : MS,
               "telescope" : "meerkat",
               "synthesis" : 1,
               "dtime" : 60*5,
               "freq0" : '1400MHz',
               "dfreq" : '2.0MHz',
               "nchan" : 10,
               "direction" : 'J2000,%.1fdeg,%.1fdeg' %(centre_ra, centre_dec),
               "feed" : 'perfect R L',
               "pol" : 'RR RL LR LL',
           },
           input=INPUT,
           output=OUTPUT,
           label='simms:: Create empty MS')
for img in range(0, NUM_IMAGES):
    # Simulate receiver noise into the MS DATA column.
    recipe.add('cab/simulator',
               'simsky',
               {
                   "msname" : MS,
                   "addnoise" : True,
                   "sefd" : 450,
                   "threads" : 16,
                   "column" : 'DATA',
               },
               input=INPUT,
               output=OUTPUT,
               label='simsky:: Simulate sky')
    # Image the noise-only visibilities.
    recipe.add('cab/wsclean',
               'image',
               {
                   "msname" : MS,
                   "prefix" : "%d-nosource" %(img),
                   "column" : "DATA",
                   "weight" : "briggs 1.5",
                   "cellsize" : 5,
                   "npix" : IMAGE_SIZE,
                   "trim" : 1000,
                   "niter" : 10000,
               },
               input=INPUT,
               output=OUTPUT,
               label='image:: Image data')
    # Restore the sky-model point sources onto the noise image.
    recipe.add('cab/tigger_restore',
               'add_sources',
               {
                   "input-image" : '%d-nosource-image.fits:output' %(img),
                   "input-skymodel" : '%d-skymodel.txt' %(img),
                   "output-image" : '%d-sky.fits' %(img),
                   "force" : True,
               },
               input=INPUT,
               output=OUTPUT,
               label='add_sources%d' %(img))
recipe.run()
| false | true |
f723c6be07f0c68bbd987b45988d3ea7dc170622 | 24,126 | py | Python | refactorings/utils/utils_listener_fast.py | mossj77/CodART | ac83a49a4aa9310b09da12fb476a84586812310b | [
"MIT"
] | 1 | 2021-10-10T23:56:49.000Z | 2021-10-10T23:56:49.000Z | refactorings/utils/utils_listener_fast.py | pouorix/CodART | 84b35a5a14e583d88319d7f6c2de8dc3b3dc83b2 | [
"MIT"
] | null | null | null | refactorings/utils/utils_listener_fast.py | pouorix/CodART | 84b35a5a14e583d88319d7f6c2de8dc3b3dc83b2 | [
"MIT"
] | null | null | null | import re # regular expressions
import antlr4
from antlr4.Token import CommonToken
import antlr4.tree
from antlr4.CommonTokenStream import CommonTokenStream
from typing import List, Optional
from gen.java.JavaParser import JavaParser
from gen.java.JavaParserListener import JavaParserListener
class Program:
    """Top-level container mapping package names to Package objects."""

    def __init__(self):
        self.packages = {}  # package name -> Package

    def __str__(self):
        return "{}".format(self.packages)
class Package:
    """A Java package: its name, its classes keyed by name, and parser context."""

    def __init__(self):
        self.name = None         # package name; None until the listener sets it
        self.classes = {}        # class name -> Class
        self.package_ctx = None  # PackageDeclarationContext, set by the listener

    def __str__(self):
        return "{0} {1}".format(self.name, self.classes)
class TokensInfo:
    """Describes a slice of a token stream; start and stop are inclusive."""

    def __init__(self, parser_context=None):
        if parser_context is None:
            self.token_stream: CommonTokenStream = None
            self.start: int = None
            self.stop: int = None
        else:
            self.token_stream: CommonTokenStream = parser_context.parser.getTokenStream()
            self.start: int = parser_context.start.tokenIndex
            self.stop: int = parser_context.stop.tokenIndex

    def get_token_index(self, tokens: list, start: int, stop: int):
        """Return the sub-list tokens[start:stop] (Python slicing, stop exclusive)."""
        return tokens[start:stop]
class FileInfo:
    """Per-file bookkeeping: the file's package name and import declarations."""

    def __init__(self, filename: str = None, package_name: str = None):
        self.filename: str = filename
        self.package_name: str = package_name
        self.all_imports = []      # every import, in declaration order
        self.package_imports = []  # wildcard imports: ``import pkg.*;``
        self.class_imports = []    # single-class imports: ``import pkg.Cls;``

    def has_imported_class(self, package_name: str, class_name: str) -> bool:
        """Return True if ``package_name.class_name`` is visible in this file.

        A class is visible when it lives in this file's own package, when its
        package is wildcard-imported, or when it is imported explicitly.
        """
        if self.package_name == package_name:
            return True
        # BUG FIX: the original passed a lambda to any(), which is always
        # truthy, so any non-empty import list made this return True.
        return (
            any(pi.package_name == package_name for pi in self.package_imports)
            or any(ci.package_name == package_name and ci.class_name == class_name
                   for ci in self.class_imports)
        )

    def has_imported_package(self, package_name: str) -> bool:
        """Return True if ``package_name`` is this file's own package or is wildcard-imported."""
        if self.package_name == package_name:
            return True
        # BUG FIX: same always-truthy any(lambda ...) defect as above.
        return any(pi.package_name == package_name for pi in self.package_imports)
class SingleFileElement:
    """The base class for those elements that are extracted from a single file."""

    def __init__(self, parser_context, filename: str = None, _file_info: FileInfo = None):
        self.parser_context = parser_context
        self.filename = filename
        self.file_info = _file_info

    def get_token_stream(self) -> CommonTokenStream:
        """Return the token stream backing this element's parser context."""
        return self.parser_context.parser.getTokenStream()

    def get_tokens_info(self) -> TokensInfo:
        """Wrap this element's parser context in a TokensInfo."""
        return TokensInfo(self.parser_context)

    def get_first_symbol(self) -> CommonToken:
        """Return the element's first token by descending to the leftmost terminal node."""
        first_terminal = self.parser_context
        while not isinstance(first_terminal, antlr4.tree.Tree.TerminalNode):
            first_terminal = first_terminal.getChild(0)
        return first_terminal.getSymbol()

    def get_last_symbol(self) -> CommonToken:
        """Return the element's last token by descending to the rightmost terminal node."""
        last_terminal = self.parser_context
        while not isinstance(last_terminal, antlr4.tree.Tree.TerminalNode):
            last_terminal = last_terminal.getChild(last_terminal.getChildCount() - 1)
        return last_terminal.getSymbol()

    def get_file_position_range(self) -> tuple:
        """Return (start, stop) character offsets of the element; stop is inclusive."""
        return (
            self.get_first_symbol().start,
            self.get_last_symbol().stop
        )

    def get_text_from_file(self, filename=None) -> str:
        """Read this element's source text back from *filename* (default: self.filename).

        Returns None when no filename is available.
        """
        if filename is None:
            filename = self.filename
        if filename is None:
            return None
        # BUG FIX: use a context manager so the handle is closed even when
        # read() raises (the original leaked the file object on error).
        with open(filename, 'r') as file:
            text = file.read()
        return text[self.get_first_symbol().start:self.get_last_symbol().stop + 1]
class ClassImport(SingleFileElement):
    """A single-class import declaration: ``import package_name.class_name;``"""

    def __init__(self,
                 package_name: str = None,
                 class_name: str = None,
                 parser_context: JavaParser.ImportDeclarationContext = None,
                 filename: str = None,
                 file_info: FileInfo = None):
        self.filename = filename
        self.file_info = file_info
        self.package_name = package_name
        self.class_name = class_name
        self.parser_context = parser_context

    def __str__(self):
        return "import {0}.{1}".format(self.package_name, self.class_name)
class PackageImport(SingleFileElement):
    """A wildcard import declaration: ``import package_name.*;``"""

    def __init__(self,
                 package_name: str = None,
                 parser_context: JavaParser.ImportDeclarationContext = None,
                 filename: str = None,
                 file_info: FileInfo = None):
        self.filename = filename
        self.file_info = file_info
        self.package_name = package_name
        self.parser_context = parser_context

    def __str__(self):
        return "import {0}.*".format(self.package_name)
class Class(SingleFileElement):
    """A parsed Java class: modifiers, supertypes, fields and methods."""

    def __init__(self,
                 name: str = None,
                 super_class_name: str = None,
                 package_name: str = None,
                 parser_context: JavaParser.ClassDeclarationContext = None,
                 filename: str = None,
                 file_info: FileInfo = None):
        self.modifiers = []
        self.modifiers_parser_contexts = []
        self.name = name
        # BUG FIX: the constructor accepted super_class_name but always
        # discarded it (assigned None); honour the argument. The default is
        # still None, so existing call sites behave identically.
        self.superclass_name = super_class_name
        self.superinterface_names = []
        self.fields = {}       # field name -> Field
        self.methods = {}      # "name(paramtype,...)" key -> Method
        self.package_name = package_name
        self.parser_context = parser_context
        self.filename = filename
        self.file_info = file_info
        self.body_context = None  # ClassBodyContext, set by the listener

    def find_methods_with_name(self, name: str) -> list:
        """Return all Method objects of this class whose name equals *name*."""
        return [m for m in self.methods.values() if m.name == name]

    def __str__(self):
        return str(self.modifiers) + " " + str(self.name) \
               + ((" extends " + str(self.superclass_name)) if self.superclass_name is not None else "") \
               + ((" implements " + str(self.superinterface_names)) if len(self.superinterface_names) > 0 else "") \
               + " " + str(self.fields) \
               + " " + str(self.methods)
class Field(SingleFileElement):
    """A parsed Java field (one declarator of a field declaration)."""

    def __init__(self,
                 datatype: str = None,
                 name: str = None,
                 initializer: str = None,
                 package_name: str = None,
                 class_name: str = None,
                 parser_context: JavaParser.FieldDeclarationContext = None,
                 filename: str = None,
                 file_info: FileInfo = None):
        self.modifiers = []
        self.modifiers_parser_contexts = []
        self.datatype = datatype
        self.name = name
        self.initializer = initializer
        # Names of the other declarators in the same field declaration.
        self.neighbor_names = []
        self.all_variable_declarator_contexts = []
        self.index_in_variable_declarators: int = None
        self.package_name = package_name
        self.class_name = class_name
        self.parser_context = parser_context
        self.filename = filename
        self.file_info = file_info

    def __str__(self):
        return "{0} {1} {2}".format(self.modifiers, self.datatype, self.name)
class Method(SingleFileElement):
    """A parsed Java method or constructor."""

    def __init__(self,
                 returntype: str = None,
                 name: str = None,
                 body_text: str = None,
                 package_name: str = None,
                 class_name: str = None,
                 parser_context=None,
                 filename: str = None,
                 file_info: FileInfo = None):
        self.modifiers = []
        self.modifiers_parser_contexts = []
        self.returntype = returntype
        self.name = name
        self.parameters = []  # (type text, identifier) tuples
        self.body_text = body_text
        self.body_method_invocations = {}
        # Items are LocalVariable, ExpressionName or MethodInvocation,
        # in body order.
        self.body_local_vars_and_expr_names = []
        self.package_name = package_name
        self.class_name = class_name
        self.parser_context = parser_context
        self.filename = filename
        self.file_info = file_info
        self.formalparam_context = None
        self.body_method_invocations_without_typename = {}
        self.method_declaration_context = None
        self.is_constructor = False

    def __str__(self):
        return "{0} {1} {2}{3}".format(
            self.modifiers, self.returntype, self.name, tuple(self.parameters))
class LocalVariable:
    """A local variable: its declared type, identifier, and declaration context."""

    def __init__(self, datatype: str = None, identifier: str = None,
                 parser_context: JavaParser.LocalVariableDeclarationContext = None):
        self.parser_context = parser_context
        self.identifier = identifier
        self.datatype = datatype
class ExpressionName:
    """A dotted expression name such as ``a.b.c``, stored as its identifier parts."""

    def __init__(self, dot_separated_identifiers: list):
        self.dot_separated_identifiers = dot_separated_identifiers
class MethodInvocation:
    """A method call, stored as the dotted identifier chain preceding '('."""

    def __init__(self, dot_separated_identifiers: list, parser_context: JavaParser.ExpressionContext = None):
        self.parser_context = parser_context
        self.dot_separated_identifiers = dot_separated_identifiers
class UtilsListener(JavaParserListener):
    """Parse-tree listener that builds the Package/Class/Field/Method model.

    State accumulates while ANTLR walks one compilation unit:
    ``last_modifiers`` buffers modifiers until the declaration they belong
    to is entered, the ``current_*`` attributes track the open class /
    method / field declaration, and ``nest_count`` tracks nested class
    declarations (while a nested class is open, ``current_class_identifier``
    is None so the other handlers skip collection).
    """

    def __init__(self, filename):
        self.package = Package()
        self.last_modifiers = []
        self.last_modifiers_contexts = []
        self.current_class_identifier = None
        # Saved outer-class identifier while inside a nested class.
        self.current_class_identifier_temp = None
        self.nest_count = 0
        self.current_method_identifier = None
        self.current_method = None
        self.current_local_var_type = None
        self.current_local_var_ctx = None
        # (modifiers, datatype, ctx, modifier contexts) of the open field decl.
        self.current_field_decl = None
        self.current_field_ids = None
        self.current_field_dims = None
        self.current_field_inits = None
        self.current_field_var_ctxs = None
        self.filename = filename
        self.file_info = FileInfo(filename=filename)
        # Guards against nested field declarations (e.g. anonymous classes).
        self.field_enter_count = 0

    def enterPackageDeclaration(self, ctx: JavaParser.PackageDeclarationContext):
        # Record the package name on both the package and the file info.
        self.package.name = ctx.qualifiedName().getText()
        self.file_info.package_name = self.package.name
        self.package.package_ctx = ctx

    def enterImportDeclaration(self, ctx: JavaParser.ImportDeclarationContext):
        # Static imports are deliberately ignored.
        if ctx.STATIC() is None:
            name: str = ctx.qualifiedName().getText()
            if ctx.getText().endswith(".*;"):  # Package import
                p = name
                package_import = PackageImport(
                    package_name=p,
                    parser_context=ctx,
                    filename=self.filename,
                    file_info=self.file_info
                )
                self.file_info.all_imports.append(package_import)
                self.file_info.package_imports.append(package_import)
            else:  # Class import
                p = None
                # Split "pkg.Cls" at the last dot; no dot means default package.
                dot_i = name.rfind('.')
                if dot_i != -1:
                    p = name[:dot_i]
                    c = name[dot_i + 1:]
                else:
                    c = name
                class_import = ClassImport(
                    package_name=p,
                    class_name=c,
                    parser_context=ctx,
                    filename=self.filename,
                    file_info=self.file_info
                )
                self.file_info.all_imports.append(class_import)
                self.file_info.class_imports.append(class_import)

    def enterTypeDeclaration(self, ctx: JavaParser.TypeDeclarationContext):
        # Buffer the modifiers of a top-level type for enterClassDeclaration.
        self.last_modifiers.clear()
        self.last_modifiers_contexts.clear()
        for modifier in ctx.getChildren(lambda x: type(x) == JavaParser.ClassOrInterfaceModifierContext):
            self.last_modifiers.append(modifier.getText())
            self.last_modifiers_contexts.append(modifier)

    def enterClassBodyDeclaration(self, ctx: JavaParser.ClassBodyDeclarationContext):
        # Buffer the modifiers of a member for the field/method handlers.
        self.last_modifiers.clear()
        self.last_modifiers_contexts.clear()
        for modifier in ctx.getChildren(lambda x: type(x) == JavaParser.ModifierContext):
            self.last_modifiers.append(modifier.getText())
            self.last_modifiers_contexts.append(modifier)

    def enterClassDeclaration(self, ctx: JavaParser.ClassDeclarationContext):
        if self.current_class_identifier is None and self.nest_count == 0:
            # Outermost class: create and register the Class model object.
            self.current_class_identifier = ctx.IDENTIFIER().getText()
            self.current_class_ctx = ctx.IDENTIFIER()
            current_class = Class(
                package_name=self.package.name,
                parser_context=ctx,
                filename=self.filename,
                file_info=self.file_info
            )
            current_class.modifiers = self.last_modifiers.copy()
            current_class.modifiers_parser_contexts = self.last_modifiers_contexts.copy()
            current_class.name = self.current_class_identifier
            if ctx.EXTENDS() is not None:
                current_class.superclass_name = ctx.typeType().getText()
            if ctx.IMPLEMENTS() is not None:
                for interface_type in ctx.typeList().getChildren(lambda x: type(x) == JavaParser.TypeTypeContext):
                    current_class.superinterface_names.append(interface_type.getText())
            self.package.classes[current_class.name] = current_class
        else:
            # Nested class: suspend collection until it is exited.
            if self.nest_count == 0:
                self.current_class_identifier_temp = self.current_class_identifier
                self.current_class_identifier = None
            self.nest_count += 1

    def enterClassBody(self, ctx: JavaParser.ClassBodyContext):
        if self.current_class_identifier is not None:
            self.package.classes[self.current_class_identifier].body_context = ctx

    def exitClassDeclaration(self, ctx: JavaParser.ClassDeclarationContext):
        if self.nest_count > 0:
            # Leaving a nested class; restore the outer identifier at depth 0.
            self.nest_count -= 1
            if self.nest_count == 0:
                self.current_class_identifier = self.current_class_identifier_temp
                self.current_class_identifier_temp = None
        elif self.current_class_identifier is not None:
            self.current_class_identifier = None

    def enterFormalParameterList(self, ctx: JavaParser.FormalParameterListContext):
        if self.current_method is not None:
            self.current_method.formalparam_context = ctx

    def enterMethodDeclaration(self, ctx: JavaParser.MethodDeclarationContext):
        if self.current_class_identifier is not None:
            # method_header = ctx.methodHeader()
            self.current_method_identifier = ctx.IDENTIFIER().getText()
            method = Method(
                package_name=self.package.name,
                class_name=self.current_class_identifier,
                parser_context=ctx.parentCtx.parentCtx,
                filename=self.filename,
                file_info=self.file_info
            )
            method.modifiers = self.last_modifiers.copy()
            method.modifiers_parser_contexts = self.last_modifiers_contexts.copy()
            method.returntype = ctx.typeTypeOrVoid().getText()
            method.name = self.current_method_identifier
            method.is_constructor = False
            # This is done on exit to collect params too, to support overloading.
            # self.package.classes[self.current_class_identifier].methods[method.name] = method
            self.current_method = method

    def enterFormalParameters(self, ctx: JavaParser.FormalParametersContext):
        if self.current_method is not None:
            self.current_method.method_declaration_context = ctx

    def enterFormalParameter(self, ctx: JavaParser.FormalParameterContext):
        if self.current_method is not None:
            # Collect (type text, parameter name).
            self.current_method.parameters.append(
                (ctx.typeType().getText(), ctx.variableDeclaratorId().IDENTIFIER().getText())
            )

    def enterMethodBody(self, ctx: JavaParser.MethodBodyContext):
        if self.current_method is not None:
            self.current_method.body_text = ctx.getText()
            pass

    def general_exit_method_decl(self):
        # Register the finished method under the key "name(type1,type2,...)"
        # so overloads do not collide; constructors use "" as the name part.
        if self.current_class_identifier is not None:
            if self.current_method is not None:
                method = self.current_method
                method_key = ("" if method.name is None else method.name) + '('
                is_first = True
                for param in method.parameters:
                    if not is_first:
                        method_key += ','
                    is_first = False
                    method_key += param[0]  # the type
                method_key += ')'
                self.package.classes[self.current_class_identifier].methods[method_key] = method
        self.current_method_identifier = None
        self.current_method = None

    def exitMethodDeclaration(self, ctx: JavaParser.MethodDeclarationContext):
        self.general_exit_method_decl()

    def enterConstructorDeclaration(self, ctx: JavaParser.ConstructorDeclarationContext):
        if self.current_class_identifier is not None:
            self.current_method_identifier = ctx.IDENTIFIER().getText()
            method = Method(
                package_name=self.package.name,
                class_name=self.current_class_identifier,
                parser_context=ctx.parentCtx.parentCtx,
                filename=self.filename,
                file_info=self.file_info
            )
            method.modifiers = self.last_modifiers.copy()
            method.modifiers_parser_contexts = self.last_modifiers_contexts.copy()
            method.returntype = None
            # Constructors are stored with name None (keyed as "(types)").
            method.name = None  # self.current_method_identifier
            method.body_text = ctx.constructorBody.getText()
            method.is_constructor = True
            # This is done on exit to collect params too, to support overloading.
            # self.package.classes[self.current_class_identifier].methods[method.name] = method
            self.current_method = method

    def exitConstructorDeclaration(self, ctx: JavaParser.ConstructorDeclarationContext):
        self.general_exit_method_decl()

    def enterMethodCall(self, ctx: JavaParser.MethodCallContext):
        if self.current_method is not None:
            # NOTE(review): these dicts are keyed by the IDENTIFIER *terminal
            # node object* (not its text), so each call site gets a distinct
            # key unless the nodes compare equal -- confirm this is intended.
            if ctx.parentCtx.IDENTIFIER() != None:
                if ctx.parentCtx.IDENTIFIER() not in self.current_method.body_method_invocations:
                    self.current_method.body_method_invocations[ctx.parentCtx.IDENTIFIER()] = [
                        ctx.IDENTIFIER().getText()]
                else:
                    self.current_method.body_method_invocations[ctx.parentCtx.IDENTIFIER()].append(
                        ctx.IDENTIFIER().getText())
            else:
                # One child in the parent expression means the call has no
                # qualifier (e.g. "foo()"), i.e. an implicit-this invocation.
                a = len(ctx.parentCtx.children)
                if a == 1:
                    if ctx.IDENTIFIER() != None:
                        if self.current_class_ctx not in self.current_method.body_method_invocations_without_typename:
                            self.current_method.body_method_invocations_without_typename[self.current_class_ctx] = [ctx]
                        else:
                            self.current_method.body_method_invocations_without_typename[self.current_class_ctx].append(
                                ctx)
            # MethodInvocation: also record the dotted name chain before '('.
            txt = ctx.getText()
            ids = txt[:txt.find('(')].split('.')
            self.current_method.body_local_vars_and_expr_names.append(
                MethodInvocation(ids, ctx)
            )

    def enterExpression(self, ctx: JavaParser.ExpressionContext):
        if self.current_method is not None:
            if ctx.methodCall() is not None:
                # Expression wrapping a call: record it as a MethodInvocation.
                txt = ctx.getText()
                ids = txt[:txt.find('(')].split('.')
                self.current_method.body_local_vars_and_expr_names.append(
                    MethodInvocation(ids, ctx)
                )
            else:
                # Record dotted names made only of identifier characters.
                names = ctx.getText().split('.')
                should_add = True
                for name in names:
                    if not re.match("^[A-Za-z0-9_]*$", name):
                        should_add = False
                if should_add:
                    self.current_method.body_local_vars_and_expr_names.append(ExpressionName(names))

    def enterLocalVariableDeclaration(self, ctx: JavaParser.LocalVariableDeclarationContext):
        if self.current_method is not None:
            self.current_local_var_type = ctx.typeType().getText()
            self.current_local_var_ctx = ctx
            # The rest in: enterVariableDeclarator

    def exitLocalVariableDeclaration(self, ctx: JavaParser.LocalVariableDeclarationContext):
        self.current_local_var_type = None

    def enterFieldDeclaration(self, ctx: JavaParser.FieldDeclarationContext):
        self.field_enter_count += 1
        # Only the outermost field declaration of the tracked class is kept.
        if self.current_class_identifier is not None and self.field_enter_count == 1:
            modifiers = self.last_modifiers.copy()
            modifiers_contexts = self.last_modifiers_contexts.copy()
            datatype = ctx.typeType().getText()
            self.current_field_decl = (modifiers, datatype, ctx, modifiers_contexts)
            self.current_field_ids = []
            self.current_field_dims = []
            self.current_field_inits = []
            self.current_field_var_ctxs = []

    def enterVariableDeclarator(self, ctx: JavaParser.VariableDeclaratorContext):
        # Array brackets declared on the variable name (e.g. "int a[]").
        dims = ""
        v_id: str = ctx.variableDeclaratorId().getText()
        dims_i = v_id.find('[')
        if dims_i != -1:
            dims = v_id[dims_i:]
        if self.current_field_decl is not None:
            # Declarator of an open field declaration: buffer its parts.
            self.current_field_ids.append(ctx.variableDeclaratorId().IDENTIFIER().getText())
            self.current_field_dims.append(dims)
            init = None
            init_ctx = ctx.variableInitializer()
            if init_ctx is not None:
                init = init_ctx.getText()
            self.current_field_inits.append(init)
            self.current_field_var_ctxs.append(ctx)
        if self.current_local_var_type is not None:
            # Declarator of a local variable inside the current method body.
            if self.current_method is not None:
                self.current_method.body_local_vars_and_expr_names.append(
                    LocalVariable(
                        self.current_local_var_type + dims,
                        ctx.variableDeclaratorId().IDENTIFIER().getText(),
                        self.current_local_var_ctx
                    )
                )

    def exitFieldDeclaration(self, ctx: JavaParser.FieldDeclarationContext):
        self.field_enter_count -= 1
        if self.current_class_identifier is not None and self.field_enter_count == 0:
            # Materialize one Field object per buffered declarator.
            for i in range(len(self.current_field_ids)):
                field_id = self.current_field_ids[i]
                dims = self.current_field_dims[i]
                field_init = self.current_field_inits[i]
                var_ctx = self.current_field_var_ctxs[i]
                field = Field(
                    package_name=self.package.name,
                    class_name=self.current_class_identifier,
                    parser_context=self.current_field_decl[2],
                    filename=self.filename,
                    file_info=self.file_info
                )
                field.modifiers = self.current_field_decl[0]
                field.modifiers_parser_contexts = self.current_field_decl[3]
                field.datatype = self.current_field_decl[1] + dims
                field.name = field_id
                field.initializer = field_init
                field.neighbor_names = [x for x in self.current_field_ids if x != field_id]
                field.all_variable_declarator_contexts = self.current_field_var_ctxs
                field.index_in_variable_declarators = i
                self.package.classes[self.current_class_identifier].fields[field.name] = field
            self.current_field_decl = None
| 40.822335 | 116 | 0.619829 | import re
import antlr4
from antlr4.Token import CommonToken
import antlr4.tree
from antlr4.CommonTokenStream import CommonTokenStream
from typing import List, Optional
from gen.java.JavaParser import JavaParser
from gen.java.JavaParserListener import JavaParserListener
class Program:
    """Top-level container mapping package names to Package objects."""

    def __init__(self):
        self.packages = {}  # package name -> Package

    def __str__(self):
        return "{}".format(self.packages)
class Package:
    """A Java package: its name, its classes keyed by name, and parser context."""

    def __init__(self):
        self.name = None         # package name; None until the listener sets it
        self.classes = {}        # class name -> Class
        self.package_ctx = None  # PackageDeclarationContext, set by the listener

    def __str__(self):
        return "{0} {1}".format(self.name, self.classes)
class TokensInfo:
    """Describes a slice of a token stream; start and stop are inclusive."""

    def __init__(self, parser_context=None):
        if parser_context is None:
            self.token_stream: CommonTokenStream = None
            self.start: int = None
            self.stop: int = None
        else:
            self.token_stream: CommonTokenStream = parser_context.parser.getTokenStream()
            self.start: int = parser_context.start.tokenIndex
            self.stop: int = parser_context.stop.tokenIndex

    def get_token_index(self, tokens: list, start: int, stop: int):
        """Return the sub-list tokens[start:stop] (Python slicing, stop exclusive)."""
        return tokens[start:stop]
class FileInfo:
    """Per-file bookkeeping: the file's package name and import declarations."""

    def __init__(self, filename: str = None, package_name: str = None):
        self.filename: str = filename
        self.package_name: str = package_name
        self.all_imports = []      # every import, in declaration order
        self.package_imports = []  # wildcard imports: ``import pkg.*;``
        self.class_imports = []    # single-class imports: ``import pkg.Cls;``

    def has_imported_class(self, package_name: str, class_name: str) -> bool:
        """Return True if ``package_name.class_name`` is visible in this file.

        A class is visible when it lives in this file's own package, when its
        package is wildcard-imported, or when it is imported explicitly.
        """
        if self.package_name == package_name:
            return True
        # BUG FIX: the original passed a lambda to any(), which is always
        # truthy, so any non-empty import list made this return True.
        return (
            any(pi.package_name == package_name for pi in self.package_imports)
            or any(ci.package_name == package_name and ci.class_name == class_name
                   for ci in self.class_imports)
        )

    def has_imported_package(self, package_name: str) -> bool:
        """Return True if ``package_name`` is this file's own package or is wildcard-imported."""
        if self.package_name == package_name:
            return True
        # BUG FIX: same always-truthy any(lambda ...) defect as above.
        return any(pi.package_name == package_name for pi in self.package_imports)
class SingleFileElement:
    """Base class for model elements extracted from a single source file."""

    def __init__(self, parser_context, filename: str = None, _file_info: FileInfo = None):
        self.parser_context = parser_context
        self.filename = filename
        self.file_info = _file_info

    def get_token_stream(self) -> CommonTokenStream:
        """Return the token stream backing this element's parser context."""
        return self.parser_context.parser.getTokenStream()

    def get_tokens_info(self) -> TokensInfo:
        """Wrap this element's parser context in a TokensInfo."""
        return TokensInfo(self.parser_context)

    def get_first_symbol(self) -> CommonToken:
        """Return the element's first token by descending to the leftmost terminal node."""
        first_terminal = self.parser_context
        while not isinstance(first_terminal, antlr4.tree.Tree.TerminalNode):
            first_terminal = first_terminal.getChild(0)
        return first_terminal.getSymbol()

    def get_last_symbol(self) -> CommonToken:
        """Return the element's last token by descending to the rightmost terminal node."""
        last_terminal = self.parser_context
        while not isinstance(last_terminal, antlr4.tree.Tree.TerminalNode):
            last_terminal = last_terminal.getChild(last_terminal.getChildCount() - 1)
        return last_terminal.getSymbol()

    def get_file_position_range(self) -> tuple:
        """Return (start, stop) character offsets of the element; stop is inclusive."""
        return (
            self.get_first_symbol().start,
            self.get_last_symbol().stop
        )

    def get_text_from_file(self, filename=None) -> str:
        """Read this element's source text back from *filename* (default: self.filename).

        Returns None when no filename is available.
        """
        if filename is None:
            filename = self.filename
        if filename is None:
            return None
        # BUG FIX: use a context manager so the handle is closed even when
        # read() raises (the original leaked the file object on error).
        with open(filename, 'r') as file:
            text = file.read()
        return text[self.get_first_symbol().start:self.get_last_symbol().stop + 1]
class ClassImport(SingleFileElement):
    """A single-class import declaration: ``import package_name.class_name;``"""

    def __init__(self,
                 package_name: str = None,
                 class_name: str = None,
                 parser_context: JavaParser.ImportDeclarationContext = None,
                 filename: str = None,
                 file_info: FileInfo = None):
        self.filename = filename
        self.file_info = file_info
        self.package_name = package_name
        self.class_name = class_name
        self.parser_context = parser_context

    def __str__(self):
        return "import {0}.{1}".format(self.package_name, self.class_name)
class PackageImport(SingleFileElement):
    """A wildcard import declaration: ``import package_name.*;``"""

    def __init__(self,
                 package_name: str = None,
                 parser_context: JavaParser.ImportDeclarationContext = None,
                 filename: str = None,
                 file_info: FileInfo = None):
        self.filename = filename
        self.file_info = file_info
        self.package_name = package_name
        self.parser_context = parser_context

    def __str__(self):
        return "import {0}.*".format(self.package_name)
class Class(SingleFileElement):
    """A parsed Java class: modifiers, supertypes, fields and methods."""

    def __init__(self,
                 name: str = None,
                 super_class_name: str = None,
                 package_name: str = None,
                 parser_context: JavaParser.ClassDeclarationContext = None,
                 filename: str = None,
                 file_info: FileInfo = None):
        self.modifiers = []
        self.modifiers_parser_contexts = []
        self.name = name
        # BUG FIX: the constructor accepted super_class_name but always
        # discarded it (assigned None); honour the argument. The default is
        # still None, so existing call sites behave identically.
        self.superclass_name = super_class_name
        self.superinterface_names = []
        self.fields = {}       # field name -> Field
        self.methods = {}      # "name(paramtype,...)" key -> Method
        self.package_name = package_name
        self.parser_context = parser_context
        self.filename = filename
        self.file_info = file_info
        self.body_context = None  # ClassBodyContext, set by the listener

    def find_methods_with_name(self, name: str) -> list:
        """Return all Method objects of this class whose name equals *name*."""
        return [m for m in self.methods.values() if m.name == name]

    def __str__(self):
        return str(self.modifiers) + " " + str(self.name) \
               + ((" extends " + str(self.superclass_name)) if self.superclass_name is not None else "") \
               + ((" implements " + str(self.superinterface_names)) if len(self.superinterface_names) > 0 else "") \
               + " " + str(self.fields) \
               + " " + str(self.methods)
class Field(SingleFileElement):
    """A parsed Java field (one declarator of a field declaration)."""

    def __init__(self,
                 datatype: str = None,
                 name: str = None,
                 initializer: str = None,
                 package_name: str = None,
                 class_name: str = None,
                 parser_context: JavaParser.FieldDeclarationContext = None,
                 filename: str = None,
                 file_info: FileInfo = None):
        self.modifiers = []
        self.modifiers_parser_contexts = []
        self.datatype = datatype
        self.name = name
        self.initializer = initializer
        # Names of the other declarators in the same field declaration.
        self.neighbor_names = []
        self.all_variable_declarator_contexts = []
        self.index_in_variable_declarators: int = None
        self.package_name = package_name
        self.class_name = class_name
        self.parser_context = parser_context
        self.filename = filename
        self.file_info = file_info

    def __str__(self):
        return "{0} {1} {2}".format(self.modifiers, self.datatype, self.name)
class Method(SingleFileElement):
    """A parsed Java method or constructor."""

    def __init__(self,
                 returntype: str = None,
                 name: str = None,
                 body_text: str = None,
                 package_name: str = None,
                 class_name: str = None,
                 parser_context=None,
                 filename: str = None,
                 file_info: FileInfo = None):
        self.modifiers = []
        self.modifiers_parser_contexts = []
        self.returntype = returntype
        self.name = name
        self.parameters = []  # (type text, identifier) tuples
        self.body_text = body_text
        self.body_method_invocations = {}
        # Items are LocalVariable, ExpressionName or MethodInvocation,
        # in body order.
        self.body_local_vars_and_expr_names = []
        self.package_name = package_name
        self.class_name = class_name
        self.parser_context = parser_context
        self.filename = filename
        self.file_info = file_info
        self.formalparam_context = None
        self.body_method_invocations_without_typename = {}
        self.method_declaration_context = None
        self.is_constructor = False

    def __str__(self):
        return "{0} {1} {2}{3}".format(
            self.modifiers, self.returntype, self.name, tuple(self.parameters))
class LocalVariable:
    """A local variable: its declared type, identifier, and declaration context."""

    def __init__(self, datatype: str = None, identifier: str = None,
                 parser_context: JavaParser.LocalVariableDeclarationContext = None):
        self.parser_context = parser_context
        self.identifier = identifier
        self.datatype = datatype
class ExpressionName:
    """A dotted expression name such as ``a.b.c``, stored as its identifier parts."""

    def __init__(self, dot_separated_identifiers: list):
        self.dot_separated_identifiers = dot_separated_identifiers
class MethodInvocation:
    """A method call, stored as the dotted identifier chain preceding '('."""

    def __init__(self, dot_separated_identifiers: list, parser_context: JavaParser.ExpressionContext = None):
        self.parser_context = parser_context
        self.dot_separated_identifiers = dot_separated_identifiers
class UtilsListener(JavaParserListener):
    """ANTLR parse-tree listener that builds a model of one Java source file.

    While the walker fires enter/exit callbacks, this listener fills
    ``self.package`` (classes, methods, fields) and ``self.file_info``
    (package name, imports) using the model classes defined in this module.
    Only the single outermost class of the file is modelled in detail;
    nested classes are tracked via ``nest_count`` and otherwise skipped.
    """
    def __init__(self, filename):
        """Initialize empty model state for the file ``filename``."""
        self.package = Package()
        # Modifiers most recently seen; consumed by the next type/member declaration.
        self.last_modifiers = []
        self.last_modifiers_contexts = []
        # Name of the outermost class currently being visited (None outside it).
        self.current_class_identifier = None
        self.current_class_identifier_temp = None
        # Depth of nested class declarations (0 = top level).
        self.nest_count = 0
        self.current_method_identifier = None
        self.current_method = None
        # Type/context of an in-progress local-variable declaration.
        self.current_local_var_type = None
        self.current_local_var_ctx = None
        # In-progress field declaration: (modifiers, datatype, ctx, modifier ctxs).
        self.current_field_decl = None
        self.current_field_ids = None
        self.current_field_dims = None
        self.current_field_inits = None
        self.current_field_var_ctxs = None
        self.filename = filename
        self.file_info = FileInfo(filename=filename)
        # Guards against nested field declarations (e.g. inside anonymous classes).
        self.field_enter_count = 0
    def enterPackageDeclaration(self, ctx: JavaParser.PackageDeclarationContext):
        """Record the file's package name on both the package model and file info."""
        self.package.name = ctx.qualifiedName().getText()
        self.file_info.package_name = self.package.name
        self.package.package_ctx = ctx
    def enterImportDeclaration(self, ctx: JavaParser.ImportDeclarationContext):
        """Record a non-static import as either a package (``.*``) or a class import."""
        if ctx.STATIC() is None:
            name: str = ctx.qualifiedName().getText()
            if ctx.getText().endswith(".*;"):
                # Wildcard import: the whole qualified name is the package.
                p = name
                package_import = PackageImport(
                    package_name=p,
                    parser_context=ctx,
                    filename=self.filename,
                    file_info=self.file_info
                )
                self.file_info.all_imports.append(package_import)
                self.file_info.package_imports.append(package_import)
            else:
                # Split "a.b.C" into package "a.b" and class "C"; package may be absent.
                p = None
                dot_i = name.rfind('.')
                if dot_i != -1:
                    p = name[:dot_i]
                    c = name[dot_i + 1:]
                else:
                    c = name
                class_import = ClassImport(
                    package_name=p,
                    class_name=c,
                    parser_context=ctx,
                    filename=self.filename,
                    file_info=self.file_info
                )
                self.file_info.all_imports.append(class_import)
                self.file_info.class_imports.append(class_import)
    def enterTypeDeclaration(self, ctx: JavaParser.TypeDeclarationContext):
        """Capture the modifiers preceding an upcoming type declaration."""
        self.last_modifiers.clear()
        self.last_modifiers_contexts.clear()
        for modifier in ctx.getChildren(lambda x: type(x) == JavaParser.ClassOrInterfaceModifierContext):
            self.last_modifiers.append(modifier.getText())
            self.last_modifiers_contexts.append(modifier)
    def enterClassBodyDeclaration(self, ctx: JavaParser.ClassBodyDeclarationContext):
        """Capture the modifiers preceding an upcoming class member declaration."""
        self.last_modifiers.clear()
        self.last_modifiers_contexts.clear()
        for modifier in ctx.getChildren(lambda x: type(x) == JavaParser.ModifierContext):
            self.last_modifiers.append(modifier.getText())
            self.last_modifiers_contexts.append(modifier)
    def enterClassDeclaration(self, ctx: JavaParser.ClassDeclarationContext):
        """Register the outermost class; nested classes only bump ``nest_count``."""
        if self.current_class_identifier is None and self.nest_count == 0:
            self.current_class_identifier = ctx.IDENTIFIER().getText()
            self.current_class_ctx = ctx.IDENTIFIER()
            current_class = Class(
                package_name=self.package.name,
                parser_context=ctx,
                filename=self.filename,
                file_info=self.file_info
            )
            current_class.modifiers = self.last_modifiers.copy()
            current_class.modifiers_parser_contexts = self.last_modifiers_contexts.copy()
            current_class.name = self.current_class_identifier
            if ctx.EXTENDS() is not None:
                current_class.superclass_name = ctx.typeType().getText()
            if ctx.IMPLEMENTS() is not None:
                for interface_type in ctx.typeList().getChildren(lambda x: type(x) == JavaParser.TypeTypeContext):
                    current_class.superinterface_names.append(interface_type.getText())
            self.package.classes[current_class.name] = current_class
        else:
            # Entering a nested class: stash the enclosing class name so it can
            # be restored when the nested declaration exits.
            if self.nest_count == 0:
                self.current_class_identifier_temp = self.current_class_identifier
                self.current_class_identifier = None
            self.nest_count += 1
    def enterClassBody(self, ctx: JavaParser.ClassBodyContext):
        """Attach the class body context to the current class model."""
        if self.current_class_identifier is not None:
            self.package.classes[self.current_class_identifier].body_context = ctx
    def exitClassDeclaration(self, ctx: JavaParser.ClassDeclarationContext):
        """Pop nested-class state or clear the outermost-class marker."""
        if self.nest_count > 0:
            self.nest_count -= 1
            if self.nest_count == 0:
                # Back at top level: restore the enclosing class name.
                self.current_class_identifier = self.current_class_identifier_temp
                self.current_class_identifier_temp = None
        elif self.current_class_identifier is not None:
            self.current_class_identifier = None
    def enterFormalParameterList(self, ctx: JavaParser.FormalParameterListContext):
        """Remember the formal-parameter-list context of the current method."""
        if self.current_method is not None:
            self.current_method.formalparam_context = ctx
    def enterMethodDeclaration(self, ctx: JavaParser.MethodDeclarationContext):
        """Start collecting a new Method model for a method of the outermost class."""
        if self.current_class_identifier is not None:
            self.current_method_identifier = ctx.IDENTIFIER().getText()
            method = Method(
                package_name=self.package.name,
                class_name=self.current_class_identifier,
                # grandparent is the classBodyDeclaration context of this method
                parser_context=ctx.parentCtx.parentCtx,
                filename=self.filename,
                file_info=self.file_info
            )
            method.modifiers = self.last_modifiers.copy()
            method.modifiers_parser_contexts = self.last_modifiers_contexts.copy()
            method.returntype = ctx.typeTypeOrVoid().getText()
            method.name = self.current_method_identifier
            method.is_constructor = False
            self.current_method = method
    def enterFormalParameters(self, ctx: JavaParser.FormalParametersContext):
        """Remember the parenthesized parameter context of the current method."""
        if self.current_method is not None:
            self.current_method.method_declaration_context = ctx
    def enterFormalParameter(self, ctx: JavaParser.FormalParameterContext):
        """Append one (type, name) pair to the current method's parameter list."""
        if self.current_method is not None:
            self.current_method.parameters.append(
                (ctx.typeType().getText(), ctx.variableDeclaratorId().IDENTIFIER().getText())
            )
    def enterMethodBody(self, ctx: JavaParser.MethodBodyContext):
        """Record the raw body text of the current method."""
        if self.current_method is not None:
            self.current_method.body_text = ctx.getText()
            pass
    def general_exit_method_decl(self):
        """Finalize the current method/constructor under the key ``name(type1,type2,...)``."""
        if self.current_class_identifier is not None:
            if self.current_method is not None:
                method = self.current_method
                # Constructors have name None; their key is just "(types...)".
                method_key = ("" if method.name is None else method.name) + '('
                is_first = True
                for param in method.parameters:
                    if not is_first:
                        method_key += ','
                    is_first = False
                    method_key += param[0]
                method_key += ')'
                self.package.classes[self.current_class_identifier].methods[method_key] = method
        self.current_method_identifier = None
        self.current_method = None
    def exitMethodDeclaration(self, ctx: JavaParser.MethodDeclarationContext):
        """Delegate to the shared method-finalization logic."""
        self.general_exit_method_decl()
    def enterConstructorDeclaration(self, ctx: JavaParser.ConstructorDeclarationContext):
        """Start collecting a Method model flagged as constructor (name/returntype None)."""
        if self.current_class_identifier is not None:
            self.current_method_identifier = ctx.IDENTIFIER().getText()
            method = Method(
                package_name=self.package.name,
                class_name=self.current_class_identifier,
                parser_context=ctx.parentCtx.parentCtx,
                filename=self.filename,
                file_info=self.file_info
            )
            method.modifiers = self.last_modifiers.copy()
            method.modifiers_parser_contexts = self.last_modifiers_contexts.copy()
            method.returntype = None
            method.name = None
            method.body_text = ctx.constructorBody.getText()
            method.is_constructor = True
            self.current_method = method
    def exitConstructorDeclaration(self, ctx: JavaParser.ConstructorDeclarationContext):
        """Delegate to the shared method-finalization logic."""
        self.general_exit_method_decl()
    def enterMethodCall(self, ctx: JavaParser.MethodCallContext):
        """Record a method invocation made inside the current method body."""
        if self.current_method is not None:
            if ctx.parentCtx.IDENTIFIER() != None:
                # Qualified call "x.m(...)".
                # NOTE(review): the dict is keyed by the IDENTIFIER terminal
                # node object, not its text — confirm this is intentional.
                if ctx.parentCtx.IDENTIFIER() not in self.current_method.body_method_invocations:
                    self.current_method.body_method_invocations[ctx.parentCtx.IDENTIFIER()] = [
                        ctx.IDENTIFIER().getText()]
                else:
                    self.current_method.body_method_invocations[ctx.parentCtx.IDENTIFIER()].append(
                        ctx.IDENTIFIER().getText())
            else:
                a = len(ctx.parentCtx.children)
                if a == 1:
                    # Unqualified call "m(...)" — attributed to the current class.
                    if ctx.IDENTIFIER() != None:
                        if self.current_class_ctx not in self.current_method.body_method_invocations_without_typename:
                            self.current_method.body_method_invocations_without_typename[self.current_class_ctx] = [ctx]
                        else:
                            self.current_method.body_method_invocations_without_typename[self.current_class_ctx].append(
                                ctx)
                    # Also record the call's identifier chain (text before "(").
                    txt = ctx.getText()
                    ids = txt[:txt.find('(')].split('.')
                    self.current_method.body_local_vars_and_expr_names.append(
                        MethodInvocation(ids, ctx)
                    )
    def enterExpression(self, ctx: JavaParser.ExpressionContext):
        """Record method invocations and simple dotted names used in expressions."""
        if self.current_method is not None:
            if ctx.methodCall() is not None:
                txt = ctx.getText()
                ids = txt[:txt.find('(')].split('.')
                self.current_method.body_local_vars_and_expr_names.append(
                    MethodInvocation(ids, ctx)
                )
            else:
                # Keep only names whose every segment is a plain identifier.
                names = ctx.getText().split('.')
                should_add = True
                for name in names:
                    if not re.match("^[A-Za-z0-9_]*$", name):
                        should_add = False
                if should_add:
                    self.current_method.body_local_vars_and_expr_names.append(ExpressionName(names))
    def enterLocalVariableDeclaration(self, ctx: JavaParser.LocalVariableDeclarationContext):
        """Remember the declared type/context until the declarators are visited."""
        if self.current_method is not None:
            self.current_local_var_type = ctx.typeType().getText()
            self.current_local_var_ctx = ctx
    def exitLocalVariableDeclaration(self, ctx: JavaParser.LocalVariableDeclarationContext):
        """Clear the pending local-variable type."""
        self.current_local_var_type = None
    def enterFieldDeclaration(self, ctx: JavaParser.FieldDeclarationContext):
        """Start collecting a (possibly multi-declarator) field declaration."""
        self.field_enter_count += 1
        # Only the outermost field declaration of the modelled class is tracked.
        if self.current_class_identifier is not None and self.field_enter_count == 1:
            modifiers = self.last_modifiers.copy()
            modifiers_contexts = self.last_modifiers_contexts.copy()
            datatype = ctx.typeType().getText()
            self.current_field_decl = (modifiers, datatype, ctx, modifiers_contexts)
            self.current_field_ids = []
            self.current_field_dims = []
            self.current_field_inits = []
            self.current_field_var_ctxs = []
    def enterVariableDeclarator(self, ctx: JavaParser.VariableDeclaratorContext):
        """Handle one declarator: record field info and/or a local variable."""
        # Array dimensions written after the name, e.g. "int x[]" -> dims "[]".
        dims = ""
        v_id: str = ctx.variableDeclaratorId().getText()
        dims_i = v_id.find('[')
        if dims_i != -1:
            dims = v_id[dims_i:]
        if self.current_field_decl is not None:
            self.current_field_ids.append(ctx.variableDeclaratorId().IDENTIFIER().getText())
            self.current_field_dims.append(dims)
            init = None
            init_ctx = ctx.variableInitializer()
            if init_ctx is not None:
                init = init_ctx.getText()
            self.current_field_inits.append(init)
            self.current_field_var_ctxs.append(ctx)
        if self.current_local_var_type is not None:
            if self.current_method is not None:
                self.current_method.body_local_vars_and_expr_names.append(
                    LocalVariable(
                        self.current_local_var_type + dims,
                        ctx.variableDeclaratorId().IDENTIFIER().getText(),
                        self.current_local_var_ctx
                    )
                )
    def exitFieldDeclaration(self, ctx: JavaParser.FieldDeclarationContext):
        """Materialize one Field model per declarator of the finished declaration."""
        self.field_enter_count -= 1
        if self.current_class_identifier is not None and self.field_enter_count == 0:
            for i in range(len(self.current_field_ids)):
                field_id = self.current_field_ids[i]
                dims = self.current_field_dims[i]
                field_init = self.current_field_inits[i]
                var_ctx = self.current_field_var_ctxs[i]
                field = Field(
                    package_name=self.package.name,
                    class_name=self.current_class_identifier,
                    parser_context=self.current_field_decl[2],
                    filename=self.filename,
                    file_info=self.file_info
                )
                field.modifiers = self.current_field_decl[0]
                field.modifiers_parser_contexts = self.current_field_decl[3]
                field.datatype = self.current_field_decl[1] + dims
                field.name = field_id
                field.initializer = field_init
                # Other names declared in the same statement, e.g. "int a, b;".
                field.neighbor_names = [x for x in self.current_field_ids if x != field_id]
                field.all_variable_declarator_contexts = self.current_field_var_ctxs
                field.index_in_variable_declarators = i
                self.package.classes[self.current_class_identifier].fields[field.name] = field
            self.current_field_decl = None
| true | true |
f723c6d8b64443d90a1248040568d87fc7bf7641 | 4,464 | py | Python | tensorflow/python/keras/optimizer_v2/utils.py | luisangel86a/tensorflow | 77ee5e02721ba797fe01d47019e6017d2bb09ab7 | [
"Apache-2.0"
] | 1 | 2020-10-25T00:12:12.000Z | 2020-10-25T00:12:12.000Z | tensorflow/python/keras/optimizer_v2/utils.py | luisangel86a/tensorflow | 77ee5e02721ba797fe01d47019e6017d2bb09ab7 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/keras/optimizer_v2/utils.py | luisangel86a/tensorflow | 77ee5e02721ba797fe01d47019e6017d2bb09ab7 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import central_storage_strategy
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.ops import clip_ops
from tensorflow.python.platform import tf_logging as logging
def all_reduce_sum_gradients(grads_and_vars):
  """Returns all-reduced gradients aggregated via summation.

  Args:
    grads_and_vars: List of (gradient, variable) pairs.

  Returns:
    A list of all-reduced gradients, with `None` entries preserved in place.
  """
  grads_and_vars = list(grads_and_vars)
  filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)
  # All-reduce from a cross-replica context: IndexedSlices are otherwise
  # converted to dense tensors when reduced in a replica context.
  # TODO(b/150507409): Do not switch contexts once the bug is fixed.
  if filtered_grads_and_vars:
    reduced = distribute_ctx.get_replica_context().merge_call(
        _all_reduce_sum_fn, args=(filtered_grads_and_vars,))
  else:
    reduced = []
  # Re-insert None placeholders so the output lines up with the input pairs.
  reduced_with_nones = []
  next_reduced = 0
  for grad, _ in grads_and_vars:
    if grad is None:
      reduced_with_nones.append(None)
      continue
    reduced_with_nones.append(reduced[next_reduced])
    next_reduced += 1
  assert next_reduced == len(reduced), "Failed to add all gradients"
  return reduced_with_nones
def make_gradient_clipnorm_fn(clipnorm):
  """Creates a gradient transformation function for clipping by norm.

  Args:
    clipnorm: Maximum norm each individual gradient is clipped to.

  Returns:
    A function mapping a list of `(grad, var)` pairs to the same pairs with
    norm-clipped gradients. The returned function raises `ValueError` when
    invoked under `CentralStorageStrategy`, which is unsupported.
  """

  def gradient_clipnorm_fn(grads_and_vars):
    if isinstance(distribute_ctx.get_strategy(),
                  central_storage_strategy.CentralStorageStrategy):
      # Fixed typo in the error message: the class is CentralStorageStrategy.
      raise ValueError(
          "`clipnorm` is not supported with `CentralStorageStrategy`")

    clipped_grads_and_vars = [
        (clip_ops.clip_by_norm(g, clipnorm), v) for g, v in grads_and_vars
    ]
    return clipped_grads_and_vars

  return gradient_clipnorm_fn
def make_gradient_clipvalue_fn(clipvalue):
  """Creates a gradient transformation function for clipping by value.

  Args:
    clipvalue: Gradients are clipped elementwise to [-clipvalue, clipvalue].

  Returns:
    A function mapping a list of `(grad, var)` pairs to the same pairs with
    value-clipped gradients. The returned function raises `ValueError` when
    invoked under `CentralStorageStrategy`, which is unsupported.
  """

  def gradient_clipvalue_fn(grads_and_vars):
    if isinstance(distribute_ctx.get_strategy(),
                  central_storage_strategy.CentralStorageStrategy):
      # Fixed typo in the error message: the class is CentralStorageStrategy.
      raise ValueError(
          "`clipvalue` is not supported with `CentralStorageStrategy`")

    clipped_grads_and_vars = [(clip_ops.clip_by_value(g, -clipvalue,
                                                      clipvalue), v)
                              for g, v in grads_and_vars]
    return clipped_grads_and_vars

  return gradient_clipvalue_fn
def filter_empty_gradients(grads_and_vars):
  """Filter out `(grad, var)` pairs that have a gradient equal to `None`.

  Raises `ValueError` if every gradient is `None`; logs a warning when only
  some variables are missing gradients.
  """
  grads_and_vars = tuple(grads_and_vars)
  if not grads_and_vars:
    return grads_and_vars

  filtered = tuple(pair for pair in grads_and_vars if pair[0] is not None)
  vars_with_empty_grads = [var for grad, var in grads_and_vars if grad is None]

  if not filtered:
    raise ValueError("No gradients provided for any variable: %s." %
                     ([v.name for _, v in grads_and_vars],))
  if vars_with_empty_grads:
    logging.warning(
        ("Gradients do not exist for variables %s when minimizing the loss."),
        ([v.name for v in vars_with_empty_grads]))
  return filtered
def _all_reduce_sum_fn(distribution, grads_and_vars):
  """Merge-call target: batch sum-reduce the gradients across replicas."""
  reduce_op = ds_reduce_util.ReduceOp.SUM
  return distribution.extended.batch_reduce_to(reduce_op, grads_and_vars)
| 35.428571 | 88 | 0.714382 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import central_storage_strategy
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
from tensorflow.python.distribute import reduce_util as ds_reduce_util
from tensorflow.python.ops import clip_ops
from tensorflow.python.platform import tf_logging as logging
def all_reduce_sum_gradients(grads_and_vars):
  """Returns all-reduced gradients aggregated via summation.

  Args:
    grads_and_vars: List of (gradient, variable) pairs.

  Returns:
    A list of all-reduced gradients, with `None` entries preserved in place.
  """
  grads_and_vars = list(grads_and_vars)
  filtered_grads_and_vars = filter_empty_gradients(grads_and_vars)
  # All-reduce from a cross-replica context: IndexedSlices would otherwise be
  # converted to dense tensors when reduced in a replica context.
  if filtered_grads_and_vars:
    reduced = distribute_ctx.get_replica_context().merge_call(
        _all_reduce_sum_fn, args=(filtered_grads_and_vars,))
  else:
    reduced = []
  # Copy 'reduced' but add the None gradients back in their original positions.
  reduced_with_nones = []
  reduced_pos = 0
  for g, _ in grads_and_vars:
    if g is None:
      reduced_with_nones.append(None)
    else:
      reduced_with_nones.append(reduced[reduced_pos])
      reduced_pos += 1
  assert reduced_pos == len(reduced), "Failed to add all gradients"
  return reduced_with_nones
def make_gradient_clipnorm_fn(clipnorm):
  """Creates a gradient transformation function for clipping by norm.

  Args:
    clipnorm: Maximum norm each individual gradient is clipped to.

  Returns:
    A function mapping `(grad, var)` pairs to pairs with norm-clipped
    gradients; it raises `ValueError` under `CentralStorageStrategy`.
  """
  def gradient_clipnorm_fn(grads_and_vars):
    if isinstance(distribute_ctx.get_strategy(),
                  central_storage_strategy.CentralStorageStrategy):
      # Fixed typo in the error message: the class is CentralStorageStrategy.
      raise ValueError(
          "`clipnorm` is not supported with `CentralStorageStrategy`")
    clipped_grads_and_vars = [
        (clip_ops.clip_by_norm(g, clipnorm), v) for g, v in grads_and_vars
    ]
    return clipped_grads_and_vars
  return gradient_clipnorm_fn
def make_gradient_clipvalue_fn(clipvalue):
  """Creates a gradient transformation function for clipping by value.

  Args:
    clipvalue: Gradients are clipped elementwise to [-clipvalue, clipvalue].

  Returns:
    A function mapping `(grad, var)` pairs to pairs with value-clipped
    gradients; it raises `ValueError` under `CentralStorageStrategy`.
  """
  def gradient_clipvalue_fn(grads_and_vars):
    if isinstance(distribute_ctx.get_strategy(),
                  central_storage_strategy.CentralStorageStrategy):
      # Fixed typo in the error message: the class is CentralStorageStrategy.
      raise ValueError(
          "`clipvalue` is not supported with `CentralStorageStrategy`")
    clipped_grads_and_vars = [(clip_ops.clip_by_value(g, -clipvalue,
                                                      clipvalue), v)
                              for g, v in grads_and_vars]
    return clipped_grads_and_vars
  return gradient_clipvalue_fn
def filter_empty_gradients(grads_and_vars):
  """Filter out `(grad, var)` pairs that have a gradient equal to `None`.

  Raises:
    ValueError: if every pair has a `None` gradient.
  """
  grads_and_vars = tuple(grads_and_vars)
  if not grads_and_vars:
    return grads_and_vars
  filtered = []
  vars_with_empty_grads = []
  for grad, var in grads_and_vars:
    if grad is None:
      vars_with_empty_grads.append(var)
    else:
      filtered.append((grad, var))
  filtered = tuple(filtered)
  if not filtered:
    raise ValueError("No gradients provided for any variable: %s." %
                     ([v.name for _, v in grads_and_vars],))
  if vars_with_empty_grads:
    # Only warn (not error) when merely some variables lack gradients.
    logging.warning(
        ("Gradients do not exist for variables %s when minimizing the loss."),
        ([v.name for v in vars_with_empty_grads]))
  return filtered
def _all_reduce_sum_fn(distribution, grads_and_vars):
  """Merge-call target: batch sum-reduce the gradients across replicas."""
  return distribution.extended.batch_reduce_to(ds_reduce_util.ReduceOp.SUM,
                                               grads_and_vars)
| true | true |
f723c71f72bc88cff1f65f82dbd10987ac1732ba | 9,771 | py | Python | stable_baselines3/common/vec_env/subproc_vec_env.py | qgallouedec/stable-baselines3 | a6f5049a99a4c21a6f0bcce458ca3306cef310e0 | [
"MIT"
] | null | null | null | stable_baselines3/common/vec_env/subproc_vec_env.py | qgallouedec/stable-baselines3 | a6f5049a99a4c21a6f0bcce458ca3306cef310e0 | [
"MIT"
] | null | null | null | stable_baselines3/common/vec_env/subproc_vec_env.py | qgallouedec/stable-baselines3 | a6f5049a99a4c21a6f0bcce458ca3306cef310e0 | [
"MIT"
] | null | null | null | import multiprocessing as mp
from collections import OrderedDict
from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union
import gym
import numpy as np
from stable_baselines3.common.vec_env.base_vec_env import (
CloudpickleWrapper,
VecEnv,
VecEnvIndices,
VecEnvObs,
VecEnvStepReturn,
)
def _worker(
    remote: mp.connection.Connection, parent_remote: mp.connection.Connection, env_fn_wrapper: CloudpickleWrapper
) -> None:
    """Subprocess entry point: serve environment commands arriving on ``remote``.

    Instantiates the wrapped env factory and loops, dispatching one command
    per message until "close" is received or the pipe reaches EOF.
    """
    # Import here to avoid a circular import
    from stable_baselines3.common.env_util import is_wrapped

    # The parent's end of the pipe is not used in the child.
    parent_remote.close()
    env = env_fn_wrapper.var()
    while True:
        try:
            cmd, data = remote.recv()
            if cmd == "step":
                observation, reward, done, info = env.step(data)
                if done:
                    # save final observation where user can get it, then reset
                    info["terminal_observation"] = observation
                    observation = env.reset()
                remote.send((observation, reward, done, info))
            elif cmd == "seed":
                remote.send(env.seed(data))
            elif cmd == "reset":
                observation = env.reset()
                remote.send(observation)
            elif cmd == "render":
                remote.send(env.render(data))
            elif cmd == "close":
                env.close()
                remote.close()
                break
            elif cmd == "get_spaces":
                remote.send((env.observation_space, env.action_space))
            elif cmd == "env_method":
                # data = (method_name, args, kwargs)
                method = getattr(env, data[0])
                remote.send(method(*data[1], **data[2]))
            elif cmd == "get_attr":
                remote.send(getattr(env, data))
            elif cmd == "set_attr":
                remote.send(setattr(env, data[0], data[1]))
            elif cmd == "is_wrapped":
                remote.send(is_wrapped(env, data))
            else:
                raise NotImplementedError(f"`{cmd}` is not implemented in the worker")
        except EOFError:
            # Parent closed its end of the pipe: shut down quietly.
            break
class SubprocVecEnv(VecEnv):
    """
    Creates a multiprocess vectorized wrapper for multiple environments, distributing each environment to its own
    process, allowing significant speed up when the environment is computationally complex.
    For performance reasons, if your environment is not IO bound, the number of environments should not exceed the
    number of logical cores on your CPU.
    .. warning::
        Only 'forkserver' and 'spawn' start methods are thread-safe,
        which is important when TensorFlow sessions or other non thread-safe
        libraries are used in the parent (see issue #217). However, compared to
        'fork' they incur a small start-up cost and have restrictions on
        global variables. With those methods, users must wrap the code in an
        ``if __name__ == "__main__":`` block.
        For more information, see the multiprocessing documentation.
    :param env_fns: Environments to run in subprocesses
    :param start_method: method used to start the subprocesses.
           Must be one of the methods returned by multiprocessing.get_all_start_methods().
           Defaults to 'forkserver' on available platforms, and 'spawn' otherwise.
    """
    def __init__(self, env_fns: List[Callable[[], gym.Env]], start_method: Optional[str] = None):
        """Spawn one worker process per environment and query the spaces."""
        self.waiting = False
        self.closed = False
        n_envs = len(env_fns)
        if start_method is None:
            # Fork is not a thread safe method (see issue #217)
            # but is more user friendly (does not require to wrap the code in
            # a `if __name__ == "__main__":`)
            forkserver_available = "forkserver" in mp.get_all_start_methods()
            start_method = "forkserver" if forkserver_available else "spawn"
        ctx = mp.get_context(start_method)
        self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(n_envs)])
        self.processes = []
        for work_remote, remote, env_fn in zip(self.work_remotes, self.remotes, env_fns):
            args = (work_remote, remote, CloudpickleWrapper(env_fn))
            # daemon=True: if the main process crashes, we should not cause things to hang
            process = ctx.Process(target=_worker, args=args, daemon=True)  # pytype:disable=attribute-error
            process.start()
            self.processes.append(process)
            # The worker's end of the pipe is not used in the parent.
            work_remote.close()
        self.remotes[0].send(("get_spaces", None))
        observation_space, action_space = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)
    def step_async(self, actions: np.ndarray) -> None:
        """Dispatch one action to each worker without waiting for results."""
        for remote, action in zip(self.remotes, actions):
            remote.send(("step", action))
        self.waiting = True
    def step_wait(self) -> VecEnvStepReturn:
        """Collect the results of a previous :meth:`step_async` call."""
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return _flatten_obs(obs, self.observation_space), np.stack(rews), np.stack(dones), infos
    def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:
        """Seed each sub-environment with ``seed + env_index`` (random base if None)."""
        if seed is None:
            seed = np.random.randint(0, 2**32 - 1)
        for idx, remote in enumerate(self.remotes):
            remote.send(("seed", seed + idx))
        return [remote.recv() for remote in self.remotes]
    def reset(self) -> VecEnvObs:
        """Reset every sub-environment and return the stacked observations."""
        for remote in self.remotes:
            remote.send(("reset", None))
        obs = [remote.recv() for remote in self.remotes]
        return _flatten_obs(obs, self.observation_space)
    def close(self) -> None:
        """Terminate all worker processes (idempotent)."""
        if self.closed:
            return
        if self.waiting:
            # Drain pending step results so the close message is read next.
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(("close", None))
        for process in self.processes:
            process.join()
        self.closed = True
    def get_images(self) -> Sequence[np.ndarray]:
        """Return one rendered RGB frame per sub-environment."""
        for pipe in self.remotes:
            # gather images from subprocesses
            # `mode` will be taken into account later
            pipe.send(("render", "rgb_array"))
        imgs = [pipe.recv() for pipe in self.remotes]
        return imgs
    def get_attr(self, attr_name: str, indices: VecEnvIndices = None) -> List[Any]:
        """Return attribute from vectorized environment (see base class)."""
        target_remotes = self._get_target_remotes(indices)
        for remote in target_remotes:
            remote.send(("get_attr", attr_name))
        return [remote.recv() for remote in target_remotes]
    def set_attr(self, attr_name: str, value: Any, indices: VecEnvIndices = None) -> None:
        """Set attribute inside vectorized environments (see base class)."""
        target_remotes = self._get_target_remotes(indices)
        for remote in target_remotes:
            remote.send(("set_attr", (attr_name, value)))
        for remote in target_remotes:
            remote.recv()
    def env_method(self, method_name: str, *method_args, indices: VecEnvIndices = None, **method_kwargs) -> List[Any]:
        """Call instance methods of vectorized environments."""
        target_remotes = self._get_target_remotes(indices)
        for remote in target_remotes:
            remote.send(("env_method", (method_name, method_args, method_kwargs)))
        return [remote.recv() for remote in target_remotes]
    def env_is_wrapped(self, wrapper_class: Type[gym.Wrapper], indices: VecEnvIndices = None) -> List[bool]:
        """Check if worker environments are wrapped with a given wrapper"""
        target_remotes = self._get_target_remotes(indices)
        for remote in target_remotes:
            remote.send(("is_wrapped", wrapper_class))
        return [remote.recv() for remote in target_remotes]
    def _get_target_remotes(self, indices: VecEnvIndices) -> List[Any]:
        """
        Get the connection object needed to communicate with the wanted
        envs that are in subprocesses.
        :param indices: refers to indices of envs.
        :return: Connection object to communicate between processes.
        """
        indices = self._get_indices(indices)
        return [self.remotes[i] for i in indices]
def _flatten_obs(obs: Union[List[VecEnvObs], Tuple[VecEnvObs]], space: gym.spaces.Space) -> VecEnvObs:
    """
    Flatten observations, depending on the observation space.

    :param obs: observations, one per environment; each may be a NumPy array,
        or a dict or tuple of NumPy arrays matching ``space``.
    :return: the per-environment observations stacked along a new leading
        (environment) axis, in the same container shape as ``space``.
    """
    assert isinstance(obs, (list, tuple)), "expected list or tuple of observations per environment"
    assert len(obs) > 0, "need observations from at least one environment"

    if isinstance(space, gym.spaces.Dict):
        assert isinstance(space.spaces, OrderedDict), "Dict space must have ordered subspaces"
        assert isinstance(obs[0], dict), "non-dict observation for environment with Dict observation space"
        # Stack each sub-observation key-wise, preserving the space's key order.
        return OrderedDict(
            (key, np.stack([single_obs[key] for single_obs in obs])) for key in space.spaces.keys()
        )
    if isinstance(space, gym.spaces.Tuple):
        assert isinstance(obs[0], tuple), "non-tuple observation for environment with Tuple observation space"
        return tuple(
            np.stack([single_obs[idx] for single_obs in obs]) for idx in range(len(space.spaces))
        )
    return np.stack(obs)
| 43.816143 | 118 | 0.635861 | import multiprocessing as mp
from collections import OrderedDict
from typing import Any, Callable, List, Optional, Sequence, Tuple, Type, Union
import gym
import numpy as np
from stable_baselines3.common.vec_env.base_vec_env import (
CloudpickleWrapper,
VecEnv,
VecEnvIndices,
VecEnvObs,
VecEnvStepReturn,
)
def _worker(
    remote: mp.connection.Connection, parent_remote: mp.connection.Connection, env_fn_wrapper: CloudpickleWrapper
) -> None:
    """Subprocess entry point: serve environment commands arriving on ``remote``.

    Instantiates the wrapped env factory and loops, dispatching one command
    per message until "close" is received or the pipe reaches EOF.
    """
    # Imported lazily; presumably to avoid a circular import — see the upstream module.
    from stable_baselines3.common.env_util import is_wrapped
    # The parent's end of the pipe is not used in the child.
    parent_remote.close()
    env = env_fn_wrapper.var()
    while True:
        try:
            cmd, data = remote.recv()
            if cmd == "step":
                observation, reward, done, info = env.step(data)
                if done:
                    # Save the final observation for the caller, then auto-reset.
                    info["terminal_observation"] = observation
                    observation = env.reset()
                remote.send((observation, reward, done, info))
            elif cmd == "seed":
                remote.send(env.seed(data))
            elif cmd == "reset":
                observation = env.reset()
                remote.send(observation)
            elif cmd == "render":
                remote.send(env.render(data))
            elif cmd == "close":
                env.close()
                remote.close()
                break
            elif cmd == "get_spaces":
                remote.send((env.observation_space, env.action_space))
            elif cmd == "env_method":
                # data = (method_name, args, kwargs)
                method = getattr(env, data[0])
                remote.send(method(*data[1], **data[2]))
            elif cmd == "get_attr":
                remote.send(getattr(env, data))
            elif cmd == "set_attr":
                remote.send(setattr(env, data[0], data[1]))
            elif cmd == "is_wrapped":
                remote.send(is_wrapped(env, data))
            else:
                raise NotImplementedError(f"`{cmd}` is not implemented in the worker")
        except EOFError:
            # Parent closed its end of the pipe: shut down quietly.
            break
class SubprocVecEnv(VecEnv):
    """Multiprocess vectorized env wrapper: one subprocess per environment.

    Each environment runs in its own worker process driven over a pipe; the
    parent sends commands ("step", "reset", ...) and collects the replies.

    :param env_fns: environment factories, one per subprocess
    :param start_method: multiprocessing start method; defaults to
        'forkserver' when available, else 'spawn' (both are thread-safe,
        unlike 'fork').
    """
    def __init__(self, env_fns: List[Callable[[], gym.Env]], start_method: Optional[str] = None):
        """Spawn one worker process per environment and query the spaces."""
        self.waiting = False
        self.closed = False
        n_envs = len(env_fns)
        if start_method is None:
            # Prefer 'forkserver' (thread-safe); fall back to 'spawn'.
            forkserver_available = "forkserver" in mp.get_all_start_methods()
            start_method = "forkserver" if forkserver_available else "spawn"
        ctx = mp.get_context(start_method)
        self.remotes, self.work_remotes = zip(*[ctx.Pipe() for _ in range(n_envs)])
        self.processes = []
        for work_remote, remote, env_fn in zip(self.work_remotes, self.remotes, env_fns):
            args = (work_remote, remote, CloudpickleWrapper(env_fn))
            # daemon=True: a crash of the main process should not leave workers hanging.
            process = ctx.Process(target=_worker, args=args, daemon=True)
            process.start()
            self.processes.append(process)
            # The worker's end of the pipe is not used in the parent.
            work_remote.close()
        self.remotes[0].send(("get_spaces", None))
        observation_space, action_space = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)
    def step_async(self, actions: np.ndarray) -> None:
        """Dispatch one action to each worker without waiting for results."""
        for remote, action in zip(self.remotes, actions):
            remote.send(("step", action))
        self.waiting = True
    def step_wait(self) -> VecEnvStepReturn:
        """Collect the results of a previous :meth:`step_async` call."""
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return _flatten_obs(obs, self.observation_space), np.stack(rews), np.stack(dones), infos
    def seed(self, seed: Optional[int] = None) -> List[Union[None, int]]:
        """Seed each sub-environment with ``seed + env_index`` (random base if None)."""
        if seed is None:
            seed = np.random.randint(0, 2**32 - 1)
        for idx, remote in enumerate(self.remotes):
            remote.send(("seed", seed + idx))
        return [remote.recv() for remote in self.remotes]
    def reset(self) -> VecEnvObs:
        """Reset every sub-environment and return the stacked observations."""
        for remote in self.remotes:
            remote.send(("reset", None))
        obs = [remote.recv() for remote in self.remotes]
        return _flatten_obs(obs, self.observation_space)
    def close(self) -> None:
        """Terminate all worker processes (idempotent)."""
        if self.closed:
            return
        if self.waiting:
            # Drain pending step results so the close message is read next.
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(("close", None))
        for process in self.processes:
            process.join()
        self.closed = True
    def get_images(self) -> Sequence[np.ndarray]:
        """Return one rendered RGB frame per sub-environment."""
        for pipe in self.remotes:
            pipe.send(("render", "rgb_array"))
        imgs = [pipe.recv() for pipe in self.remotes]
        return imgs
    def get_attr(self, attr_name: str, indices: VecEnvIndices = None) -> List[Any]:
        """Return an attribute of the selected sub-environments (see base class)."""
        target_remotes = self._get_target_remotes(indices)
        for remote in target_remotes:
            remote.send(("get_attr", attr_name))
        return [remote.recv() for remote in target_remotes]
    def set_attr(self, attr_name: str, value: Any, indices: VecEnvIndices = None) -> None:
        """Set an attribute on the selected sub-environments (see base class)."""
        target_remotes = self._get_target_remotes(indices)
        for remote in target_remotes:
            remote.send(("set_attr", (attr_name, value)))
        for remote in target_remotes:
            remote.recv()
    def env_method(self, method_name: str, *method_args, indices: VecEnvIndices = None, **method_kwargs) -> List[Any]:
        """Call an instance method on the selected sub-environments."""
        target_remotes = self._get_target_remotes(indices)
        for remote in target_remotes:
            remote.send(("env_method", (method_name, method_args, method_kwargs)))
        return [remote.recv() for remote in target_remotes]
    def env_is_wrapped(self, wrapper_class: Type[gym.Wrapper], indices: VecEnvIndices = None) -> List[bool]:
        """Check if the selected worker environments are wrapped with ``wrapper_class``."""
        target_remotes = self._get_target_remotes(indices)
        for remote in target_remotes:
            remote.send(("is_wrapped", wrapper_class))
        return [remote.recv() for remote in target_remotes]
    def _get_target_remotes(self, indices: VecEnvIndices) -> List[Any]:
        """Return the pipe connections for the sub-environments selected by ``indices``."""
        indices = self._get_indices(indices)
        return [self.remotes[i] for i in indices]
def _flatten_obs(obs: Union[List[VecEnvObs], Tuple[VecEnvObs]], space: gym.spaces.Space) -> VecEnvObs:
    """Stack per-environment observations along a new leading (env) axis.

    :param obs: one observation per environment; each may be a NumPy array,
        or a dict or tuple of NumPy arrays matching ``space``.
    :return: stacked observations in the same container shape as ``space``.
    """
    assert isinstance(obs, (list, tuple)), "expected list or tuple of observations per environment"
    assert len(obs) > 0, "need observations from at least one environment"
    if isinstance(space, gym.spaces.Dict):
        assert isinstance(space.spaces, OrderedDict), "Dict space must have ordered subspaces"
        assert isinstance(obs[0], dict), "non-dict observation for environment with Dict observation space"
        # Stack key-wise, preserving the space's key order.
        return OrderedDict([(k, np.stack([o[k] for o in obs])) for k in space.spaces.keys()])
    elif isinstance(space, gym.spaces.Tuple):
        assert isinstance(obs[0], tuple), "non-tuple observation for environment with Tuple observation space"
        obs_len = len(space.spaces)
        return tuple(np.stack([o[i] for o in obs]) for i in range(obs_len))
    else:
        return np.stack(obs)
| true | true |
f723c763e4fef4f96747ccb5bded293f8e7b9e66 | 1,057 | py | Python | app/main/tools/test_report.py | BorrowHome/flasky-sandbox | 70ef7aa087a0954f7ff4b4845f6599d8481ef0b1 | [
"Apache-2.0"
] | 1 | 2021-03-15T02:59:13.000Z | 2021-03-15T02:59:13.000Z | app/main/tools/test_report.py | BorrowHome/flasky-sandbox | 70ef7aa087a0954f7ff4b4845f6599d8481ef0b1 | [
"Apache-2.0"
] | 6 | 2021-03-19T09:49:44.000Z | 2022-03-12T00:10:14.000Z | app/main/tools/test_report.py | BorrowHome/flasky-sandbox | 70ef7aa087a0954f7ff4b4845f6599d8481ef0b1 | [
"Apache-2.0"
] | 2 | 2020-01-11T13:39:22.000Z | 2020-07-02T03:57:43.000Z | import json
from flask import request, render_template
from app.utils.docx.docx import set_sand_docxtpl
from config import Config
from app.main import main
@main.route('/test_report/', methods=['GET', 'POST'])
def test_report():
    """Render the report form (GET) or persist posted JSON and build the docx (POST)."""
    if request.method != 'POST':
        return render_template('test_report.html')
    file_location = Config.SAVE_DOCUMENT_PATH
    raw_body = request.get_data()
    payload = json.loads(str(raw_body, encoding='utf-8'))
    # Keep a copy of the submission so /update_report/ can re-render it later.
    with open(file_location + 'data.json', 'w') as handle:
        json.dump(payload, handle)
    set_sand_docxtpl(payload)
    return "数据"
@main.route('/update_report/', methods=['GET'])
def update_report():
    """Re-generate the docx from the previously saved JSON at the requested location."""
    file_location = Config.SAVE_DOCUMENT_PATH
    location = request.args.get('location')
    with open(file_location + 'data.json', 'r') as handle:
        saved = json.load(handle)
    try:
        set_sand_docxtpl(saved, location)
        return "成功"
    except Exception as err:
        print(str(err))
        return str(err)
| 26.425 | 57 | 0.655629 | import json
from flask import request, render_template
from app.utils.docx.docx import set_sand_docxtpl
from config import Config
from app.main import main
@main.route('/test_report/', methods=['GET', 'POST'])
def test_report():
if request.method == 'POST':
file_location = Config.SAVE_DOCUMENT_PATH
origin_data = request.get_data()
str_data = str(origin_data, encoding='utf-8')
dict_data = json.loads(str_data)
with open(file_location + 'data.json', 'w') as f:
json.dump(dict_data, f)
set_sand_docxtpl(dict_data)
return "数据"
else:
return render_template('test_report.html')
@main.route('/update_report/', methods=['GET'])
def update_report():
file_location = Config.SAVE_DOCUMENT_PATH
location=request.args.get('location')
with open(file_location + 'data.json', 'r') as f:
dict_data = json.load(f)
try:
set_sand_docxtpl(dict_data,location)
return "成功"
except Exception as e:
print(str(e))
return str(e)
| true | true |
f723c8a42621a6c77b949415bc7d8ebb1efd5fee | 8,382 | py | Python | rstslide/plugins/Matplotlib/XKCDify.py | rartino/httk-rsttools | 57c46362899105a72b3b6efc45b50bcda8e574a7 | [
"MIT"
] | null | null | null | rstslide/plugins/Matplotlib/XKCDify.py | rartino/httk-rsttools | 57c46362899105a72b3b6efc45b50bcda8e574a7 | [
"MIT"
] | null | null | null | rstslide/plugins/Matplotlib/XKCDify.py | rartino/httk-rsttools | 57c46362899105a72b3b6efc45b50bcda8e574a7 | [
"MIT"
] | null | null | null | """
XKCD plot generator
-------------------
Author: Jake Vanderplas
This is a script that will take any matplotlib line diagram, and convert it
to an XKCD-style plot. It will work for plots with line & text elements,
including axes labels and titles (but not axes tick labels).
The idea for this comes from work by Damon McDougall
http://www.mail-archive.com/matplotlib-users@lists.sourceforge.net/msg25499.html
"""
import os
import numpy as np
import pylab as pl
from scipy import interpolate, signal
import matplotlib.font_manager as fm
script_path = os.path.dirname(os.path.abspath(__file__))
# We need a special font for the code below. It can be downloaded this way:
#import os
#import urllib2
#if not os.path.exists('Humor-Sans.ttf'):
# print 'Downloading the font Humor-sans.'
# fhandle = urllib2.urlopen('http://antiyawn.com/uploads/Humor-Sans.ttf')
# open('Humor-Sans.ttf', 'wb').write(fhandle.read())
def xkcd_line(x, y, xlim=None, ylim=None,
              mag=1.0, f1=30, f2=0.05, f3=15):
    """Mimic a hand-drawn line from (x, y) data.

    Parameters
    ----------
    x, y : array_like
        arrays to be modified
    xlim, ylim : tuple, optional
        assumed plot range for the modification; guessed from the data
        when not given
    mag : float
        magnitude of the distortions
    f1, f2, f3 : int, float, int
        filtering parameters: window size, high-frequency cutoff and
        filter size respectively

    Returns
    -------
    x, y : ndarrays
        the modified lines
    """
    x = np.asarray(x)
    y = np.asarray(y)

    # Fall back to the data range when no plot limits are supplied,
    # guarding against a degenerate (zero-width) range.
    if xlim is None:
        xlim = (x.min(), x.max())
    if ylim is None:
        ylim = (y.min(), y.max())
    if xlim[1] == xlim[0]:
        xlim = ylim
    if ylim[1] == ylim[0]:
        ylim = xlim

    # Normalise both coordinates onto [0, 1].
    xs = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])
    ys = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])

    # Arc length of the normalised path sets the resampling density.
    dxs = np.diff(xs)
    dys = np.diff(ys)
    dist_tot = np.sum(np.sqrt(dxs * dxs + dys * dys))
    Nu = int(200 * dist_tot)
    u = np.arange(-1, Nu + 1) * 1. / (Nu - 1)

    # Spline-resample the curve (order limited by the number of points).
    k = min(3, len(x) - 1)
    tck, _ = interpolate.splprep([xs, ys], s=0, k=k)
    x_int, y_int = interpolate.splev(u, tck)

    # Perturb perpendicular to the local tangent (central differences).
    tan_x = x_int[2:] - x_int[:-2]
    tan_y = y_int[2:] - y_int[:-2]
    dist = np.sqrt(tan_x * tan_x + tan_y * tan_y)

    # Low-pass-filtered gaussian noise gives a smooth hand-drawn wobble.
    coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2)
    b = signal.firwin(f1, f2 * dist_tot, window=('kaiser', f3))
    response = signal.lfilter(b, 1, coeffs)

    x_int[1:-1] += response * tan_y / dist
    y_int[1:-1] += response * tan_x / dist

    # Map back to the original data range, dropping the padded endpoints.
    x_out = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]
    y_out = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]
    return x_out, y_out
def XKCDify(ax, mag=1.0,
            f1=50, f2=0.01, f3=15,
            forecolor='k',
            bgcolor='w',
            xaxis_loc=None,
            yaxis_loc=None,
            xaxis_arrow='+',
            yaxis_arrow='+',
            ax_extend=0.1,
            expand_axes=False):
    """Make axis look hand-drawn

    This adjusts all lines, text, legends, and axes in the figure to look
    like xkcd plots. Other plot elements are not modified.

    Parameters
    ----------
    ax : Axes instance
        the axes to be modified.
    mag : float
        the magnitude of the distortion
    f1, f2, f3 : int, float, int
        filtering parameters. f1 gives the size of the window, f2 gives
        the high-frequency cutoff, f3 gives the size of the filter
    xaxis_loc, yaxis_loc : float
        The locations to draw the x and y axes. If not specified, they
        will be drawn from the bottom left of the plot
    xaxis_arrow, yaxis_arrow : str
        where to draw arrows on the x/y axes. Options are '+', '-', '+-', or ''
    ax_extend : float
        How far (fractionally) to extend the drawn axes beyond the original
        axes limits
    expand_axes : bool
        if True, then expand axes to fill the figure (useful if there is only
        a single axes in the figure)
    """
    # Get axes aspect
    ext = ax.get_window_extent().extents
    aspect = (ext[3] - ext[1]) / (ext[2] - ext[0])

    xlim = ax.get_xlim()
    ylim = ax.get_ylim()

    xspan = xlim[1] - xlim[0]
    # NOTE(review): this looks like a bug -- presumably ylim[1] - ylim[0]
    # was intended; confirm before relying on yspan.
    yspan = ylim[1] - xlim[0]

    # Extend the drawn axis lines slightly beyond the data limits.
    xax_lim = (xlim[0] - ax_extend * xspan,
               xlim[1] + ax_extend * xspan)
    yax_lim = (ylim[0] - ax_extend * yspan,
               ylim[1] + ax_extend * yspan)

    if xaxis_loc is None:
        xaxis_loc = ylim[0]

    if yaxis_loc is None:
        yaxis_loc = xlim[0]

    # Draw axes
    xaxis = pl.Line2D([xax_lim[0], xax_lim[1]], [xaxis_loc, xaxis_loc],
                      linestyle='-', color=forecolor)
    yaxis = pl.Line2D([yaxis_loc, yaxis_loc], [yax_lim[0], yax_lim[1]],
                      linestyle='-', color=forecolor)

    # Label axes (slightly rotated for a hand-written feel)
    ax.text(xax_lim[1], xaxis_loc - 0.05 * yspan, ax.get_xlabel(),
            fontsize=14, ha='right', va='top', rotation=5)
    ax.text(yaxis_loc - 0.05 * xspan, yax_lim[1], ax.get_ylabel(),
            fontsize=14, ha='right', va='top', rotation=85)
    ax.set_xlabel('')
    ax.set_ylabel('')

    # Add title
    ax.text(0.5 * (xax_lim[1] + xax_lim[0]), yax_lim[1],
            ax.get_title(),
            ha='center', va='bottom', fontsize=16)
    ax.set_title('')

    # Remove the existing lines, distort them, and draw them back; the two
    # synthetic axis lines are distorted along with the data lines.
    Nlines = len(ax.lines)
    lines = [xaxis, yaxis] + [ax.lines.pop(0) for i in range(Nlines)]

    for line in lines:
        x, y = line.get_data()

        x_int, y_int = xkcd_line(x, y, xlim, ylim,
                                 mag, f1, f2, f3)

        # create foreground and background line
        lw = line.get_linewidth()
        line.set_linewidth(2 * lw)
        line.set_data(x_int, y_int)

        # # don't add background line for axes
        # if (line is not xaxis) and (line is not yaxis):
        #     line_bg = pl.Line2D(x_int, y_int, color=bgcolor,
        #                         linewidth=8 * lw)
        #     ax.add_line(line_bg)

        ax.add_line(line)

    # Draw arrow-heads at the end of axes lines, with a little random
    # jitter so they too look hand-drawn.
    arr1 = 0.03 * np.array([-1, 0, -1])
    arr2 = 0.02 * np.array([-1, 0, 1])

    arr1[::2] += np.random.normal(0, 0.005, 2)
    arr2[::2] += np.random.normal(0, 0.005, 2)

    x, y = xaxis.get_data()
    if '+' in str(xaxis_arrow):
        ax.plot(x[-1] + arr1 * xspan * aspect,
                y[-1] + arr2 * yspan,
                color=forecolor, lw=2)
    if '-' in str(xaxis_arrow):
        ax.plot(x[0] - arr1 * xspan * aspect,
                y[0] - arr2 * yspan,
                color=forecolor, lw=2)

    x, y = yaxis.get_data()
    if '+' in str(yaxis_arrow):
        ax.plot(x[-1] + arr2 * xspan * aspect,
                y[-1] + arr1 * yspan,
                color=forecolor, lw=2)
    if '-' in str(yaxis_arrow):
        ax.plot(x[0] - arr2 * xspan * aspect,
                y[0] - arr1 * yspan,
                color=forecolor, lw=2)

    # Change all the fonts to humor-sans.
    prop = fm.FontProperties(fname=os.path.join(script_path, 'fonts', 'Humor-Sans.ttf'), size=16)
    for text in ax.texts:
        text.set_fontproperties(prop)

    # modify legend: drop the frame, wobble the legend lines and restyle
    # the legend text with the same font.
    leg = ax.get_legend()
    if leg is not None:
        leg.set_frame_on(False)

        for child in leg.get_children():
            if isinstance(child, pl.Line2D):
                x, y = child.get_data()
                child.set_data(xkcd_line(x, y, mag=1., f1=100, f2=0.001))
                child.set_linewidth(2 * child.get_linewidth())
            if isinstance(child, pl.Text):
                child.set_fontproperties(prop)

    # Set the axis limits (padded again beyond the drawn axis lines)
    ax.set_xlim(xax_lim[0] - 0.1 * xspan,
                xax_lim[1] + 0.1 * xspan)
    ax.set_ylim(yax_lim[0] - 0.1 * yspan,
                yax_lim[1] + 0.1 * yspan)

    # adjust the axes
    ax.set_xticks([])
    ax.set_yticks([])

    if expand_axes:
        ax.figure.set_facecolor(bgcolor)
        ax.set_axis_off()
        ax.set_position([0, 0, 1, 1])

    return ax
| 31.393258 | 97 | 0.570866 | import os
import numpy as np
import pylab as pl
from scipy import interpolate, signal
import matplotlib.font_manager as fm
script_path = os.path.dirname(os.path.abspath(__file__))
def xkcd_line(x, y, xlim=None, ylim=None,
mag=1.0, f1=30, f2=0.05, f3=15):
x = np.asarray(x)
y = np.asarray(y)
if xlim is None:
xlim = (x.min(), x.max())
if ylim is None:
ylim = (y.min(), y.max())
if xlim[1] == xlim[0]:
xlim = ylim
if ylim[1] == ylim[0]:
ylim = xlim
x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])
y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])
dx = x_scaled[1:] - x_scaled[:-1]
dy = y_scaled[1:] - y_scaled[:-1]
dist_tot = np.sum(np.sqrt(dx * dx + dy * dy))
Nu = int(200 * dist_tot)
u = np.arange(-1, Nu + 1) * 1. / (Nu - 1)
k = min(3, len(x) - 1)
res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k)
x_int, y_int = interpolate.splev(u, res[0])
dx = x_int[2:] - x_int[:-2]
dy = y_int[2:] - y_int[:-2]
dist = np.sqrt(dx * dx + dy * dy)
# create a filtered perturbation
coeffs = mag * np.random.normal(0, 0.01, len(x_int) - 2)
b = signal.firwin(f1, f2 * dist_tot, window=('kaiser', f3))
response = signal.lfilter(b, 1, coeffs)
x_int[1:-1] += response * dy / dist
y_int[1:-1] += response * dx / dist
# un-scale data
x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]
y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]
return x_int, y_int
def XKCDify(ax, mag=1.0,
f1=50, f2=0.01, f3=15,
forecolor='k',
bgcolor='w',
xaxis_loc=None,
yaxis_loc=None,
xaxis_arrow='+',
yaxis_arrow='+',
ax_extend=0.1,
expand_axes=False):
# Get axes aspect
ext = ax.get_window_extent().extents
aspect = (ext[3] - ext[1]) / (ext[2] - ext[0])
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xspan = xlim[1] - xlim[0]
yspan = ylim[1] - xlim[0]
xax_lim = (xlim[0] - ax_extend * xspan,
xlim[1] + ax_extend * xspan)
yax_lim = (ylim[0] - ax_extend * yspan,
ylim[1] + ax_extend * yspan)
if xaxis_loc is None:
xaxis_loc = ylim[0]
if yaxis_loc is None:
yaxis_loc = xlim[0]
# Draw axes
xaxis = pl.Line2D([xax_lim[0], xax_lim[1]], [xaxis_loc, xaxis_loc],
linestyle='-', color=forecolor)
yaxis = pl.Line2D([yaxis_loc, yaxis_loc], [yax_lim[0], yax_lim[1]],
linestyle='-', color=forecolor)
# Label axes3, 0.5, 'hello', fontsize=14)
ax.text(xax_lim[1], xaxis_loc - 0.05 * yspan, ax.get_xlabel(),
fontsize=14, ha='right', va='top', rotation=5)
ax.text(yaxis_loc - 0.05 * xspan, yax_lim[1], ax.get_ylabel(),
fontsize=14, ha='right', va='top', rotation=85)
ax.set_xlabel('')
ax.set_ylabel('')
# Add title
ax.text(0.5 * (xax_lim[1] + xax_lim[0]), yax_lim[1],
ax.get_title(),
ha='center', va='bottom', fontsize=16)
ax.set_title('')
Nlines = len(ax.lines)
lines = [xaxis, yaxis] + [ax.lines.pop(0) for i in range(Nlines)]
for line in lines:
x, y = line.get_data()
x_int, y_int = xkcd_line(x, y, xlim, ylim,
mag, f1, f2, f3)
# create foreground and background line
lw = line.get_linewidth()
line.set_linewidth(2 * lw)
line.set_data(x_int, y_int)
# # don't add background line for axes
ax.add_line(line)
arr1 = 0.03 * np.array([-1, 0, -1])
arr2 = 0.02 * np.array([-1, 0, 1])
arr1[::2] += np.random.normal(0, 0.005, 2)
arr2[::2] += np.random.normal(0, 0.005, 2)
x, y = xaxis.get_data()
if '+' in str(xaxis_arrow):
ax.plot(x[-1] + arr1 * xspan * aspect,
y[-1] + arr2 * yspan,
color=forecolor, lw=2)
if '-' in str(xaxis_arrow):
ax.plot(x[0] - arr1 * xspan * aspect,
y[0] - arr2 * yspan,
color=forecolor, lw=2)
x, y = yaxis.get_data()
if '+' in str(yaxis_arrow):
ax.plot(x[-1] + arr2 * xspan * aspect,
y[-1] + arr1 * yspan,
color=forecolor, lw=2)
if '-' in str(yaxis_arrow):
ax.plot(x[0] - arr2 * xspan * aspect,
y[0] - arr1 * yspan,
color=forecolor, lw=2)
prop = fm.FontProperties(fname=os.path.join(script_path, 'fonts', 'Humor-Sans.ttf'), size=16)
for text in ax.texts:
text.set_fontproperties(prop)
leg = ax.get_legend()
if leg is not None:
leg.set_frame_on(False)
for child in leg.get_children():
if isinstance(child, pl.Line2D):
x, y = child.get_data()
child.set_data(xkcd_line(x, y, mag=1., f1=100, f2=0.001))
child.set_linewidth(2 * child.get_linewidth())
if isinstance(child, pl.Text):
child.set_fontproperties(prop)
ax.set_xlim(xax_lim[0] - 0.1 * xspan,
xax_lim[1] + 0.1 * xspan)
ax.set_ylim(yax_lim[0] - 0.1 * yspan,
yax_lim[1] + 0.1 * yspan)
ax.set_xticks([])
ax.set_yticks([])
if expand_axes:
ax.figure.set_facecolor(bgcolor)
ax.set_axis_off()
ax.set_position([0, 0, 1, 1])
return ax
| true | true |
f723c8c94f31ee4136915426bd76d4aad731bdda | 5,323 | py | Python | utils/utils_fit.py | bubbliiiing/yolox-tf2 | 0407c77858d436a6b370e591eea7963cc807f3b4 | [
"Apache-2.0"
] | 49 | 2021-11-01T06:02:21.000Z | 2022-03-29T07:08:22.000Z | utils/utils_fit.py | bubbliiiing/yolox-tf2 | 0407c77858d436a6b370e591eea7963cc807f3b4 | [
"Apache-2.0"
] | 6 | 2021-11-17T08:35:09.000Z | 2022-02-15T12:43:14.000Z | utils/utils_fit.py | bubbliiiing/yolox-tf2 | 0407c77858d436a6b370e591eea7963cc807f3b4 | [
"Apache-2.0"
] | 20 | 2021-11-19T12:03:21.000Z | 2022-03-16T01:45:25.000Z | import os
import tensorflow as tf
from nets.yolo import get_yolo_loss
from tqdm import tqdm
#------------------------------#
#   Guard against bugs
#------------------------------#
def get_train_step_fn(strategy):
    """Build the training-step function; when ``strategy`` is given, wrap it
    for multi-GPU execution via ``tf.distribute``."""
    @tf.function
    def train_step(imgs, targets, net, yolo_loss, optimizer):
        with tf.GradientTape() as tape:
            #------------------------------#
            #   Compute the loss
            #------------------------------#
            P5_output, P4_output, P3_output = net(imgs, training=True)
            args = [P5_output, P4_output, P3_output] + [targets]
            loss_value = yolo_loss(args)
            #------------------------------#
            #   Add the l2 regularisation terms
            #------------------------------#
            loss_value = tf.reduce_sum(net.losses) + loss_value
        grads = tape.gradient(loss_value, net.trainable_variables)
        optimizer.apply_gradients(zip(grads, net.trainable_variables))
        return loss_value

    if strategy == None:
        return train_step
    else:
        #----------------------#
        #   Multi-GPU training
        #----------------------#
        @tf.function
        def distributed_train_step(imgs, targets, net, yolo_loss, optimizer):
            # Run the step on every replica, then average the per-replica losses.
            per_replica_losses = strategy.run(train_step, args=(imgs, targets, net, yolo_loss, optimizer,))
            return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_losses,
                                   axis=None)
        return distributed_train_step
#----------------------#
#   Guard against bugs
#----------------------#
def get_val_step_fn(strategy):
    """Build the validation-step function; when ``strategy`` is given, wrap it
    for multi-GPU execution via ``tf.distribute``."""
    @tf.function
    def val_step(imgs, targets, net, yolo_loss, optimizer):
        #------------------------------#
        #   Compute the loss
        #------------------------------#
        P5_output, P4_output, P3_output = net(imgs, training=False)
        args = [P5_output, P4_output, P3_output] + [targets]
        loss_value = yolo_loss(args)
        #------------------------------#
        #   Add the l2 regularisation terms
        #------------------------------#
        loss_value = tf.reduce_sum(net.losses) + loss_value
        return loss_value

    if strategy == None:
        return val_step
    else:
        #----------------------#
        #   Multi-GPU validation
        #----------------------#
        @tf.function
        def distributed_val_step(imgs, targets, net, yolo_loss, optimizer):
            # Run the step on every replica, then average the per-replica losses.
            per_replica_losses = strategy.run(val_step, args=(imgs, targets, net, yolo_loss, optimizer,))
            return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_losses,
                                   axis=None)
        return distributed_val_step
def fit_one_epoch(net, yolo_loss, loss_history, eval_callback, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch,
                  input_shape, num_classes, save_period, save_dir, strategy):
    """Run one training epoch followed by one validation epoch.

    ``net`` is the YOLO model, ``yolo_loss`` its loss layer, ``gen``/``gen_val``
    the training/validation batch generators (``epoch_step``/``epoch_step_val``
    batches are drawn from each), and ``strategy`` an optional ``tf.distribute``
    strategy for multi-GPU runs.  Loss statistics are pushed to ``loss_history``
    and ``eval_callback``; weights are written to ``save_dir`` every
    ``save_period`` epochs, plus a running best and last checkpoint.
    """
    train_step = get_train_step_fn(strategy)
    val_step = get_val_step_fn(strategy)

    loss = 0
    val_loss = 0
    print('Start Train')
    with tqdm(total=epoch_step, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen):
            if iteration >= epoch_step:
                break
            images, targets = batch[0], batch[1]
            loss_value = train_step(images, targets, net, yolo_loss, optimizer)
            loss = loss + loss_value

            pbar.set_postfix(**{'total_loss': float(loss) / (iteration + 1),
                                'lr': optimizer.lr.numpy()})
            pbar.update(1)
    print('Finish Train')

    print('Start Validation')
    with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}', postfix=dict, mininterval=0.3) as pbar:
        for iteration, batch in enumerate(gen_val):
            if iteration >= epoch_step_val:
                break
            images, targets = batch[0], batch[1]
            loss_value = val_step(images, targets, net, yolo_loss, optimizer)
            val_loss = val_loss + loss_value

            pbar.set_postfix(**{'total_loss': float(val_loss) / (iteration + 1)})
            pbar.update(1)
    print('Finish Validation')

    logs = {'loss': loss.numpy() / epoch_step, 'val_loss': val_loss.numpy() / epoch_step_val}
    loss_history.on_epoch_end([], logs)
    eval_callback.on_epoch_end(epoch, logs)
    print('Epoch:' + str(epoch + 1) + '/' + str(Epoch))
    print('Total Loss: %.3f || Val Loss: %.3f ' % (loss / epoch_step, val_loss / epoch_step_val))

    #-----------------------------------------------#
    #   Save the weights
    #-----------------------------------------------#
    if (epoch + 1) % save_period == 0 or epoch + 1 == Epoch:
        net.save_weights(os.path.join(save_dir, "ep%03d-loss%.3f-val_loss%.3f.h5" % (epoch + 1, loss / epoch_step, val_loss / epoch_step_val)))

    if len(loss_history.val_loss) <= 1 or (val_loss / epoch_step_val) <= min(loss_history.val_loss):
        # Fixed: the message previously said ".pth" but the file written is ".h5".
        print('Save best model to best_epoch_weights.h5')
        net.save_weights(os.path.join(save_dir, "best_epoch_weights.h5"))

    net.save_weights(os.path.join(save_dir, "last_epoch_weights.h5"))
net.save_weights(os.path.join(save_dir, "last_epoch_weights.h5")) | 43.276423 | 144 | 0.520195 | import os
import tensorflow as tf
from nets.yolo import get_yolo_loss
from tqdm import tqdm
ef get_train_step_fn(strategy):
@tf.function
def train_step(imgs, targets, net, yolo_loss, optimizer):
with tf.GradientTape() as tape:
P5_output, P4_output, P3_output = net(imgs, training=True)
args = [P5_output, P4_output, P3_output] + [targets]
loss_value = yolo_loss(args)
loss_value = tf.reduce_sum(net.losses) + loss_value
grads = tape.gradient(loss_value, net.trainable_variables)
optimizer.apply_gradients(zip(grads, net.trainable_variables))
return loss_value
if strategy == None:
return train_step
else:
@tf.function
def distributed_train_step(imgs, targets, net, yolo_loss, optimizer):
per_replica_losses = strategy.run(train_step, args=(imgs, targets, net, yolo_loss, optimizer,))
return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_losses,
axis=None)
return distributed_train_step
ef get_val_step_fn(strategy):
@tf.function
def val_step(imgs, targets, net, yolo_loss, optimizer):
P5_output, P4_output, P3_output = net(imgs, training=False)
args = [P5_output, P4_output, P3_output] + [targets]
loss_value = yolo_loss(args)
loss_value = tf.reduce_sum(net.losses) + loss_value
return loss_value
if strategy == None:
return val_step
else:
@tf.function
def distributed_val_step(imgs, targets, net, yolo_loss, optimizer):
per_replica_losses = strategy.run(val_step, args=(imgs, targets, net, yolo_loss, optimizer,))
return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_losses,
axis=None)
return distributed_val_step
def fit_one_epoch(net, yolo_loss, loss_history, eval_callback, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch,
input_shape, num_classes, save_period, save_dir, strategy):
train_step = get_train_step_fn(strategy)
val_step = get_val_step_fn(strategy)
loss = 0
val_loss = 0
print('Start Train')
with tqdm(total=epoch_step,desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen):
if iteration >= epoch_step:
break
images, targets = batch[0], batch[1]
loss_value = train_step(images, targets, net, yolo_loss, optimizer)
loss = loss + loss_value
pbar.set_postfix(**{'total_loss': float(loss) / (iteration + 1),
'lr' : optimizer.lr.numpy()})
pbar.update(1)
print('Finish Train')
print('Start Validation')
with tqdm(total=epoch_step_val, desc=f'Epoch {epoch + 1}/{Epoch}',postfix=dict,mininterval=0.3) as pbar:
for iteration, batch in enumerate(gen_val):
if iteration >= epoch_step_val:
break
images, targets = batch[0], batch[1]
loss_value = val_step(images, targets, net, yolo_loss, optimizer)
val_loss = val_loss + loss_value
pbar.set_postfix(**{'total_loss': float(val_loss) / (iteration + 1)})
pbar.update(1)
print('Finish Validation')
logs = {'loss': loss.numpy() / epoch_step, 'val_loss': val_loss.numpy() / epoch_step_val}
loss_history.on_epoch_end([], logs)
eval_callback.on_epoch_end(epoch, logs)
print('Epoch:'+ str(epoch + 1) + '/' + str(Epoch))
print('Total Loss: %.3f || Val Loss: %.3f ' % (loss / epoch_step, val_loss / epoch_step_val))
if (epoch + 1) % save_period == 0 or epoch + 1 == Epoch:
net.save_weights(os.path.join(save_dir, "ep%03d-loss%.3f-val_loss%.3f.h5" % (epoch + 1, loss / epoch_step, val_loss / epoch_step_val)))
if len(loss_history.val_loss) <= 1 or (val_loss / epoch_step_val) <= min(loss_history.val_loss):
print('Save best model to best_epoch_weights.pth')
net.save_weights(os.path.join(save_dir, "best_epoch_weights.h5"))
net.save_weights(os.path.join(save_dir, "last_epoch_weights.h5")) | true | true |
f723c9484375a916d20b7a139369a7ec0dc8afb7 | 3,140 | py | Python | warrior/WarriorCore/Classes/war_print_class.py | YutakaMizugaki/warriorframework | 685761cf044182ec88ce86a942d4be1e150a1256 | [
"Apache-2.0"
] | 24 | 2017-06-06T15:48:08.000Z | 2021-03-17T07:52:52.000Z | warrior/WarriorCore/Classes/war_print_class.py | YutakaMizugaki/warriorframework | 685761cf044182ec88ce86a942d4be1e150a1256 | [
"Apache-2.0"
] | 272 | 2017-05-19T20:39:12.000Z | 2021-12-13T19:34:51.000Z | warrior/WarriorCore/Classes/war_print_class.py | pavithra-gowda/warrior | 19b153310552b986b86b5470fcfea9547a74c3a9 | [
"Apache-2.0"
] | 37 | 2017-05-17T21:24:37.000Z | 2021-07-24T18:09:22.000Z | '''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import sys
import re
"""
This class will trap stdout and redirects the message to logfile and stdout
It takes console_logfile and write_to_stdout ( boolean flag) as arguments.
!!! Important!!!
DO NOT import any modules from warrior/Framework package that uses
warrior/Framework/Utils/print_Utils.py at module level into this module
as it will lead to cyclic imports.
"""
def print_main(message, print_type, color_message=None, *args, **kwargs):
    """The main print function will be called by other print functions.

    Formats ``message`` (or ``color_message`` when given) with ``print_type``,
    writes it to ``sys.stdout`` (passing the ``logging`` kwarg through when
    stdout has been replaced by a RedirectPrint tee), mirrors it into the
    testcase notes via TCOBJ, and returns the formatted string.
    """
    if color_message is not None:
        print_string = print_type + " " + str(color_message)
    elif color_message is None:
        print_string = print_type + " " + str(message)
    # NOTE(review): a non-empty *args overrides both branches above -- any
    # color_message is discarded in that case; confirm this is intended.
    if len(args) > 0:
        print_string = (print_type + " " + str(message) + str(args))
    # set logging argument default to True, to write the message in the log file
    if isinstance(sys.stdout, RedirectPrint):
        sys.stdout.write((print_string + '\n'),
                         logging=kwargs.get('logging', True))
    else:
        sys.stdout.write(print_string + '\n')
        sys.stdout.flush()
    # Imported here, not at module level, to avoid the cyclic import called
    # out in the module header.
    from Framework.Utils.testcase_Utils import TCOBJ
    if TCOBJ.pnote is False:
        TCOBJ.p_note_level(message, print_type)
    return print_string
class RedirectPrint(object):
    """Tee for ``sys.stdout``: echoes writes to the real stdout and, with
    ANSI colour codes stripped, to a console log file.

    Constructing an instance with a file object redirects ``sys.stdout`` to
    the instance; passing ``None`` leaves ``sys.stdout`` untouched.
    """

    def __init__(self, console_logfile):
        """Capture the current stdout, then redirect ``sys.stdout``.

        Bug fix: the original captured ``sys.stdout`` *after* ``get_file``
        had already replaced it with ``self``, so ``self.stdout`` pointed at
        ``self`` and every ``write`` recursed until RecursionError.  The
        original stream must be saved before the redirection happens.
        """
        self.stdout = sys.stdout
        self.get_file(console_logfile)

    def get_file(self, console_logfile):
        """If the console logfile is not None redirect sys.stdout to
        console logfile
        """
        self.file = console_logfile
        if self.file is not None:
            sys.stdout = self

    def write(self, data, logging=True):
        """
        - Writes data to the sys.stdout
        - Writes data to log file only if the logging is True
        - Removes the ansii escape chars before writing to file
        """
        self.stdout.write(data)
        ansi_escape = re.compile(r'\x1b[^m]*m')
        data = ansi_escape.sub('', data)
        # write to log file if logging is set to True
        if logging is True:
            self.file.write(data)
            self.file.flush()

    def isatty(self):
        """Delegate tty detection to the real stdout (libraries probe this)."""
        return self.stdout.isatty()

    def flush(self):
        """Flush the real stdout."""
        return self.stdout.flush()
| 35.280899 | 80 | 0.670064 |
import sys
import re
def print_main(message, print_type, color_message=None, *args, **kwargs):
if color_message is not None:
print_string = print_type + " " + str(color_message)
elif color_message is None:
print_string = print_type + " " + str(message)
if len(args) > 0:
print_string = (print_type + " " + str(message) + str(args))
if isinstance(sys.stdout, RedirectPrint):
sys.stdout.write((print_string + '\n'),
logging=kwargs.get('logging', True))
else:
sys.stdout.write(print_string + '\n')
sys.stdout.flush()
from Framework.Utils.testcase_Utils import TCOBJ
if TCOBJ.pnote is False:
TCOBJ.p_note_level(message, print_type)
return print_string
class RedirectPrint(object):
def __init__(self, console_logfile):
self.get_file(console_logfile)
self.stdout = sys.stdout
def get_file(self, console_logfile):
self.file = console_logfile
if self.file is not None:
sys.stdout = self
def write(self, data, logging=True):
self.stdout.write(data)
ansi_escape = re.compile(r'\x1b[^m]*m')
data = ansi_escape.sub('', data)
if logging is True:
self.file.write(data)
self.file.flush()
def isatty(self):
return self.stdout.isatty()
def flush(self):
return self.stdout.flush()
| true | true |
f723c959dc405638e9be66e367589488a0bb7950 | 74,965 | py | Python | doppyo/sugar.py | aaronspring/doppyo | e29e21fbb997f024f39d2e5e67decfc235b0dcca | [
"MIT"
] | null | null | null | doppyo/sugar.py | aaronspring/doppyo | e29e21fbb997f024f39d2e5e67decfc235b0dcca | [
"MIT"
] | null | null | null | doppyo/sugar.py | aaronspring/doppyo | e29e21fbb997f024f39d2e5e67decfc235b0dcca | [
"MIT"
] | null | null | null | """
Collection of old doppyo functions and useful tidbits for internal dcfp use
Authors: Dougie Squire and Thomas Moore
Date created: 01/10/2018
Python Version: 3.6
"""
# ===================================================================================================
# Packages
# ===================================================================================================
import numpy as np
import pandas as pd
import xarray as xr
import cartopy
from collections import Sequence
from itertools import chain, count
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from cartopy.util import add_cyclic_point
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
# Load doppyo packages -----
from doppyo import utils
# ===================================================================================================
def rank_gufunc(x):
    ''' Returns ranked data along specified dimension '''
    import bottleneck
    # Rank (NaN-aware) along the trailing axis, then keep index 0: the
    # caller (compute_rank) concatenates the observations first along that
    # axis, so slot 0 holds the observation's rank.
    return bottleneck.nanrankdata(x, axis=-1)[..., 0]
def compute_rank(da_1, da_2, over_dim):
    ''' Feeds forecast and observation data to ufunc that ranks data along specified dimension'''
    # Give the observations a singleton ensemble-style coordinate when they
    # lack one, so they can be concatenated with the forecast -----
    da_2_pass = da_2.copy()
    if over_dim not in da_2_pass.coords:
        da_2_pass.coords[over_dim] = -1
        da_2_pass = da_2_pass.expand_dims(over_dim)

    # Keep only instances present in both arrays (ensemble dim excluded),
    # stacking the observations first along over_dim -----
    aligned = xr.align(da_2_pass, da_1, join='inner', exclude=over_dim)
    combined = xr.concat(aligned, dim=over_dim)

    return xr.apply_ufunc(rank_gufunc, combined,
                          input_core_dims=[[over_dim]],
                          dask='allowed',
                          output_dtypes=[int]).rename('rank')
# ===================================================================================================
def categorize(da, bin_edges):
    """
    Returns the indices of the bins to which each value in input array belongs
    Output indices are such that bin_edges[i-1] <= x < bin_edges[i]
    """
    digitized = xr.apply_ufunc(np.digitize, da, bin_edges,
                               input_core_dims=[[], []],
                               dask='allowed',
                               output_dtypes=[int])
    return digitized.rename('categorized')
# ===================================================================================================
def unstack_and_count(da, dims):
    """ Unstacks provided xarray object and returns the total number of elements along dims """
    try:
        unstacked = da.unstack(da.dims[0])
    except ValueError:
        # Object was not stacked -- use it directly.
        unstacked = da

    # Ones everywhere the data is defined -----
    counts = (0 * unstacked) + 1
    if dims is None:
        return counts
    return counts.sum(dim=dims, skipna=True)
def compute_histogram(da, bin_edges, over_dims):
    """ Returns the histogram of data over the specified dimensions """
    # groupby_bins requires a named array -----
    named = da.rename('data')
    hist = (named.groupby_bins(named, bins=bin_edges, squeeze=False)
                 .apply(unstack_and_count, dims=over_dims)
                 .fillna(0)
                 .rename({'data_bins': 'bins'}))
    # Label each bin with its centre point -----
    hist['bins'] = (bin_edges[0:-1] + bin_edges[1:]) / 2

    # Mask (with nan) locations where no data fell into any bin -----
    return hist.astype(int).where(hist.sum('bins') != 0)
# ===================================================================================================
def calc_gradient(da, dim, x=None):
    """
    Returns the gradient computed using second order accurate central differences in the
    interior points and either first order accurate one-sided (forward or backwards)
    differences at the boundaries

    See https://docs.scipy.org/doc/numpy-1.14.0/reference/generated/numpy.gradient.html
    """
    # Replace dimension values if specified -----
    da_n = da.copy()
    if x is None:
        x = da_n[dim]

    # Interior points: second-order central difference on a possibly
    # non-uniform grid; hs is the spacing behind each point, hd the
    # spacing ahead (built with negative shifts along dim).
    centre_chunk = range(len(x[dim])-2)

    f_hd = da_n.shift(**{dim:-2})
    f = da_n.shift(**{dim:-1})
    f_hs = da_n
    hs = x.shift(**{dim:-1}) - x
    hd = x.shift(**{dim:-2}) - x.shift(**{dim:-1})
    c = (hs ** 2 * f_hd + (hd ** 2 - hs ** 2) * f - hd ** 2 * f_hs) / \
        (hs * hd * (hd + hs)).isel(**{dim : centre_chunk})
    c[dim] = x[dim][1:-1]

    # Boundaries: first-order one-sided differences -- forward at the
    # left edge (l), backward at the right edge (r).
    l = (da_n.shift(**{dim:-1}) - da_n).isel(**{dim : 0}) / \
        (x.shift(**{dim:-1}) - x).isel(**{dim : 0})
    r = (-da_n.shift(**{dim:1}) + da_n).isel(**{dim : -1}) / \
        (-x.shift(**{dim:1}) + x).isel(**{dim : -1})

    # Stitch left edge, interior and right edge back together along dim.
    grad = xr.concat([l, c, r], dim=dim)
    grad[dim] = da[dim]

    return grad
# ===================================================================================================
def bias_correct_ms(da_biased, da_target, da_target_clim=None, init_date_name='init_date',
                    lead_time_name='lead_time'):
    """
    Adjusts, per month and lead time, the mean and standard deviation of da_biased to match that
    of da_target.
    Author: Dougie Squire
    Date: 01/09/2018
    Parameters
    ----------
    da_biased : xarray DataArray
        Array containing values to be corrected. The time information of this array is anticipated
        in a lead_time/inital_date format
    da_target : xarray DataArray
        Array containing values to use for the correction.
    da_target_clim : xarray DataArray, optional
        Array containing a climatology of da_target. If da_target_clim is provided, this function
        returns both the corrected full field and the anomalies. Otherwise, returns only the
        anomalies
    init_date_name : str, optional
        Name of initial date dimension
    lead_time_name : str, optional
        Name of lead time dimension
    Returns
    -------
    corrected : xarray DataArray
        Bias corrected array
    Examples
    --------
    >>> biased = xr.DataArray(np.random.normal(size=(48,6)),
    ...                       coords=[('init_date', pd.date_range(start='1/1/2018', periods=48, freq='M')),
    ...                               ('lead_time', np.arange(6))])
    >>> biased['lead_time'].attrs['units'] = 'M'
    >>> target = xr.DataArray(np.random.normal(size=(48)),
    ...                       coords=[('time', pd.date_range(start='1/1/2000', periods=48, freq='M'))])
    >>> doppyo.utils.bias_correct_ms(biased, target)
    <xarray.DataArray (init_date: 48, lead_time: 6)>
    array([[ 9.336394e-02,  1.133997e-01, -5.851293e-01, -4.908594e-02,
             7.952765e-01,  5.325052e-01],
           [-1.131123e+00,  1.603380e-01, -1.626906e+00, -1.811439e+00,
            -1.653359e-01, -1.871170e-01],
           [ 6.515435e-01, -1.064662e+00,  2.249610e+00,  6.881682e-01,
            -1.831233e-01, -1.159470e+00],
           ...,
           [-2.096226e+00,  3.143062e-04,  3.603787e-01, -1.515535e+00,
             5.421578e-02, -6.446119e-01],
           [-8.186274e-01, -9.858171e-01,  1.933307e+00,  5.227265e-02,
             5.443201e-01, -7.059492e-01],
           [ 2.253396e-02,  2.238470e+00,  1.138728e-01, -3.617103e-01,
             1.678223e+00, -2.413158e+00]])
    Coordinates:
      * lead_time  (lead_time) int64 0 1 2 3 4 5
      * init_date  (init_date) datetime64[ns] 2018-01-31 2018-02-28 ... 2021-12-31
    Notes
    -----------
    Many years of initial dates (in da_biased) and times (in da_target) must exist for the mean and standard
    deviation to be computed reliably
    """
    def _groupby_lead_and_mean(da, over_dims, init_date_name, lead_time_name):
        """ Groups provided array by lead time and computes mean """
        return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).mean(over_dims, skipna=True)
    def _groupby_lead_and_std(da, over_dims, init_date_name, lead_time_name):
        """ Groups provided array by lead time and computes standard deviation """
        return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).std(over_dims, skipna=True)
    def _unstack_and_shift_per_month(da, shift, init_date_name, lead_time_name):
        """ Unstacks and adjusts input array by a constant shift as a function of month """
        da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name)
        the_month = np.ndarray.flatten(da_us.month.values)
        the_month = int(np.unique(the_month[~np.isnan(the_month)]))
        return da_us - shift.sel(month=the_month)
    def _unstack_and_scale_per_month(da, scale, init_date_name, lead_time_name):
        """ Unstacks and scales input array by a constant value as a function of month """
        da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name)
        the_month = np.ndarray.flatten(da_us.month.values)
        the_month = int(np.unique(the_month[~np.isnan(the_month)]))
        return da_us * scale.sel(month=the_month)
    def _scale_per_month(da, scale):
        """ Scales input array by a constant value as a function of month """
        return da.groupby('time.month') * scale
    # Anomalize/rescale in datetime space, then convert back to lead-time space -----
    _anomalize = lambda data, clim: datetime_to_leadtime(
                                        anomalize(
                                            leadtime_to_datetime(data),clim))
    _rescale = lambda da, scale : datetime_to_leadtime(
                                      _scale_per_month(
                                          leadtime_to_datetime(da), scale))
    da_biased = da_biased.copy()
    da_target = da_target.copy()
    # Map each (init_date, lead_time) pair onto its calendar month, 1..12
    # (assumes lead_time is in units of months -- see the Examples) -----
    month = (da_biased[init_date_name].dt.month + da_biased[lead_time_name]) % 12
    month = month.where(month != 0, 12)
    # Correct the mean -----
    da_biased.coords['month'] = month
    try:
        da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=[init_date_name,'ensemble'],
                                                          init_date_name=init_date_name, lead_time_name=lead_time_name)
    except ValueError:
        # No 'ensemble' dimension: average over initial dates only -----
        da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=init_date_name,
                                                          init_date_name=init_date_name, lead_time_name=lead_time_name)
    if da_target_clim is not None:
        da_target_mean = da_target.groupby('time.month').mean('time')
        da_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean - da_target_mean),
                                                       init_date_name=init_date_name, lead_time_name=lead_time_name) \
                               .mean('month', skipna=True)
        da_meancorr[lead_time_name] = da_biased[lead_time_name]
        da_meancorr.coords['month'] = month
        # Compute the corrected anomalies -----
        da_anom_meancorr = da_meancorr.groupby(init_date_name).apply(_anomalize, clim=da_target_clim)
        da_anom_meancorr.coords['month'] = month
    else:
        da_anom_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean),
                                                            init_date_name=init_date_name, lead_time_name=lead_time_name) \
                                    .mean('month', skipna=True)
        # NOTE(review): the following self-assignment is a no-op; the parallel branch above
        # assigns da_biased[lead_time_name] -- confirm intent
        da_anom_meancorr[lead_time_name] = da_anom_meancorr[lead_time_name]
        da_anom_meancorr.coords['month'] = month
    # Correct the standard deviation -----
    try:
        da_biased_std_tmp = da_anom_meancorr.groupby('month').apply(_groupby_lead_and_std, over_dims=[init_date_name,'ensemble'],
                                                                    init_date_name=init_date_name, lead_time_name=lead_time_name)
    except ValueError:
        da_biased_std_tmp = da_anom_meancorr.groupby('month').apply(_groupby_lead_and_std, over_dims=init_date_name,
                                                                    init_date_name=init_date_name, lead_time_name=lead_time_name)
    try:
        da_target_std = da_target.sel(lat=da_biased.lat, lon=da_biased.lon).groupby('time.month').std('time')
    except Exception:
        # da_biased has no lat/lon coordinates (or they do not align with da_target):
        # fall back to the full target grid. (Was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        da_target_std = da_target.groupby('time.month').std('time')
    da_anom_stdcorr_tmp = da_anom_meancorr.groupby('month').apply(_unstack_and_scale_per_month,
                                                                  scale=(da_target_std / da_biased_std_tmp),
                                                                  init_date_name=init_date_name,
                                                                  lead_time_name=lead_time_name) \
                                          .mean('month', skipna=True)
    da_anom_stdcorr_tmp[lead_time_name] = da_biased[lead_time_name]
    da_anom_stdcorr_tmp.coords['month'] = month
    # This will "squeeze" each pdf at each lead time appropriately. However, the total variance across all leads for
    # a given month will now be incorrect. Thus, we now rescale as a function of month only
    try:
        da_biased_std = concat_times(da_anom_stdcorr_tmp).groupby('time.month').std(['time','ensemble'])
    except ValueError:
        da_biased_std = concat_times(da_anom_stdcorr_tmp).groupby('time.month').std('time')
    da_anom_stdcorr = da_anom_stdcorr_tmp.groupby(init_date_name).apply(_rescale, scale=(da_target_std / da_biased_std))
    if da_target_clim is not None:
        # Reconstruct the full field by adding the climatology back onto the anomalies -----
        da_stdcorr = da_anom_stdcorr.groupby(init_date_name).apply(_anomalize, clim=-da_target_clim)
        return da_stdcorr.drop('month'), da_anom_stdcorr.drop('month')
    else:
        return da_anom_stdcorr.drop('month')
# ===================================================================================================
def bias_correct_m(da_biased, da_target, da_target_clim=None, init_date_name='init_date',
                   lead_time_name='lead_time'):
    """
    Adjusts, per month and lead time, the mean of da_biased to match that of da_target
    Author: Dougie Squire
    Date: 01/09/2018
    Parameters
    ----------
    da_biased : xarray DataArray
        Array containing values to be corrected. The time information of this array is anticipated
        in a lead_time/inital_date format
    da_target : xarray DataArray
        Array containing values to use for the correction.
    da_target_clim : xarray DataArray, optional
        Array containing a climatology of da_target. If da_target_clim is provided, this function
        returns both the corrected full field and the anomalies. Otherwise, returns only the
        anomalies
    init_date_name : str, optional
        Name of initial date dimension
    lead_time_name : str, optional
        Name of lead time dimension
    Returns
    -------
    corrected : xarray DataArray
        Bias corrected array
    Examples
    --------
    >>> biased = xr.DataArray(np.random.normal(size=(48,6)),
    ...                       coords=[('init_date', pd.date_range(start='1/1/2018', periods=48, freq='M')),
    ...                               ('lead_time', np.arange(6))])
    >>> biased['lead_time'].attrs['units'] = 'M'
    >>> target = xr.DataArray(np.random.normal(size=(48)),
    ...                       coords=[('time', pd.date_range(start='1/1/2000', periods=48, freq='M'))])
    >>> doppyo.utils.bias_correct_m(biased, target)
    <xarray.DataArray (init_date: 48, lead_time: 6)>
    array([[ 0.541226,  0.693622, -0.367322,  0.820282,  0.111487,  0.078355],
           [-0.299829,  0.164297, -0.976883,  0.463365, -0.26428 , -0.536119],
           [ 0.078832, -0.260615, -0.235059, -0.349185,  0.567183, -1.543395],
           ...,
           [ 0.335494, -1.121158,  1.313004,  0.604279,  0.135053,  0.031851],
           [ 0.33103 ,  0.876521, -0.980873,  0.640328,  1.053691,  0.166768],
           [ 1.207329,  0.021916,  0.210883, -0.189922,  0.075786,  0.047616]])
    Coordinates:
      * init_date  (init_date) datetime64[ns] 2018-01-31 2018-02-28 ... 2021-12-31
      * lead_time  (lead_time) int64 0 1 2 3 4 5
    Notes
    -----------
    Many years of initial dates (in da_biased) and times (in da_target) must exist for the mean to be
    computed reliably
    """
    def _groupby_lead_and_mean(da, over_dims, init_date_name, lead_time_name):
        """ Groups provided array by lead time and computes mean """
        return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).mean(over_dims, skipna=True)
    def _unstack_and_shift_per_month(da, shift, init_date_name, lead_time_name):
        """ Unstacks and adjusts input array by a constant shift as a function of month """
        da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name)
        the_month = np.ndarray.flatten(da_us.month.values)
        the_month = int(np.unique(the_month[~np.isnan(the_month)]))
        return da_us - shift.sel(month=the_month)
    # Anomalize in datetime space, then convert back to lead-time space -----
    _anomalize = lambda data, clim: datetime_to_leadtime(
                                        anomalize(
                                            leadtime_to_datetime(data),clim))
    da_biased = da_biased.copy()
    da_target = da_target.copy()
    # Map each (init_date, lead_time) pair onto its calendar month, 1..12
    # (assumes lead_time is in units of months -- see the Examples) -----
    month = (da_biased[init_date_name].dt.month + da_biased[lead_time_name]) % 12
    month = month.where(month != 0, 12)
    # Correct the mean -----
    da_biased.coords['month'] = month
    try:
        da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=[init_date_name,'ensemble'],
                                                          init_date_name=init_date_name, lead_time_name=lead_time_name)
    except ValueError:
        # No 'ensemble' dimension: average over initial dates only -----
        da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=init_date_name,
                                                          init_date_name=init_date_name, lead_time_name=lead_time_name)
    if da_target_clim is not None:
        da_target_mean = da_target.groupby('time.month').mean('time')
        da_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean - da_target_mean),
                                                       init_date_name=init_date_name, lead_time_name=lead_time_name) \
                               .mean('month', skipna=True)
        da_meancorr[lead_time_name] = da_biased[lead_time_name]
        da_meancorr.coords['month'] = month
        # Compute the corrected anomalies -----
        da_anom_meancorr = da_meancorr.groupby(init_date_name).apply(_anomalize, clim=da_target_clim)
        da_anom_meancorr.coords['month'] = month
    else:
        da_anom_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean),
                                                            init_date_name=init_date_name, lead_time_name=lead_time_name) \
                                    .mean('month', skipna=True)
        # NOTE(review): the following self-assignment is a no-op; the parallel branch above
        # assigns da_biased[lead_time_name] -- confirm intent
        da_anom_meancorr[lead_time_name] = da_anom_meancorr[lead_time_name]
        da_anom_meancorr.coords['month'] = month
    if da_target_clim is not None:
        # NOTE(review): the original code reconstructed the full field here into a misspelled,
        # never-used variable ('da_meancorrr') and returned the equivalent 'da_meancorr'
        # computed above; the dead computation has been removed (behavior unchanged).
        return da_meancorr.drop('month'), da_anom_meancorr.drop('month')
    else:
        return da_anom_meancorr.drop('month')
# ===================================================================================================
def conditional_bias_correct(da_cmp, da_ref, over_dims):
    """
    Return conditional bias corrected data using the approach of Goddard et al. 2013
    """
    ens_mean = da_cmp.mean('ensemble')
    # Correlation between the ensemble mean and the reference -----
    corr = skill.compute_Pearson_corrcoef(ens_mean, da_ref, over_dims=over_dims, subtract_local_mean=False)
    # Scale by the ratio of reference to ensemble-mean spread, weighted by the correlation -----
    scale_factor = (da_ref.std(over_dims) / ens_mean.std(over_dims)) * corr
    return da_cmp * scale_factor
# ===================================================================================================
def trunc_time(time, freq):
    """
    Truncates values in provided time array to provided frequency. E.g. 2018-01-15T12:00 with
    freq = 'M' becomes 2018-01-01.
    """
    # Casting to a coarser datetime64 unit truncates (floors) each value -----
    target_dtype = f'<M8[{freq}]'
    return time.astype(target_dtype)
# ===================================================================================================
def month_delta(date_in, delta, trunc_to_start=False):
    """ Increments provided datetime64 array by delta months

    Parameters
    ----------
    date_in : datetime64-like (anything accepted by pd.Timestamp)
        Date to increment
    delta : int
        Number of months to add (may be negative)
    trunc_to_start : bool, optional
        If True, truncate the result to the start of the month

    Returns
    -------
    numpy datetime64 (ns precision)
        Incremented date; the day-of-month is clipped to the length of the target month
    """
    date_mod = pd.Timestamp(date_in)
    m, y = (date_mod.month + delta) % 12, date_mod.year + ((date_mod.month) + delta - 1) // 12
    if not m: m = 12
    # Gregorian leap year: divisible by 4 and (not by 100 unless also by 400).
    # (Fixes the original rule `y % 4 == 0 and not y % 400 == 0`, which wrongly
    # treated 1900 as a leap year and 2000 as a non-leap year.)
    leap = (y % 4 == 0) and (y % 100 != 0 or y % 400 == 0)
    days_in_month = [31, 29 if leap else 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    # Clip the day so e.g. Jan 31 + 1 month lands on the last day of February -----
    d = min(date_mod.day, days_in_month[m - 1])
    if trunc_to_start:
        date_out = trunc_time(np.datetime64(date_mod.replace(day=d, month=m, year=y)), 'M')
    else:
        date_out = np.datetime64(date_mod.replace(day=d, month=m, year=y))
    return np.datetime64(date_out, 'ns')
# ===================================================================================================
def year_delta(date_in, delta, trunc_to_start=False):
    """ Increments provided datetime64 array by delta years """
    # A year step is just twelve month steps -----
    shifted = month_delta(date_in, 12 * delta)
    return trunc_time(shifted, 'Y') if trunc_to_start else shifted
# ===================================================================================================
def datetime_to_leadtime(data_in):
    """ Converts time information from single datetime dimension to init_date/lead_time dimension pair

    The first value of the 'time' coordinate becomes the 'init_date' coordinate; the 'time'
    dimension is renamed 'lead_time' and replaced by integer steps whose frequency is stored
    in the lead_time 'units' attribute.
    """
    init_date = data_in.time.values[0]
    lead_times = range(len(data_in.time))
    try:
        freq = pd.infer_freq(data_in.time.values)
        # If pandas tries to assign start time to frequency (e.g. QS-OCT), remove this -----
        # NOTE(review): pd.infer_freq returns None when it cannot infer a frequency; that would
        # raise TypeError (not the ValueError caught below) here -- confirm upstream guarantees
        # an inferrable frequency
        if '-' in freq:
            freq = freq[:freq.find('-')]
        # Split frequency into numbers and strings -----
        incr_string = ''.join([i for i in freq if i.isdigit()])
        freq_incr = [int(incr_string) if incr_string else 1][0]
        freq_type = ''.join([i for i in freq if not i.isdigit()])
        # Specify all lengths great than 1 month in months -----
        if 'QS' in freq_type:
            freq = str(3*freq_incr) + 'MS'
        elif 'Q' in freq_type:
            freq = str(3*freq_incr) + 'M'
        elif ('YS' in freq_type) | ('AS' in freq_type):
            freq = str(12*freq_incr) + 'MS'
        elif ('Y' in freq_type) | ('A' in freq_type):
            freq = str(12*freq_incr) + 'M'
    except ValueError:
        # infer_freq raises ValueError for fewer than 3 time points: fall back to guessing
        # the frequency from the spacing of the first two points (in seconds) -----
        dt = (data_in.time.values[1] - data_in.time.values[0]) / np.timedelta64(1, 's')
        month = data_in.time.dt.month[0]
        if dt == 60*60*24:
            freq = 'D'
        elif ((month == 1) | (month == 3) | (month == 5) | (month == 7) | (month == 8) | (month == 10) |
              (month == 12)) & (dt == 31*60*60*24):
            freq = 'MS'
        elif ((month == 4) | (month == 6) | (month == 9) | (month == 11)) & (dt == 30*60*60*24):
            freq = 'MS'
        elif (month == 2) & ((dt == 28*60*60*24) | (dt == 29*60*60*24)):
            freq = 'MS'
        elif (dt == 365*60*60*24) | (dt == 366*60*60*24):
            freq = 'A'
        else:
            # Unknown spacing: record a sentinel frequency -----
            freq = 'NA'
    # Replace the datetime axis with integer lead times and record the initial date -----
    data_out = data_in.rename({'time' : 'lead_time'})
    data_out['lead_time'] = lead_times
    data_out['lead_time'].attrs['units'] = freq
    data_out.coords['init_date'] = init_date
    return data_out
# ===================================================================================================
def leadtime_to_datetime(data_in, init_date_name='init_date', lead_time_name='lead_time'):
    """ Converts time information from lead time/initial date dimension pair to single datetime dimension

    Parameters
    ----------
    data_in : xarray DataArray or Dataset
        Object with an initial-date coordinate and an integer lead-time dimension whose
        frequency is stored in the lead-time 'units' attribute (the inverse of
        datetime_to_leadtime)
    init_date_name : str, optional
        Name of initial date coordinate
    lead_time_name : str, optional
        Name of lead time dimension

    Returns
    -------
    Same type as data_in, with the lead-time dimension replaced by a 'time' dimension of
    datetimes starting at the initial date
    """
    try:
        init_date = data_in[init_date_name].values[0]
    except IndexError:
        # init_date is scalar (0-d) -----
        init_date = data_in[init_date_name].values
    lead_times = list(map(int, data_in[lead_time_name].values))
    freq = data_in[lead_time_name].attrs['units']
    # Build the datetime axis from the initial date and the lead-time frequency.
    # (A superseded implementation that special-cased monthly/yearly frequencies via
    # month_delta/year_delta has been removed.)
    datetimes = (pd.date_range(init_date, periods=len(lead_times), freq=freq)).values
    data_out = data_in.drop(init_date_name)
    data_out = data_out.rename({lead_time_name : 'time'})
    data_out['time'] = datetimes
    return prune(data_out)
# ===================================================================================================
def get_nearest_point(da, lat, lon):
    """
    Return the grid point in da closest to the requested lat/lon location
    (nearest-neighbour selection).
    """
    return da.sel(method='nearest', lat=lat, lon=lon)
# ===================================================================================================
# visualization tools
# ===================================================================================================
def plot_fields(data, title=None, headings=None, ncol=2, contour=False, vlims=None, clims=None, squeeze_row=1,
                squeeze_col=1, squeeze_cbar=1, shift_cbar=1, cmap='viridis', fontsize=12, invert=False):
    """ Plots tiles of figures

    Parameters
    ----------
    data : sequence of xarray DataArray
        Fields to plot, one panel each. Panels with both 'lat' and 'lon' dims are drawn on a
        PlateCarree map (with coastlines and gridline labels); all other panels are drawn as
        plain images/contour plots
    title : str, optional
        Figure title; used as the shared colorbar label when a single colorbar is drawn
    headings : sequence of str, optional
        Per-panel headings; used as per-panel colorbar labels when each panel has its own colorbar
    ncol : int, optional
        Number of panel columns
    contour : bool, optional
        If True, draw filled contours (plus contour lines when clims is given) instead of images
    vlims : sequence, optional
        Colour limits. Either a single [vmin, vmax] pair applied to all panels (produces one
        shared colorbar) or one [vmin, vmax] pair per panel
    clims : sequence, optional
        Contour-line limits; must have the same shape as vlims
    squeeze_row, squeeze_col, squeeze_cbar, shift_cbar : float, optional
        Layout fudge factors controlling panel and colorbar sizing/placement
    cmap : str, optional
        Matplotlib colormap name
    fontsize : int, optional
        Base font size
    invert : bool, optional
        If True, invert the y-axis of non-map panels
    """
    def _depth(seq):
        # Nesting depth of a (possibly nested) sequence; 1 means a flat [vmin, vmax] pair -----
        for level in count():
            if not seq:
                return level
            seq = list(chain.from_iterable(s for s in seq if isinstance(s, Sequence)))
    matplotlib.rc('font', family='sans-serif')
    matplotlib.rc('font', serif='Helvetica')
    matplotlib.rc('text', usetex='false')
    matplotlib.rcParams.update({'font.size': fontsize})
    nrow = int(np.ceil(len(data)/ncol));
    fig = plt.figure(figsize=(11*squeeze_col, nrow*4*squeeze_row))
    if (clims is not None) & (np.shape(vlims) != np.shape(clims)):
        raise ValueError('The input clims must be equal in size to vlims')
    # Check if vlims are given per figure or for all figures -----
    # (a flat [vmin, vmax] pair means one shared colorbar for all panels)
    one_cbar = False
    if vlims is None:
        vlims = [[None, None]] * len(data)
    if _depth(vlims) == 1:
        one_cbar = True
    over_count = 1
    for idx,dat in enumerate(data):
        if one_cbar:
            vmin, vmax = vlims
            if clims is not None:
                cmin, cmax = clims
        else:
            vmin, vmax = vlims[idx]
            if clims is not None:
                cmin, cmax = clims[idx]
        # Map panels: fields with both lat and lon dimensions -----
        if ('lat' in dat.dims) and ('lon' in dat.dims):
            trans = cartopy.crs.PlateCarree()
            ax = plt.subplot(nrow, ncol, over_count, projection=cartopy.crs.PlateCarree(central_longitude=180))
            extent = [dat.lon.min(), dat.lon.max(),
                      dat.lat.min(), dat.lat.max()]
            if contour is True:
                if clims is not None:
                    ax.coastlines(color='gray')
                    im = ax.contourf(dat.lon, dat.lat, dat, levels=np.linspace(vmin,vmax,12), origin='lower', transform=trans,
                                     vmin=vmin, vmax=vmax, cmap=cmap)
                    # White-over-black contour lines for visibility on any background -----
                    ax.contour(dat.lon, dat.lat, dat, levels=np.linspace(cmin,cmax,12), origin='lower', transform=trans,
                               vmin=vmin, vmax=vmax, colors='w', linewidths=2)
                    ax.contour(dat.lon, dat.lat, dat, levels=np.linspace(cmin,cmax,12), origin='lower', transform=trans,
                               vmin=vmin, vmax=vmax, colors='k', linewidths=1)
                else:
                    ax.coastlines(color='black')
                    im = ax.contourf(dat.lon, dat.lat, dat, origin='lower', transform=trans, vmin=vmin, vmax=vmax,
                                     cmap=cmap)
            else:
                ax.coastlines(color='black')
                im = ax.imshow(dat, origin='lower', extent=extent, transform=trans, vmin=vmin, vmax=vmax, cmap=cmap)
            # Gridline labels: left labels on the first column, right labels on the last -----
            gl = ax.gridlines(crs=cartopy.crs.PlateCarree(), draw_labels=True)
            gl.xlines = False
            gl.ylines = False
            gl.xlabels_top = False
            if over_count % ncol == 0:
                gl.ylabels_left = False
            elif (over_count+ncol-1) % ncol == 0:
                gl.ylabels_right = False
            else:
                gl.ylabels_left = False
                gl.ylabels_right = False
            gl.xlocator = mticker.FixedLocator([-90, 0, 90, 180])
            gl.ylocator = mticker.FixedLocator([-90, -60, 0, 60, 90])
            gl.xformatter = LONGITUDE_FORMATTER
            gl.yformatter = LATITUDE_FORMATTER
            if not one_cbar:
                cbar = plt.colorbar(im, ax=ax, orientation="horizontal", aspect=30/squeeze_cbar, pad=shift_cbar*0.1)
                tick_locator = mticker.MaxNLocator(nbins=6)
                cbar.locator = tick_locator
                cbar.update_ticks()
                if headings is not None:
                    cbar.set_label(headings[idx], labelpad=5, fontsize=fontsize);
            elif headings is not None:
                ax.set_title(headings[idx], fontsize=fontsize)
        # Non-map panels: plain image/contour with axis labels taken from the dims -----
        else:
            ax = plt.subplot(nrow, ncol, over_count)
            if 'lat' in dat.dims:
                x_plt = dat['lat']
                y_plt = dat[utils.get_other_dims(dat,'lat')[0]]
                # if dat.get_axis_num('lat') > 0:
                #     dat = dat.transpose()
            elif 'lon' in dat.dims:
                x_plt = dat['lon']
                y_plt = dat[utils.get_other_dims(dat,'lon')[0]]
                # if dat.get_axis_num('lon') > 0:
                #     dat = dat.transpose()
            else:
                x_plt = dat[dat.dims[1]]
                y_plt = dat[dat.dims[0]]
            extent = [x_plt.min(), x_plt.max(),
                      y_plt.min(), y_plt.max()]
            if contour is True:
                if clims is not None:
                    im = ax.contourf(x_plt, y_plt, dat, levels=np.linspace(vmin,vmax,12), vmin=vmin, vmax=vmax,
                                     cmap=cmap)
                    ax.contour(x_plt, y_plt, dat, levels=np.linspace(cmin,cmax,12), colors='w', linewidths=2)
                    ax.contour(x_plt, y_plt, dat, levels=np.linspace(cmin,cmax,12), colors='k', linewidths=1)
                else:
                    im = ax.contourf(x_plt, y_plt, dat, vmin=vmin, vmax=vmax, cmap=cmap)
            else:
                im = ax.imshow(dat, origin='lower', extent=extent, vmin=vmin, vmax=vmax, cmap=cmap)
            # y-ticks on the outer columns only -----
            if over_count % ncol == 0:
                ax.yaxis.tick_right()
            elif (over_count+ncol-1) % ncol == 0:
                ax.set_ylabel(y_plt.dims[0], fontsize=fontsize)
            else:
                ax.set_yticks([])
            if idx / ncol >= nrow - 1:
                ax.set_xlabel(x_plt.dims[0], fontsize=fontsize)
            if not one_cbar:
                cbar = plt.colorbar(im, ax=ax, orientation="horizontal", aspect=30/squeeze_cbar, pad=shift_cbar*0.1)
                tick_locator = mticker.MaxNLocator(nbins=6)
                cbar.locator = tick_locator
                cbar.update_ticks()
                if headings is not None:
                    cbar.set_label(headings[idx], labelpad=5, fontsize=fontsize);
            elif headings is not None:
                ax.set_title(headings[idx], fontsize=fontsize)
            if invert:
                ax.invert_yaxis()
        over_count += 1
    plt.tight_layout()
    # Shared colorbar across all panels (drawn from the last image handle) -----
    if one_cbar:
        vmin, vmax = vlims
        fig.subplots_adjust(bottom=shift_cbar*0.16)
        cbar_ax = fig.add_axes([0.15, 0.13, 0.7, squeeze_cbar*0.020])
        cbar = fig.colorbar(im, cax=cbar_ax, orientation='horizontal');
        cbar_ax.set_xlabel(title, rotation=0, labelpad=15, fontsize=fontsize);
        cbar.set_ticks(np.linspace(vmin,vmax,5))
    elif title is not None:
        fig.suptitle(title, y=1)
# ===================================================================================================
def size_GB(xr_object):
    """
    How many GB (or GiB) is your xarray object?
    // Requires an xarray object
    // Returns:
    * equivalent GB (GBytes) - 10^9 conversion
    * equivalent GiB (GiBytes) - 2^ 30 conversion
    < Thomas Moore - thomas.moore@csiro.au - 10102018 >
    """
    # Renamed local from 'bytes' to avoid shadowing the builtin -----
    n_bytes = xr_object.nbytes
    Ten2the9 = 10**9
    Two2the30 = 2**30
    GBytes = n_bytes / Ten2the9
    GiBytes = n_bytes / Two2the30
    # Print out results before returning them -----
    print(xr_object.name, "is", GBytes, "GB", 'which is', GiBytes,"GiB")
    return GBytes,GiBytes
# ===================================================================================================
def get_pres_name(da):
    """
    Returns name of pressure dimension in input array
    Author: Dougie Squire
    Date: 03/03/2018
    Parameters
    ----------
    da : xarray DataArray
        Array with coordinate corresponding to pressure
    Returns
    -------
    name : str
        Name of dimension corresponding to pressure
    Raises
    ------
    KeyError
        If neither 'pfull' nor 'phalf' is among the array's dimensions
    Examples
    --------
    >>> A = xr.DataArray(np.random.normal(size=(2,2,2,2,2)),
    ...                  coords=[('lat', np.arange(2)), ('lon', np.arange(2)),
    ...                          ('depth', np.arange(2)), ('level', np.arange(2)),
    ...                          ('pfull', np.arange(2))])
    >>> doppyo.utils.get_pres_name(A)
    'pfull'
    """
    if 'pfull' in da.dims:
        return 'pfull'
    elif 'phalf' in da.dims:
        return 'phalf'
    else:
        # (Removed an unreachable `pass` that followed this raise.)
        raise KeyError('Unable to determine pressure dimension')
# ===================================================================================================
def did_event(da, event):
    """
    Returns array containing True/False where event occurs/does not occur

    Parameters
    ----------
    da : xarray DataArray
        Array of values to test
    event : str
        Comparison expression with the array omitted, e.g. '> 1', '== 0', '>= 2'

    Returns
    -------
    xarray DataArray
        Boolean array named 'event_logical'

    Notes
    -----
    See http://www.cawcr.gov.au/projects/verification/
    WARNING: this function evaluates `event` with eval(); never pass untrusted input.
    NOTE(review): '&'/'|' bind tighter than comparisons in Python, so compound events
    rely on Python's chained-comparison semantics -- confirm intended behaviour for
    compound expressions.
    """
    import re
    # Normalise C-style and word logical operators -----
    expr = event.replace('&&', '&').replace('||', '|')
    expr = re.sub(r'\band\b', '&', expr)
    expr = re.sub(r'\bor\b', '|', expr)
    # Prefix each comparison operator with the array, mapping a lone '=' to '=='.
    # (A single-pass regex replaces the original chained str.replace, which corrupted
    # any expression containing '==', '>=' or '<=' by re-replacing the '=' characters
    # it had already inserted.)
    expr = re.sub(r'(==|>=|<=|!=|>|<|=)',
                  lambda m: 'da ' + ('==' if m.group(1) == '=' else m.group(1)),
                  expr)
    return eval('(' + expr + ').rename("event_logical")')
# ===================================================================================================
def compute_likelihood(da_logical, dim='ensemble'):
    """
    Returns array of likelihoods computed along dim from logical event data

    Parameters
    ----------
    da_logical : xarray DataArray
        Boolean array of event occurrences (e.g. the output of did_event)
    dim : str or None, optional
        Dimension over which to average; if None, the input is returned unchanged

    Notes
    -----
    See http://www.cawcr.gov.au/projects/verification/
    """
    # Use identity comparison for None (was `dim == None`) -----
    if dim is None:
        likelihood = da_logical
    else:
        likelihood = da_logical.mean(dim=dim).rename('likelihood')
    return likelihood
# ===================================================================================================
def atmos_energy_cycle(temp, u, v, omega, gh, terms=None, vgradz=False, spectral=False, n_wavenumbers=20,
integrate=True, loop_triple_terms=False, lat_name=None, lon_name=None,
plevel_name=None):
"""
Returns all terms in the Lorenz energy cycle. Follows formulae and notation used in `Marques
et al. 2011 Global diagnostic energetics of five state-of-the-art climate models. Climate
Dynamics`. Note that this decomposition is in the space domain. A space-time decomposition
can also be carried out (though not in Fourier space, but this is not implemented here (see
`Oort. 1964 On Estimates of the atmospheric energy cycle. Monthly Weather Review`).
Parameters
----------
temp : xarray DataArray
Array containing fields of temperature with at least coordinates latitude, longitude
and level (following standard naming - see Limitations)
u : xarray DataArray
Array containing fields of zonal velocity with at least coordinates latitude, longitude
and level (following standard naming - see Limitations)
v : xarray DataArray
Array containing fields of meridional velocity with at least coordinates latitude, longitude
and level (following standard naming - see Limitations)
omega : xarray DataArray
Array containing fields of vertical velocity (pressure coordinates) with at least coordinates
latitude, longitude and level (following standard naming - see Limitations)
gh : xarray DataArray
Array containing fields of geopotential height with at least coordinates latitude, longitude
and level (following standard naming - see Limitations)
terms : str or sequence of str
List of terms to compute. If None, returns all terms. Available options are:
Pz; total available potential energy in the zonally averaged temperature distribution
Kz; total kinetic energy in zonally averaged motion
Pe; total eddy available potential energy [= sum_n Pn (n > 0 only) for spectral=True] (Note that
for spectral=True, an additional term, Sn, quantifying the rate of transfer of available potential
energy to eddies of wavenumber n from eddies of all other wavenumbers is also returned)
Ke; total eddy kinetic energy [= sum_n Kn (n > 0 only) for spectral=True] (Note that for
spectral=True, an additional term, Ln, quantifying the rate of transfer of kinetic energy to eddies
of wavenumber n from eddies of all other wavenumbers is also returned)
Cz; rate of conversion of zonal available potential energy to zonal kinetic energy
Ca; rate of transfer of total available potential energy in the zonally averaged temperature
distribution (Pz) to total eddy available potential energy (Pe) [= sum_n Rn (n > 0 only) for
spectral=True]
Ce; rate of transfer of total eddy available potential energy (Pe) to total eddy kinetic energy
(Ke) [= sum_n Cn (n > 0 only) for spectral=True]
Ck; rate of transfer of total eddy kinetic energy (Ke) to total kinetic energy in zonally
averaged motion (Kz) [= sum_n Mn (n > 0 only) for spectral=True]
Gz; rate of generation of zonal available potential energy due to the zonally averaged heating (Pz).
Note that this term is computed as a residual (Cz + Ca) and cannot be returned in spectral space.
If Gz is requested with spectral=True, Gz is returned in real-space only
Ge; rate of generation of eddy available potential energy (Pe). Note that this term is computed as
a residual (Ce - Ca) and cannot be returned in spectral space. If Ge is requested with spectral=True,
Ge is returned in real-space only
Dz; rate of viscous dissipation of zonal kinetic energy (Kz). Note that this term is computed as a
residual (Cz - Ck) and cannot be returned in spectral space. If Dz is requested with spectral=True, Dz
is returned in real-space only
De; rate of dissipation of eddy kinetic energy (Ke). Note that this term is computed as a residual
(Ce - Ck) and cannot be returned in spectral space. If De is requested with spectral=True, De is
returned in real-space only
vgradz : bool, optional
If True, uses `v-grad-z` approach for computing terms relating to conversion
of potential energy to kinetic energy. Otherwise, defaults to using the
`omaga-alpha` approach (see reference above for details)
spectral : bool, optional
If True, computes all terms as a function of wavenumber on longitudinal bands. To use this
option, longitudes must be regularly spaced. Note that Ge and De are computed as residuals and
cannot be computed in spectral space
n_wavenumbers : int, optional
Number of wavenumbers to retain either side of wavenumber=0. Obviously only does anything if
spectral=True
integrate : bool, optional
If True, computes and returns the integral of each term over the mass of the
atmosphere. Otherwise, only the integrands are returned.
Returns
-------
atmos_energy_cycle : xarray Dataset
Limitations
-----------
All input array coordinates must follow standard naming (see doppyo.utils.get_lat_name(),
doppyo.utils.get_lon_name(), etc)
Pressure levels must be provided in units of hPa
Notes
-----
The following notation is used below (stackable, e.g. *_ZT indicates the time average of the zonal
average):
*_A -> area average over an isobaric surface
*_a -> departure from area average
*_Z -> zonal average
*_z -> departure from zonal average
*_T -> time average
*_t -> departure from time average
Additionally, capital variables indicate Fourier transforms:
F(u) = U
F(v) = V
F(omega) = O
F(gh) = A
F(temp) = B
"""
def _flip_n(da):
""" Flips data along wavenumber coordinate """
daf = da.copy()
daf['n'] = -daf['n']
return daf.sortby(daf['n'])
def _truncate(F, n_truncate, dim):
"""
Converts spatial frequency dim to wavenumber, n, and truncates all wavenumbers greater than
n_truncate
"""
F[dim] = 360 * F[dim]
F = F.rename({dim : 'n'})
F = F.where(abs(F.n) <= n_truncate, drop=True)
return F, _flip_n(F)
def _triple_terms(A, B, C):
"""
Calculate triple term summation of the form \int_{m=-inf}^{inf} A(m) * B(n) * C(n - m)
"""
# Use rolling operator to build shifted terms -----
Am = A.rename({'n' : 'm'})
Cnm = C.rolling(n=len(C.n), center=True).construct('m', fill_value=0)
Cnm['m'] = -C['n'].values
# Drop m = 0 and n < 0 -----
Am = Am.where(Am['m'] != 0, drop=True)
Cnm = Cnm.where(Cnm['m'] != 0, drop=True)
return (B * (Am * Cnm)).sum(dim='m', skipna=False)
def _triple_terms_loop(A, B, C):
"""
Calculate triple term summation of the form \int_{m=-inf}^{inf} A(m) * B(n) * C(n - m)
"""
# Loop over all m's and perform rolling sum -----
ms = A['n'].where(A['n'] != 0, drop=True).values
ABC = A.copy() * 0
for m in ms:
Am = A.sel(n=m)
Cnm = C.shift(n=int(m)).fillna(0)
ABC = ABC + (Am * B * Cnm)
return ABC
if terms is None:
terms = ['Pz', 'Kz', 'Pe', 'Ke', 'Cz', 'Ca', 'Ce', 'Ck', 'Gz', 'Ge', 'Dz', 'De']
if isinstance(terms, str):
terms = [terms]
# Initialize some things -----
if lat_name is None:
lat_name = utils.get_lat_name(temp)
if lon_name is None:
lon_name = utils.get_lon_name(temp)
if plevel_name is None:
plevel_name = utils.get_plevel_name(temp)
degtorad = utils.constants().pi / 180
tan_lat = xr.ufuncs.tan(temp[lat_name] * degtorad)
cos_lat = xr.ufuncs.cos(temp[lat_name] * degtorad)
# Determine the stability parameter using Saltzman's approach -----
kappa = utils.constants().R_d / utils.constants().C_pd
p_kap = (1000 / temp[plevel_name]) ** kappa
theta_A = utils.average(temp * p_kap, [lat_name, lon_name], weights=cos_lat)
dtheta_Adp = utils.differentiate_wrt(theta_A, dim=plevel_name, x=(theta_A[plevel_name] * 100))
gamma = - p_kap * (utils.constants().R_d) / ((temp[plevel_name] * 100) * utils.constants().C_pd) / dtheta_Adp # [1/K]
energies = gamma.rename('gamma').to_dataset()
# Compute zonal terms
# ========================
if ('Pz' in terms):
# Compute the total available potential energy in the zonally averaged temperature
# distribution, Pz [also commonly called Az] -----
temp_A = utils.average(temp, [lat_name, lon_name], weights=cos_lat)
temp_Z = temp.mean(dim=lon_name)
temp_Za = temp_Z - temp_A
Pz_int = gamma * utils.constants().C_pd / 2 * temp_Za ** 2 # [J/kg]
energies['Pz_int'] = Pz_int
if integrate:
Pz = _int_over_atmos(Pz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [J/m^2]
energies['Pz'] = Pz
if ('Kz' in terms):
# Compute the total kinetic energy in zonally averaged motion, Kz [also commonly
# called Kz] -----
u_Z = u.mean(dim=lon_name)
v_Z = v.mean(dim=lon_name)
Kz_int = 0.5 * (u_Z ** 2 + v_Z ** 2) # [J/kg]
energies['Kz_int'] = Kz_int
if integrate:
Kz = _int_over_atmos(Kz_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name]) # [J/m^2]
energies['Kz'] = Kz
if ('Cz' in terms):
# Compute the rate of conversion of zonal available potential energy (Pz) to zonal kinetic
# energy (Kz), Cz [also commonly called Cz] -----
if vgradz:
if 'v_Z' not in locals():
v_Z = v.mean(dim=lon_name)
gh_Z = gh.mean(dim=lon_name)
dghdlat = utils.differentiate_wrt(gh_Z, dim=lat_name, x=(gh_Z[lat_name] * degtorad))
Cz_int = - (utils.constants().g / utils.constants().R_earth) * v_Z * dghdlat # [W/kg]
energies['Cz_int'] = Cz_int
if integrate:
Cz = _int_over_atmos(Cz_int, lat_name, lon_name, plevel_name, lon_dim=gh[lon_name]) # [W/m^2]
energies['Cz'] = Cz
else:
if 'temp_Za' not in locals():
temp_A = utils.average(temp, [lat_name, lon_name], weights=cos_lat)
temp_Z = temp.mean(dim=lon_name)
temp_Za = temp_Z - temp_A
omega_A = utils.average(omega, [lat_name, lon_name], weights=cos_lat)
omega_Z = omega.mean(dim=lon_name)
omega_Za = omega_Z - omega_A
Cz_int = - (utils.constants().R_d / (temp[plevel_name] * 100)) * omega_Za * temp_Za # [W/kg]
energies['Cz_int'] = Cz_int
if integrate:
Cz = _int_over_atmos(Cz_int, lat_name, lon_name, plevel_name, lon_dim=omega[lon_name]) # [W/m^2]
energies['Cz'] = Cz
# Compute eddy terms in Fourier space if spectral=True
# ==========================================================
if spectral:
if ('Pe' in terms):
# Compute the total available potential energy eddies of wavenumber n, Pn -----
Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) /
len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
Pn_int = (gamma * utils.constants().C_pd * abs(Bp) ** 2)
energies['Pn_int'] = Pn_int
if integrate:
Pn = _int_over_atmos(Pn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [J/m^2]
energies['Pn'] = Pn
# Compute the rate of transfer of available potential energy to eddies of
# wavenumber n from eddies of all other wavenumbers, Sn -----
Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) /
len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) /
len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) /
len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
dBpdlat = utils.differentiate_wrt(Bp, dim=lat_name, x=(Bp[lat_name] * degtorad))
dBndlat = utils.differentiate_wrt(Bn, dim=lat_name, x=(Bn[lat_name] * degtorad))
dBpdp = utils.differentiate_wrt(Bp, dim=plevel_name, x=(Bp[plevel_name] * 100))
dBndp = utils.differentiate_wrt(Bn, dim=plevel_name, x=(Bn[plevel_name] * 100))
if loop_triple_terms:
BpBnUp = _triple_terms_loop(Bp, Bn, Up)
BpBpUn = _triple_terms_loop(Bp, Bp, Un)
BpglBnVp = _triple_terms_loop(Bp, dBndlat, Vp)
BpglBpVn = _triple_terms_loop(Bp, dBpdlat, Vn)
BpgpBnOp = _triple_terms_loop(Bp, dBndp, Op)
BpgpBpOn = _triple_terms_loop(Bp, dBpdp, On)
BpBnOp = _triple_terms_loop(Bp, Bn, Op)
BpBpOn = _triple_terms_loop(Bp, Bp, On)
else:
BpBnUp = _triple_terms(Bp, Bn, Up)
BpBpUn = _triple_terms(Bp, Bp, Un)
BpglBnVp = _triple_terms(Bp, dBndlat, Vp)
BpglBpVn = _triple_terms(Bp, dBpdlat, Vn)
BpgpBnOp = _triple_terms(Bp, dBndp, Op)
BpgpBpOn = _triple_terms(Bp, dBpdp, On)
BpBnOp = _triple_terms(Bp, Bn, Op)
BpBpOn = _triple_terms(Bp, Bp, On)
Sn_int = -gamma * utils.constants().C_pd * (1j * Bp['n']) / \
(utils.constants().R_earth * xr.ufuncs.cos(Bp[lat_name] * degtorad)) * \
(BpBnUp + BpBpUn) + \
gamma * utils.constants().C_pd / utils.constants().R_earth * \
(BpglBnVp + BpglBpVn) + \
gamma * utils.constants().C_pd * (BpgpBnOp + BpgpBpOn) + \
gamma * utils.constants().R_d / Bp[plevel_name] * \
(BpBnOp + BpBpOn)
energies['Sn_int'] = Sn_int
if integrate:
Sn = abs(_int_over_atmos(Sn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])) # [W/m^2]
energies['Sn'] = Sn
if ('Ke' in terms):
# Compute the total kinetic energy in eddies of wavenumber n, Kn -----
if 'U' not in locals():
Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) /
len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
if 'V' not in locals():
Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) /
len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
Kn_int = abs(Up) ** 2 + abs(Vp) ** 2
energies['Kn_int'] = Kn_int
if integrate:
Kn = _int_over_atmos(Kn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name]) # [J/m^2]
energies['Kn'] = Kn
# Compute the rate of transfer of kinetic energy to eddies of wavenumber n from
# eddies of all other wavenumbers, Ln -----
if 'O' not in locals():
Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) /
len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
dUpdp = utils.differentiate_wrt(Up, dim=plevel_name, x=(Up[plevel_name] * 100))
dVpdp = utils.differentiate_wrt(Vp, dim=plevel_name, x=(Vp[plevel_name] * 100))
dOpdp = utils.differentiate_wrt(Op, dim=plevel_name, x=(Op[plevel_name] * 100))
dOndp = utils.differentiate_wrt(On, dim=plevel_name, x=(On[plevel_name] * 100))
dVpcdl = utils.differentiate_wrt(Vp * cos_lat, dim=lat_name, x=(Vp[lat_name] * degtorad))
dVncdl = utils.differentiate_wrt(Vn * cos_lat, dim=lat_name, x=(Vn[lat_name] * degtorad))
dUpdl = utils.differentiate_wrt(Up, dim=lat_name, x=(Up[lat_name] * degtorad))
dVpdl = utils.differentiate_wrt(Vp, dim=lat_name, x=(Vp[lat_name] * degtorad))
if loop_triple_terms:
UpUnUp = _triple_terms_loop(Up, Un, Up)
UpUpUn = _triple_terms_loop(Up, Up, Un)
VpVnUp = _triple_terms_loop(Vp, Vn, Up)
VpVpUn = _triple_terms_loop(Vp, Vp, Un)
VpUnUp = _triple_terms_loop(Vp, Un, Up)
VpUpUn = _triple_terms_loop(Vp, Up, Un)
UpVnUp = _triple_terms_loop(Up, Vn, Up)
UpVpUn = _triple_terms_loop(Up, Vp, Un)
gpUpUngpOp = _triple_terms_loop(dUpdp, Un, dOpdp)
gpUpUpgpOn = _triple_terms_loop(dUpdp, Up, dOndp)
gpVpVngpOp = _triple_terms_loop(dVpdp, Vn, dOpdp)
gpVpVpgpOn = _triple_terms_loop(dVpdp, Vp, dOndp)
glUpUnglVpc = _triple_terms_loop(dUpdl, Un, dVpcdl)
glUpUpglVnc = _triple_terms_loop(dUpdl, Up, dVncdl)
glVpVnglVpc = _triple_terms_loop(dVpdl, Vn, dVpcdl)
glVpVpglVnc = _triple_terms_loop(dVpdl, Vp, dVncdl)
else:
UpUnUp = _triple_terms(Up, Un, Up)
UpUpUn = _triple_terms(Up, Up, Un)
VpVnUp = _triple_terms(Vp, Vn, Up)
VpVpUn = _triple_terms(Vp, Vp, Un)
VpUnUp = _triple_terms(Vp, Un, Up)
VpUpUn = _triple_terms(Vp, Up, Un)
UpVnUp = _triple_terms(Up, Vn, Up)
UpVpUn = _triple_terms(Up, Vp, Un)
gpUpUngpOp = _triple_terms(dUpdp, Un, dOpdp)
gpUpUpgpOn = _triple_terms(dUpdp, Up, dOndp)
gpVpVngpOp = _triple_terms(dVpdp, Vn, dOpdp)
gpVpVpgpOn = _triple_terms(dVpdp, Vp, dOndp)
glUpUnglVpc = _triple_terms(dUpdl, Un, dVpcdl)
glUpUpglVnc = _triple_terms(dUpdl, Up, dVncdl)
glVpVnglVpc = _triple_terms(dVpdl, Vn, dVpcdl)
glVpVpglVnc = _triple_terms(dVpdl, Vp, dVncdl)
Ln_int = -(1j * Up['n']) / (utils.constants().R_earth * cos_lat) * \
(UpUnUp - UpUpUn) + \
(1j * Vp['n']) / (utils.constants().R_earth * cos_lat) * \
(VpVnUp - VpVpUn) - \
tan_lat / utils.constants().R_earth * \
(VpUnUp + VpUpUn) + \
tan_lat / utils.constants().R_earth * \
(UpVnUp + UpVpUn) + \
(gpUpUngpOp + gpUpUpgpOn) + \
(gpVpVngpOp + gpVpVpgpOn) + \
1 / (utils.constants().R_earth * cos_lat) * \
(glUpUnglVpc + glUpUpglVnc + glVpVnglVpc + glVpVpglVnc)
energies['Ln_int'] = Ln_int
if integrate:
Ln = abs(_int_over_atmos(Ln_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])) # [W/m^2]
energies['Ln'] = Ln
if ('Ca' in terms):
# Compute the rate of transfer of zonal available potential energy to eddy
# available potential energy in wavenumber n, Rn -----
if 'temp_Z' not in locals():
temp_Z = temp.mean(dim=lon_name)
if 'V' not in locals():
Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) /
len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
if 'B' not in locals():
Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) /
len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
if 'O' not in locals():
Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) /
len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
dtemp_Zdlat = utils.differentiate_wrt(temp_Z, dim=lat_name, x=(temp_Z[lat_name] * degtorad))
theta = temp * p_kap
theta_Z = theta.mean(dim=lon_name)
theta_Za = theta_Z - theta_A
dtheta_Zadp = utils.differentiate_wrt(theta_Za, dim=plevel_name, x=(theta_Za[plevel_name] * 100))
Rn_int = gamma * utils.constants().C_pd * ((dtemp_Zdlat / utils.constants().R_earth) * (Vp * Bn + Vn * Bp) +
(p_kap * dtheta_Zadp) * (Op * Bn + On * Bp)) # [W/kg]
energies['Rn_int'] = Rn_int
if integrate:
Rn = abs(_int_over_atmos(Rn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])) # [W/m^2]
energies['Rn'] = Rn
if ('Ce' in terms):
# Compute the rate of conversion of available potential energy of wavenumber n
# to eddy kinetic energy of wavenumber n, Cn -----
if vgradz:
if 'U' not in locals():
Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) /
len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
if 'V' not in locals():
Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) /
len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
Ap, An = _truncate(utils.fft(gh, dim=lon_name, nfft=len(gh[lon_name]), twosided=True, shift=True) /
len(gh[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
dApdlat = utils.differentiate_wrt(Ap, dim=lat_name, x=(Ap[lat_name] * degtorad))
dAndlat = utils.differentiate_wrt(An, dim=lat_name, x=(An[lat_name] * degtorad))
Cn_int = (((-1j * utils.constants().g * Up['n']) / \
(utils.constants().R_earth * xr.ufuncs.cos(Up[lat_name] * degtorad))) * \
(Ap * Un - An * Up)) - \
((utils.constants().g / utils.constants().R_earth) * \
(dApdlat * Vn + dAndlat * Vp)) # [W/kg]
energies['Cn_int'] = Cn_int
if integrate:
Cn = abs(_int_over_atmos(Cn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])) # [W/m^2]
energies['Cn'] = Cn
else:
if 'O' not in locals():
Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) /
len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
if 'B' not in locals():
Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) /
len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
Cn_int = - (utils.constants().R_d / (omega[plevel_name] * 100)) * (Op * Bn + On * Bp) # [W/kg]
energies['Cn_int'] = Cn_int
if integrate:
Cn = abs(_int_over_atmos(Cn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])) # [W/m^2]
energies['Cn'] = Cn
if ('Ck' in terms):
# Compute the rate of transfer of kinetic energy to the zonally averaged flow
# from eddies of wavenumber n, Mn -----
if 'v_Z' not in locals():
v_Z = v.mean(dim=lon_name)
if 'u_Z' not in locals():
u_Z = u.mean(dim=lon_name)
if 'U' not in locals():
Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) /
len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
if 'V' not in locals():
Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) /
len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
if 'O' not in locals():
Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) /
len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
dv_Zdlat = utils.differentiate_wrt(v_Z, dim=lat_name, x=(v[lat_name] * degtorad))
du_Zndlat = utils.differentiate_wrt(u_Z / xr.ufuncs.cos(u[lat_name] * degtorad),
dim=lat_name, x=(u[lat_name] * degtorad))
dv_Zdp = utils.differentiate_wrt(v_Z, dim=plevel_name, x=(v[plevel_name] * 100))
du_Zdp = utils.differentiate_wrt(u_Z, dim=plevel_name, x=(u[plevel_name] * 100))
Mn_int = (-2 * Up * Un * v_Z * tan_lat / utils.constants().R_earth) + \
(2 * Vp * Vn * dv_Zdlat / utils.constants().R_earth + (Vp * On + Vn * Op) * dv_Zdp) + \
((Up * On + Un * Op) * du_Zdp) + \
((Up * Vn + Un * Vp) * xr.ufuncs.cos(u[lat_name] * degtorad) / \
utils.constants().R_earth * du_Zndlat) # [W/kg]
energies['Mn_int'] = Mn_int
if integrate:
Mn = abs(_int_over_atmos(Mn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])) # [W/m^2]
energies['Mn'] = Mn
else:
if ('Pe' in terms):
# Compute the total eddy available potential energy, Pe [also commonly called
# Ae] -----
if 'temp_Z' not in locals():
temp_Z = temp.mean(dim=lon_name)
temp_z = temp - temp_Z
Pe_int = gamma * utils.constants().C_pd / 2 * (temp_z ** 2).mean(dim=lon_name) # [J/kg]
energies['Pe_int'] = Pe_int
if integrate:
Pe = _int_over_atmos(Pe_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [J/m^2]
energies['Pe'] = Pe
if ('Ke' in terms):
# Compute the total eddy kinetic energy, Ke -----
if 'u_Z' not in locals():
u_Z = u.mean(dim=lon_name)
if 'v_Z' not in locals():
v_Z = v.mean(dim=lon_name)
u_z = u - u_Z
v_z = v - v_Z
Ke_int = 0.5 * (u_z ** 2 + v_z ** 2).mean(dim=lon_name) # [J/kg]
energies['Ke_int'] = Ke_int
if integrate:
Ke = _int_over_atmos(Ke_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name]) # [J/m^2]
energies['Ke'] = Ke
if ('Ca' in terms):
# Compute the rate of transfer of total available potential energy in the zonally
# averaged temperature distribution (Pz) to total eddy available potential energy
# (Pe), Ca -----
if 'v_Z' not in locals():
v_Z = v.mean(dim=lon_name)
if 'temp_Z' not in locals():
temp_Z = temp.mean(dim=lon_name)
if 'omega_Z' not in locals():
omega_Z = omega.mean(dim=lon_name)
if 'theta_Z' not in locals():
theta = temp * p_kap
theta_Z = theta.mean(dim=lon_name)
if 'dtemp_Zdlat' not in locals():
dtemp_Zdlat = utils.differentiate_wrt(temp_Z, dim=lat_name, x=(temp_Z[lat_name] * degtorad))
v_z = v - v_Z
temp_z = temp - temp_Z
omega_z = omega - omega_Z
oT_Z = (omega_z * temp_z).mean(dim=lon_name)
oT_A = utils.average(omega_z * temp_z, [lat_name, lon_name], weights=cos_lat)
oT_Za = oT_Z - oT_A
theta_Za = theta_Z - theta_A
dtheta_Zadp = utils.differentiate_wrt(theta_Za, dim=plevel_name, x=(theta_Za[plevel_name] * 100))
Ca_int = - gamma * utils.constants().C_pd * \
(((v_z * temp_z).mean(dim=lon_name) * dtemp_Zdlat / utils.constants().R_earth) + \
(p_kap * oT_Za * dtheta_Zadp)) # [W/kg]
energies['Ca_int'] = Ca_int
if integrate:
Ca = _int_over_atmos(Ca_int, lat_name, lon_name, plevel_name, lon_dim=v[lon_name]) # [W/m^2]
energies['Ca'] = Ca
if ('Ce' in terms):
# Compute the rate of transfer of total eddy available potential energy (Pe) to
# total eddy kinetic energy (Ke), Ce -----
if 'temp_Z' not in locals():
temp_Z = temp.mean(dim=lon_name)
if 'omega_Z' not in locals():
omega_Z = omega.mean(dim=lon_name)
temp_z = temp - temp_Z
omega_z = omega - omega_Z
Ce_int = - (utils.constants().R_d / (temp[plevel_name] * 100)) * \
(omega_z * temp_z).mean(dim=lon_name) # [W/kg]
energies['Ce_int'] = Ce_int
if integrate:
Ce = _int_over_atmos(Ce_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]
energies['Ce'] = Ce
if ('Ck' in terms):
# Compute the rate of transfer of total eddy kinetic energy (Ke) to total kinetic
# energy in zonally averaged motion (Kz), Ck -----
if 'u_Z' not in locals():
u_Z = u.mean(dim=lon_name)
if 'v_Z' not in locals():
v_Z = v.mean(dim=lon_name)
if 'omega_Z' not in locals():
omega_Z = omega.mean(dim=lon_name)
u_z = u - u_Z
v_z = v - v_Z
omega_z = omega - omega_Z
du_Zndlat = utils.differentiate_wrt(u_Z / cos_lat, dim=lat_name, x=(u_Z[lat_name] * degtorad))
dv_Zdlat = utils.differentiate_wrt(v_Z, dim=lat_name, x=(v_Z[lat_name] * degtorad))
du_Zdp = utils.differentiate_wrt(u_Z, dim=plevel_name, x=(u_Z[plevel_name] * 100))
dv_Zdp = utils.differentiate_wrt(v_Z, dim=plevel_name, x=(v_Z[plevel_name] * 100))
Ck_int = (u_z * v_z).mean(dim=lon_name) * cos_lat * du_Zndlat / utils.constants().R_earth + \
(u_z * omega_z).mean(dim=lon_name) * du_Zdp + \
(v_z ** 2).mean(dim=lon_name) * dv_Zdlat / utils.constants().R_earth + \
(v_z * omega_z).mean(dim=lon_name) * dv_Zdp - \
(u_z ** 2).mean(dim=lon_name) * v_Z * tan_lat / utils.constants().R_earth
energies['Ck_int'] = Ck_int
if integrate:
Ck = _int_over_atmos(Ck_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]
energies['Ck'] = Ck
if ('Gz' in terms):
# Compute the rate of generation of zonal available potential energy due to the zonally
# averaged heating, Gz -----
if ('Cz' not in terms) | ('Ca' not in terms):
raise ValueError('The rate of generation of zonal available potential energy, Gz, is computed from the sum of Cz and Ca. Please add these to the list, terms=[<terms>].')
if spectral:
warnings.warn('Rate of generation of zonal available potential energy is computed from the sum of Cz and Ca and cannot be computed in Fourier space. Returning Gz in real-space.')
Ca_int = Rn_int.where(Rn_int.n > 0, drop=True).sum(dim='n').real # sum Rn to get Ca
Gz_int = Cz_int + Ca_int
energies['Gz_int'] = Gz_int
if integrate:
Gz = _int_over_atmos(Gz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]
energies['Gz'] = Gz
if ('Ge' in terms):
# Compute the rate of generation of eddy available potential energy (Ae), Ge -----
if ('Ce' not in terms) | ('Ca' not in terms):
raise ValueError('The rate of generation of eddy available potential energy, Ge, is computed from the residual of Ce and Ca. Please add these to the list, terms=[<terms>].')
if spectral:
warnings.warn('The rate of generation of eddy available potential energy is computed from the residual of Ce and Ca and cannot be computed in Fourier space. Returning Ge in real-space.')
Ce_int = Cn_int.where(Cn_int.n > 0, drop=True).sum(dim='n').real # sum Cn to get Ce
if 'Ca_int' not in locals():
Ca_int = Rn_int.where(Rn_int.n > 0, drop=True).sum(dim='n').real # sum Rn to get Ca
Ge_int = Ce_int - Ca_int
energies['Ge_int'] = Ge_int
if integrate:
Ge = _int_over_atmos(Ge_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]
energies['Ge'] = Ge
if ('Dz' in terms):
# Compute the rate of viscous dissipation of zonal kinetic energy, Dz -----
if ('Cz' not in terms) | ('Ck' not in terms):
raise ValueError('The rate of viscous dissipation of zonal kinetic energy, Dz, is computed from the residual of Cz and Ck. Please add these to the list, terms=[<terms>].')
if spectral:
warnings.warn('The rate of viscous dissipation of zonal kinetic energy, Dz, is computed from the residual of Cz and Ck and cannot be computed in Fourier space. Returning De in real-space.')
Ck_int = Mn_int.where(Mn_int.n > 0, drop=True).sum(dim='n').real # sum Mn to get Ck
Dz_int = Cz_int - Ck_int
energies['Dz_int'] = Dz_int
if integrate:
Dz = _int_over_atmos(Dz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]
energies['Dz'] = Dz
if ('De' in terms):
# Compute the rate of dissipation of eddy kinetic energy (Ke), De -----
if ('Ce' not in terms) | ('Ck' not in terms):
raise ValueError('The rate of viscous dissipation of eddy kinetic energy, De, is computed from the residual of Ce and Ck. Please add these to the list, terms=[<terms>].')
if spectral:
warnings.warn('The rate of viscous dissipation of eddy kinetic energy, De, is computed from the residual of Ce and Ck and cannot be computed in Fourier space. Returning De in real-space.')
if 'Ce_int' not in locals():
Ce_int = Cn_int.where(Cn_int.n > 0, drop=True).sum(dim='n').real # sum Cn to get Ce
if 'Ck_int' not in locals():
Ck_int = Mn_int.where(Mn_int.n > 0, drop=True).sum(dim='n').real # sum Mn to get Ck
De_int = Ce_int - Ck_int
energies['De_int'] = De_int
if integrate:
De = _int_over_atmos(De_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]) # [W/m^2]
energies['De'] = De
return energies
# ===================================================================================================
def auto_merge(paths, preprocess=None, parallel=True, **kwargs):
    """
    Automatically merge a split xarray Dataset. This is designed to behave like
    `xarray.open_mfdataset`, except it supports concatenation along multiple
    dimensions.

    Parameters
    ----------
    paths : str or list of str or list of xarray.Dataset
        Either a glob expression or list of paths as you would pass to
        xarray.open_mfdataset, or a list of xarray datasets. If a list of
        datasets is passed, you should make sure that they are represented
        as dask arrays to avoid reading the whole dataset into memory.
    preprocess : callable, optional
        Function applied to each dataset prior to merging
    parallel : bool, optional
        If True, open and preprocess the datasets lazily via dask.delayed
    **kwargs : dict
        Additional keyword arguments passed on to xarray.open_dataset

    Returns
    -------
    xarray.Dataset
        The merged dataset.
    """
    if parallel:
        # wrap the open_dataset, getattr, and preprocess with delayed
        open_ = dask.delayed(xr.open_dataset)
        getattr_ = dask.delayed(getattr)
        if preprocess is not None:
            preprocess = dask.delayed(preprocess)
    else:
        # BUG FIX: previously assigned the undefined name `open_dataset`, which
        # raised NameError whenever parallel=False
        open_ = xr.open_dataset
        getattr_ = getattr

    datasets = [open_(p, **kwargs) for p in paths]
    # NOTE(review): `_file_obj` is a private xarray attribute -- verify it still
    # exists in the pinned xarray version
    file_objs = [getattr_(ds, '_file_obj') for ds in datasets]
    if preprocess is not None:
        datasets = [preprocess(ds) for ds in datasets]

    if parallel:
        # calling compute here will return the datasets/file_objs lists,
        # the underlying datasets will still be stored as dask arrays
        datasets, file_objs = dask.compute(datasets, file_objs)

    def _combine_along_last_dim(datasets):
        """Concatenate datasets along the last of the dimensions over which they are split."""
        merged = []

        # Determine the dimension along which the dataset is split
        split_dims = [d for d in datasets[0].dims if
                      len(np.unique([ds[d].values[0] for ds in datasets])) > 1]

        # Concatenate along one of the split dimensions
        concat_dim = split_dims[-1]

        # Group along the remaining dimensions and concatenate within each
        # group.
        sorted_ds = sorted(datasets, key=lambda ds: tuple(ds[d].values[0]
                                                          for d in split_dims))
        for _, group in itertools.groupby(
                sorted_ds,
                key=lambda ds: tuple(ds[d].values[0] for d in split_dims[:-1])
                ):
            merged.append(xr.auto_combine(group, concat_dim=concat_dim))

        return merged

    # Repeatedly collapse one split dimension at a time until a single dataset remains
    merged = datasets
    while len(merged) > 1:
        merged = _combine_along_last_dim(merged)

    return merged[0]
import numpy as np
import pandas as pd
import xarray as xr
import cartopy
from collections import Sequence
from itertools import chain, count
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as mticker
from cartopy.util import add_cyclic_point
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
from doppyo import utils
def rank_gufunc(x):
    """NaN-aware rank of data along the last axis; returns the rank of the first element."""
    import bottleneck
    all_ranks = bottleneck.nanrankdata(x, axis=-1)
    return all_ranks[..., 0]
def compute_rank(da_1, da_2, over_dim):
    """Rank da_1 relative to da_2 along over_dim using rank_gufunc (NaN-aware)."""
    reference = da_2.copy()
    if over_dim not in reference.coords:
        # Give the reference a singleton over_dim so it can be concatenated with da_1
        reference.coords[over_dim] = -1
        reference = reference.expand_dims(over_dim)

    aligned = xr.align(reference, da_1, join='inner', exclude=over_dim)
    stacked = xr.concat(aligned, dim=over_dim)

    ranked = xr.apply_ufunc(rank_gufunc, stacked,
                            input_core_dims=[[over_dim]],
                            dask='allowed',
                            output_dtypes=[int])
    return ranked.rename('rank')
def categorize(da, bin_edges):
    """Return the bin index (per numpy.digitize) of every element of da given bin_edges."""
    digitized = xr.apply_ufunc(np.digitize, da, bin_edges,
                               input_core_dims=[[], []],
                               dask='allowed',
                               output_dtypes=[int])
    return digitized.rename('categorized')
def unstack_and_count(da, dims):
    """Unstack da along its first dimension (if it is stacked) and count elements over dims."""
    try:
        expanded = da.unstack(da.dims[0])
    except ValueError:
        # First dimension was not a stacked (MultiIndex) dimension -- use da as-is
        expanded = da
    # An array of ones with the same shape/coords as the (unstacked) data
    ones = (0 * expanded) + 1
    if dims is None:
        return ones
    return ones.sum(dim=dims, skipna=True)
def compute_histogram(da, bin_edges, over_dims):
    """Histogram da into bin_edges, counting over over_dims; all-empty locations are masked."""
    data = da.rename('data')
    counts = (data.groupby_bins(data, bins=bin_edges, squeeze=False)
                  .apply(unstack_and_count, dims=over_dims)
                  .fillna(0)
                  .rename({'data_bins' : 'bins'}))
    # Label each bin by its centre rather than its interval
    counts['bins'] = (bin_edges[0:-1] + bin_edges[1:]) / 2

    # Cast to int and mask locations where no data fell into any bin
    return counts.astype(int).where(counts.sum('bins') != 0)
def calc_gradient(da, dim, x=None):
    """
    Return the gradient of da along dim, computed with central differences on a
    (possibly non-uniform) grid in the interior and one-sided differences at the
    two end points. x gives the coordinate to differentiate with respect to
    (defaults to da[dim]).
    """
    da_n = da.copy()
    if x is None:
        x = da_n[dim]
    # Index range of the interior points (everything except the two end points)
    centre_chunk = range(len(x[dim])-2)
    # Shifted copies of the data so that, at each interior point i, f_hs = f(i),
    # f = f(i+1) and f_hd = f(i+2)
    f_hd = da_n.shift(**{dim:-2})
    f = da_n.shift(**{dim:-1})
    f_hs = da_n
    # Forward step sizes: hs between points i and i+1, hd between i+1 and i+2
    hs = x.shift(**{dim:-1}) - x
    hd = x.shift(**{dim:-2}) - x.shift(**{dim:-1})
    # Non-uniform central-difference stencil; xarray coordinate alignment with the
    # isel'd denominator restricts the quotient to the interior points
    c = (hs ** 2 * f_hd + (hd ** 2 - hs ** 2) * f - hd ** 2 * f_hs) / \
        (hs * hd * (hd + hs)).isel(**{dim : centre_chunk})
    # Re-centre the interior result on the original interior coordinates
    c[dim] = x[dim][1:-1]
    # One-sided differences at the left (l) and right (r) boundaries
    l = (da_n.shift(**{dim:-1}) - da_n).isel(**{dim : 0}) / \
        (x.shift(**{dim:-1}) - x).isel(**{dim : 0})
    r = (-da_n.shift(**{dim:1}) + da_n).isel(**{dim : -1}) / \
        (-x.shift(**{dim:1}) + x).isel(**{dim : -1})
    # Stitch boundary and interior estimates back together on the original coordinate
    grad = xr.concat([l, c, r], dim=dim)
    grad[dim] = da[dim]
    return grad
def bias_correct_ms(da_biased, da_target, da_target_clim=None, init_date_name='init_date',
                    lead_time_name='lead_time'):
    """
    Adjust da_biased to match the mean and standard deviation of da_target on a
    per-calendar-month basis.

    Parameters
    ----------
    da_biased : xarray DataArray
        Forecast data with init_date_name and lead_time_name dimensions
        (optionally an 'ensemble' dimension)
    da_target : xarray DataArray
        Target/observed data with a 'time' dimension
    da_target_clim : xarray DataArray, optional
        Target climatology; when given, both the corrected full field and the
        corrected anomalies are returned
    init_date_name : str, optional
        Name of the initial-date dimension
    lead_time_name : str, optional
        Name of the lead-time dimension

    Returns
    -------
    If da_target_clim is provided: tuple (corrected full field, corrected anomalies),
    both with the helper 'month' coordinate dropped. Otherwise only the corrected
    anomalies.
    """
    def _groupby_lead_and_mean(da, over_dims, init_date_name, lead_time_name):
        # Per-lead-time mean over over_dims within one calendar-month group
        return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).mean(over_dims, skipna=True)

    def _groupby_lead_and_std(da, over_dims, init_date_name, lead_time_name):
        # Per-lead-time standard deviation over over_dims within one calendar-month group
        return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).std(over_dims, skipna=True)

    def _unstack_and_shift_per_month(da, shift, init_date_name, lead_time_name):
        # Subtract the shift corresponding to the (single) calendar month of this group
        da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name)
        the_month = np.ndarray.flatten(da_us.month.values)
        # int(np.unique(...)) raises if the group spans more than one month
        the_month = int(np.unique(the_month[~np.isnan(the_month)]))
        return da_us - shift.sel(month=the_month)

    def _unstack_and_scale_per_month(da, scale, init_date_name, lead_time_name):
        # Multiply by the scale factor corresponding to the (single) calendar month of this group
        da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name)
        the_month = np.ndarray.flatten(da_us.month.values)
        the_month = int(np.unique(the_month[~np.isnan(the_month)]))
        return da_us * scale.sel(month=the_month)

    def _scale_per_month(da, scale):
        # Scale da month-by-month using a 'month'-indexed scale factor
        return da.groupby('time.month') * scale

    _anomalize = lambda data, clim: datetime_to_leadtime(
                                        anomalize(
                                            leadtime_to_datetime(data), clim))

    _rescale = lambda da, scale: datetime_to_leadtime(
                                     _scale_per_month(
                                         leadtime_to_datetime(da), scale))

    da_biased = da_biased.copy()
    da_target = da_target.copy()

    # Calendar month (1..12) at each (init_date, lead_time) pair
    month = (da_biased[init_date_name].dt.month + da_biased[lead_time_name]) % 12
    month = month.where(month != 0, 12)
    da_biased.coords['month'] = month

    # Forecast mean per month and lead time; include the ensemble dimension when present
    try:
        da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=[init_date_name, 'ensemble'],
                                                          init_date_name=init_date_name, lead_time_name=lead_time_name)
    except ValueError:
        # No ensemble dimension
        da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=init_date_name,
                                                          init_date_name=init_date_name, lead_time_name=lead_time_name)

    if da_target_clim is not None:
        da_target_mean = da_target.groupby('time.month').mean('time')

        # Remove the mean bias, then take anomalies relative to the target climatology
        da_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean - da_target_mean),
                                                       init_date_name=init_date_name, lead_time_name=lead_time_name) \
                               .mean('month', skipna=True)
        da_meancorr[lead_time_name] = da_biased[lead_time_name]
        da_meancorr.coords['month'] = month

        da_anom_meancorr = da_meancorr.groupby(init_date_name).apply(_anomalize, clim=da_target_clim)
        da_anom_meancorr.coords['month'] = month
    else:
        # Without a climatology, simply remove the forecast's own monthly mean
        da_anom_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean),
                                                            init_date_name=init_date_name, lead_time_name=lead_time_name) \
                                    .mean('month', skipna=True)
        # NOTE(review): self-assignment is a no-op; presumably intended to be
        # da_biased[lead_time_name] as in the branch above -- confirm before changing
        da_anom_meancorr[lead_time_name] = da_anom_meancorr[lead_time_name]
        da_anom_meancorr.coords['month'] = month

    # Forecast anomaly standard deviation per month and lead time
    try:
        da_biased_std_tmp = da_anom_meancorr.groupby('month').apply(_groupby_lead_and_std, over_dims=[init_date_name, 'ensemble'],
                                                                    init_date_name=init_date_name, lead_time_name=lead_time_name)
    except ValueError:
        da_biased_std_tmp = da_anom_meancorr.groupby('month').apply(_groupby_lead_and_std, over_dims=init_date_name,
                                                                    init_date_name=init_date_name, lead_time_name=lead_time_name)

    try:
        # Restrict the target to the forecast grid when the coordinates allow it
        da_target_std = da_target.sel(lat=da_biased.lat, lon=da_biased.lon).groupby('time.month').std('time')
    except Exception:
        # BUG FIX: was a bare `except:`, which also traps SystemExit/KeyboardInterrupt.
        # Fall back to the full target grid when lat/lon selection is not possible.
        da_target_std = da_target.groupby('time.month').std('time')

    # Scale the anomalies so their std matches the target std
    da_anom_stdcorr_tmp = da_anom_meancorr.groupby('month').apply(_unstack_and_scale_per_month,
                                                                  scale=(da_target_std / da_biased_std_tmp),
                                                                  init_date_name=init_date_name,
                                                                  lead_time_name=lead_time_name) \
                                          .mean('month', skipna=True)
    da_anom_stdcorr_tmp[lead_time_name] = da_biased[lead_time_name]
    da_anom_stdcorr_tmp.coords['month'] = month

    # The scaled anomalies' std generally differs from the target's, so rescale once more
    try:
        da_biased_std = concat_times(da_anom_stdcorr_tmp).groupby('time.month').std(['time', 'ensemble'])
    except ValueError:
        da_biased_std = concat_times(da_anom_stdcorr_tmp).groupby('time.month').std('time')
    da_anom_stdcorr = da_anom_stdcorr_tmp.groupby(init_date_name).apply(_rescale, scale=(da_target_std / da_biased_std))

    if da_target_clim is not None:
        # Add the target climatology back on to recover the corrected full field
        da_stdcorr = da_anom_stdcorr.groupby(init_date_name).apply(_anomalize, clim=-da_target_clim)
        return da_stdcorr.drop('month'), da_anom_stdcorr.drop('month')
    else:
        return da_anom_stdcorr.drop('month')
def bias_correct_m(da_biased, da_target, da_target_clim=None, init_date_name='init_date',
lead_time_name='lead_time'):
    def _groupby_lead_and_mean(da, over_dims, init_date_name, lead_time_name):
        # Unstack the stacked (init_date, lead_time) dimension and average over over_dims per lead time
        return da.unstack('stacked_' + init_date_name + '_' + lead_time_name).groupby(lead_time_name).mean(over_dims, skipna=True)
def _unstack_and_shift_per_month(da, shift, init_date_name, lead_time_name):
da_us = da.unstack('stacked_' + init_date_name + '_' + lead_time_name)
the_month = np.ndarray.flatten(da_us.month.values)
the_month = int(np.unique(the_month[~np.isnan(the_month)]))
return da_us - shift.sel(month=the_month)
_anomalize = lambda data, clim: datetime_to_leadtime(
anomalize(
leadtime_to_datetime(data),clim))
da_biased = da_biased.copy()
da_target = da_target.copy()
month = (da_biased[init_date_name].dt.month + da_biased[lead_time_name]) % 12
month = month.where(month != 0, 12)
da_biased.coords['month'] = month
try:
da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=[init_date_name,'ensemble'],
init_date_name=init_date_name, lead_time_name=lead_time_name)
except ValueError:
da_biased_mean = da_biased.groupby('month').apply(_groupby_lead_and_mean, over_dims=init_date_name,
init_date_name=init_date_name, lead_time_name=lead_time_name)
if da_target_clim is not None:
da_target_mean = da_target.groupby('time.month').mean('time')
da_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean - da_target_mean),
init_date_name=init_date_name, lead_time_name=lead_time_name) \
.mean('month', skipna=True)
da_meancorr[lead_time_name] = da_biased[lead_time_name]
da_meancorr.coords['month'] = month
da_anom_meancorr = da_meancorr.groupby(init_date_name).apply(_anomalize, clim=da_target_clim)
da_anom_meancorr.coords['month'] = month
else:
da_anom_meancorr = da_biased.groupby('month').apply(_unstack_and_shift_per_month, shift=(da_biased_mean),
init_date_name=init_date_name, lead_time_name=lead_time_name) \
.mean('month', skipna=True)
da_anom_meancorr[lead_time_name] = da_anom_meancorr[lead_time_name]
da_anom_meancorr.coords['month'] = month
if da_target_clim is not None:
da_meancorrr = da_anom_meancorr.groupby(init_date_name).apply(_anomalize, clim=-da_target_clim)
return da_meancorr.drop('month'), da_anom_meancorr.drop('month')
else:
return da_anom_meancorr.drop('month')
def conditional_bias_correct(da_cmp, da_ref, over_dims):
    """
    Conditionally bias correct a comparison array against a reference array:
    scale by the ratio of standard deviations weighted by the Pearson
    correlation of the ensemble mean with the reference.
    """
    ensemble_mean = da_cmp.mean('ensemble')

    # Pearson correlation between the ensemble mean and the reference
    corr = skill.compute_Pearson_corrcoef(ensemble_mean, da_ref, over_dims=over_dims,
                                          subtract_local_mean=False)

    # Ratio of reference to ensemble-mean variability, modulated by skill
    scale_factor = (da_ref.std(over_dims) / ensemble_mean.std(over_dims)) * corr

    return da_cmp * scale_factor
def trunc_time(time, freq):
    """Truncate a numpy datetime64 value (or array) to the precision given by
    `freq`, e.g. 'M' for month or 'Y' for year."""
    target_dtype = '<M8[{}]'.format(freq)
    return time.astype(target_dtype)
def month_delta(date_in, delta, trunc_to_start=False):
    """
    Return `date_in` shifted by `delta` months (positive or negative).

    The day of month is clamped to the length of the target month (e.g.
    31 Jan + 1 month -> 28/29 Feb). If `trunc_to_start` is True, the result is
    truncated to the start of the target month.

    Parameters
    ----------
    date_in : anything accepted by pandas.Timestamp
        The date to shift
    delta : int
        Number of months to shift by
    trunc_to_start : bool, optional
        Truncate the result to the start of the month

    Returns
    -------
    numpy.datetime64 with nanosecond precision
    """
    import calendar

    date_mod = pd.Timestamp(date_in)

    # Target month/year; months are 1-based so map a zero remainder to December
    m, y = (date_mod.month + delta) % 12, date_mod.year + ((date_mod.month) + delta - 1) // 12
    if not m: m = 12

    # Clamp the day to the number of days in the target month.
    # Bug fix: the previous hand-rolled leap rule ("y % 4 == 0 and not
    # y % 400 == 0") misclassified century years (1900 treated as leap,
    # 2000 as non-leap); calendar.monthrange applies the full Gregorian rule.
    d = min(date_mod.day, calendar.monthrange(y, m)[1])

    if trunc_to_start:
        date_out = trunc_time(np.datetime64(date_mod.replace(day=d, month=m, year=y)), 'M')
    else:
        date_out = np.datetime64(date_mod.replace(day=d, month=m, year=y))

    return np.datetime64(date_out, 'ns')
def year_delta(date_in, delta, trunc_to_start=False):
    """
    Return `date_in` shifted by `delta` years (positive or negative),
    optionally truncated to the start of the target year.
    """
    shifted = month_delta(date_in, 12 * delta)
    return trunc_time(shifted, 'Y') if trunc_to_start else shifted
def datetime_to_leadtime(data_in):
    """
    Convert a datetime-indexed array to a lead-time-indexed one.

    The 'time' dimension is renamed to 'lead_time' (integer steps starting at
    0), the inferred time-step frequency is stored in the new dimension's
    'units' attribute, and the first timestamp is kept as an 'init_date'
    coordinate.

    NOTE(review): `data_in` is assumed to be an xarray object with a 'time'
    dimension of regularly spaced timestamps -- confirm against callers.
    """
    init_date = data_in.time.values[0]
    lead_times = range(len(data_in.time))
    try:
        # Infer the frequency string from the time axis and normalise
        # quarterly/annual codes to month-based codes
        freq = pd.infer_freq(data_in.time.values)

        # Remove any anchoring suffix (e.g. 'QS-DEC' -> 'QS')
        if '-' in freq:
            freq = freq[:freq.find('-')]

        # Split the frequency string into its numeric multiplier (default 1)
        # and its letter code
        incr_string = ''.join([i for i in freq if i.isdigit()])
        freq_incr = [int(incr_string) if incr_string else 1][0]
        freq_type = ''.join([i for i in freq if not i.isdigit()])

        # Order matters here: 'QS'/'YS'/'AS' must be tested before 'Q'/'Y'/'A'
        if 'QS' in freq_type:
            freq = str(3*freq_incr) + 'MS'
        elif 'Q' in freq_type:
            freq = str(3*freq_incr) + 'M'
        elif ('YS' in freq_type) | ('AS' in freq_type):
            freq = str(12*freq_incr) + 'MS'
        elif ('Y' in freq_type) | ('A' in freq_type):
            freq = str(12*freq_incr) + 'M'
    except ValueError:
        # pd.infer_freq needs at least 3 times; fall back to classifying the
        # single step between the first two timestamps
        dt = (data_in.time.values[1] - data_in.time.values[0]) / np.timedelta64(1, 's')
        month = data_in.time.dt.month[0]
        if dt == 60*60*24:
            freq = 'D'
        elif ((month == 1) | (month == 3) | (month == 5) | (month == 7) | (month == 8) | (month == 10) |
              (month == 12)) & (dt == 31*60*60*24):
            freq = 'MS'
        elif ((month == 4) | (month == 6) | (month == 9) | (month == 11)) & (dt == 30*60*60*24):
            freq = 'MS'
        elif (month == 2) & ((dt == 28*60*60*24) | (dt == 29*60*60*24)):
            freq = 'MS'
        elif (dt == 365*60*60*24) | (dt == 366*60*60*24):
            freq = 'A'
        else:
            # Unrecognised spacing: flag rather than guess
            freq = 'NA'
    # Re-index by integer lead time, recording the frequency and initial date
    data_out = data_in.rename({'time' : 'lead_time'})
    data_out['lead_time'] = lead_times
    data_out['lead_time'].attrs['units'] = freq
    data_out.coords['init_date'] = init_date
    return data_out
def leadtime_to_datetime(data_in, init_date_name='init_date', lead_time_name='lead_time'):
    """
    Convert a lead-time-indexed array back to a datetime-indexed one, using
    the stored initial date and the frequency recorded in the lead-time
    dimension's 'units' attribute.
    """
    # The init_date coordinate may be scalar or length-1
    init_values = data_in[init_date_name].values
    try:
        init_date = init_values[0]
    except IndexError:
        init_date = init_values

    # Validate that the lead times are integer steps, and count them
    lead_steps = [int(step) for step in data_in[lead_time_name].values]
    freq = data_in[lead_time_name].attrs['units']

    # Rebuild the datetime axis from the initial date and frequency
    datetimes = pd.date_range(init_date, periods=len(lead_steps), freq=freq).values

    data_out = data_in.drop(init_date_name)
    data_out = data_out.rename({lead_time_name: 'time'})
    data_out['time'] = datetimes

    return prune(data_out)
def get_nearest_point(da, lat, lon):
    """Return the value(s) of `da` at the grid point nearest to (lat, lon)."""
    location = {'lat': lat, 'lon': lon}
    return da.sel(**location, method='nearest')
def plot_fields(data, title=None, headings=None, ncol=2, contour=False, vlims=None, clims=None, squeeze_row=1,
                squeeze_col=1, squeeze_cbar=1, shift_cbar=1, cmap='viridis', fontsize=12, invert=False):
    """
    Plot a list of 2D fields on a grid of subplots.

    Fields with both 'lat' and 'lon' dimensions are drawn on a PlateCarree
    map (with coastlines and gridline labels); all other fields are drawn on
    plain axes. If `vlims` is a single [min, max] pair it is shared by all
    panels and a single horizontal colorbar is drawn; if it is a list of
    pairs, each panel gets its own colorbar.

    Parameters
    ----------
    data : list of 2D xarray DataArray
        Fields to plot
    title : str, optional
        Figure (or shared-colorbar) title
    headings : list of str, optional
        Per-panel headings/colorbar labels
    ncol : int, optional
        Number of subplot columns
    contour : bool, optional
        Draw filled contours instead of an image
    vlims : [min, max] or list thereof, optional
        Colour limits (shared or per panel)
    clims : [min, max] or list thereof, optional
        Contour-line limits; must match the shape of vlims
    squeeze_row, squeeze_col, squeeze_cbar, shift_cbar : float, optional
        Layout tuning factors
    cmap : str, optional
        Colormap name
    fontsize : int, optional
        Base font size
    invert : bool, optional
        Invert the y axis of non-map panels
    """

    def _depth(seq):
        # Nesting depth of a (possibly nested) sequence; used to decide
        # whether vlims is one shared pair or a list of per-panel pairs
        for level in count():
            if not seq:
                return level
            seq = list(chain.from_iterable(s for s in seq if isinstance(s, Sequence)))

    # Global font setup
    matplotlib.rc('font', family='sans-serif')
    matplotlib.rc('font', serif='Helvetica')
    matplotlib.rc('text', usetex='false')
    matplotlib.rcParams.update({'font.size': fontsize})
    nrow = int(np.ceil(len(data)/ncol));
    fig = plt.figure(figsize=(11*squeeze_col, nrow*4*squeeze_row))
    if (clims is not None) & (np.shape(vlims) != np.shape(clims)):
        raise ValueError('The input clims must be equal in size to vlims')
    # A flat [min, max] vlims means one colorbar shared across panels
    one_cbar = False
    if vlims is None:
        vlims = [[None, None]] * len(data)
    if _depth(vlims) == 1:
        one_cbar = True
    over_count = 1
    for idx,dat in enumerate(data):
        if one_cbar:
            vmin, vmax = vlims
            if clims is not None:
                cmin, cmax = clims
        else:
            vmin, vmax = vlims[idx]
            if clims is not None:
                cmin, cmax = clims[idx]
        if ('lat' in dat.dims) and ('lon' in dat.dims):
            # Map panel: plot on a PlateCarree projection centred at 180E
            trans = cartopy.crs.PlateCarree()
            ax = plt.subplot(nrow, ncol, over_count, projection=cartopy.crs.PlateCarree(central_longitude=180))
            extent = [dat.lon.min(), dat.lon.max(),
                      dat.lat.min(), dat.lat.max()]
            if contour is True:
                if clims is not None:
                    ax.coastlines(color='gray')
                    im = ax.contourf(dat.lon, dat.lat, dat, levels=np.linspace(vmin,vmax,12), origin='lower', transform=trans,
                                     vmin=vmin, vmax=vmax, cmap=cmap)
                    ax.contour(dat.lon, dat.lat, dat, levels=np.linspace(cmin,cmax,12), origin='lower', transform=trans,
                               vmin=vmin, vmax=vmax, colors='w', linewidths=2)
                    ax.contour(dat.lon, dat.lat, dat, levels=np.linspace(cmin,cmax,12), origin='lower', transform=trans,
                               vmin=vmin, vmax=vmax, colors='k', linewidths=1)
                else:
                    ax.coastlines(color='black')
                    im = ax.contourf(dat.lon, dat.lat, dat, origin='lower', transform=trans, vmin=vmin, vmax=vmax,
                                     cmap=cmap)
            else:
                ax.coastlines(color='black')
                im = ax.imshow(dat, origin='lower', extent=extent, transform=trans, vmin=vmin, vmax=vmax, cmap=cmap)
            # Gridline labels: only label the outer edges of the subplot grid
            gl = ax.gridlines(crs=cartopy.crs.PlateCarree(), draw_labels=True)
            gl.xlines = False
            gl.ylines = False
            gl.xlabels_top = False
            if over_count % ncol == 0:
                gl.ylabels_left = False
            elif (over_count+ncol-1) % ncol == 0:
                gl.ylabels_right = False
            else:
                gl.ylabels_left = False
                gl.ylabels_right = False
            gl.xlocator = mticker.FixedLocator([-90, 0, 90, 180])
            gl.ylocator = mticker.FixedLocator([-90, -60, 0, 60, 90])
            gl.xformatter = LONGITUDE_FORMATTER
            gl.yformatter = LATITUDE_FORMATTER
            if not one_cbar:
                # Per-panel colorbar, labelled with the panel heading
                cbar = plt.colorbar(im, ax=ax, orientation="horizontal", aspect=30/squeeze_cbar, pad=shift_cbar*0.1)
                tick_locator = mticker.MaxNLocator(nbins=6)
                cbar.locator = tick_locator
                cbar.update_ticks()
                if headings is not None:
                    cbar.set_label(headings[idx], labelpad=5, fontsize=fontsize);
            elif headings is not None:
                ax.set_title(headings[idx], fontsize=fontsize)
        else:
            # Non-map panel: fall back to plain axes, picking sensible x/y
            # dimensions from whatever the field provides
            ax = plt.subplot(nrow, ncol, over_count)
            if 'lat' in dat.dims:
                x_plt = dat['lat']
                y_plt = dat[utils.get_other_dims(dat,'lat')[0]]
            elif 'lon' in dat.dims:
                x_plt = dat['lon']
                y_plt = dat[utils.get_other_dims(dat,'lon')[0]]
            else:
                x_plt = dat[dat.dims[1]]
                y_plt = dat[dat.dims[0]]
            extent = [x_plt.min(), x_plt.max(),
                      y_plt.min(), y_plt.max()]
            if contour is True:
                if clims is not None:
                    im = ax.contourf(x_plt, y_plt, dat, levels=np.linspace(vmin,vmax,12), vmin=vmin, vmax=vmax,
                                     cmap=cmap)
                    ax.contour(x_plt, y_plt, dat, levels=np.linspace(cmin,cmax,12), colors='w', linewidths=2)
                    ax.contour(x_plt, y_plt, dat, levels=np.linspace(cmin,cmax,12), colors='k', linewidths=1)
                else:
                    im = ax.contourf(x_plt, y_plt, dat, vmin=vmin, vmax=vmax, cmap=cmap)
            else:
                im = ax.imshow(dat, origin='lower', extent=extent, vmin=vmin, vmax=vmax, cmap=cmap)
            # Axis labels only on the outer edges of the subplot grid
            if over_count % ncol == 0:
                ax.yaxis.tick_right()
            elif (over_count+ncol-1) % ncol == 0:
                ax.set_ylabel(y_plt.dims[0], fontsize=fontsize)
            else:
                ax.set_yticks([])
            if idx / ncol >= nrow - 1:
                ax.set_xlabel(x_plt.dims[0], fontsize=fontsize)
            if not one_cbar:
                cbar = plt.colorbar(im, ax=ax, orientation="horizontal", aspect=30/squeeze_cbar, pad=shift_cbar*0.1)
                tick_locator = mticker.MaxNLocator(nbins=6)
                cbar.locator = tick_locator
                cbar.update_ticks()
                if headings is not None:
                    cbar.set_label(headings[idx], labelpad=5, fontsize=fontsize);
            elif headings is not None:
                ax.set_title(headings[idx], fontsize=fontsize)
            if invert:
                ax.invert_yaxis()
        over_count += 1
    plt.tight_layout()
    if one_cbar:
        # Single shared colorbar beneath all panels, using the last image drawn
        vmin, vmax = vlims
        fig.subplots_adjust(bottom=shift_cbar*0.16)
        cbar_ax = fig.add_axes([0.15, 0.13, 0.7, squeeze_cbar*0.020])
        cbar = fig.colorbar(im, cax=cbar_ax, orientation='horizontal');
        cbar_ax.set_xlabel(title, rotation=0, labelpad=15, fontsize=fontsize);
        cbar.set_ticks(np.linspace(vmin,vmax,5))
    elif title is not None:
        fig.suptitle(title, y=1)
def size_GB(xr_object):
    """
    Print and return the in-memory size of an xarray object in gigabytes
    (10**9 bytes) and gibibytes (2**30 bytes).

    Parameters
    ----------
    xr_object : object with `nbytes` and `name` attributes
        The object whose size to report

    Returns
    -------
    (GBytes, GiBytes) : tuple of float
    """
    # Rename: the original local shadowed the builtin `bytes`
    n_bytes = xr_object.nbytes
    bytes_per_GB = 10 ** 9   # decimal gigabyte
    bytes_per_GiB = 2 ** 30  # binary gibibyte
    GBytes = n_bytes / bytes_per_GB
    GiBytes = n_bytes / bytes_per_GiB
    print(xr_object.name, "is", GBytes, "GB", 'which is', GiBytes, "GiB")
    return GBytes, GiBytes
def get_pres_name(da):
    """
    Return the name of the pressure dimension in `da`: 'pfull' (full levels)
    or 'phalf' (half levels).

    Raises
    ------
    KeyError
        If neither pressure dimension is present.
    """
    if 'pfull' in da.dims:
        return 'pfull'
    elif 'phalf' in da.dims:
        return 'phalf'
    else:
        # Fix: removed an unreachable `pass` that followed this raise
        raise KeyError('Unable to determine pressure dimension')
def did_event(da, event):
    """
    Return a logical array named 'event_logical' flagging where `da`
    satisfies the condition described by the string `event`
    (e.g. '> 1', '== 2', '> 0 and < 5').

    NOTE(review): the condition is executed with eval(); never pass an
    untrusted string as `event`.
    """
    # Rewrite the condition so each comparison applies to `da`, and normalise
    # alternative logical-operator spellings to bitwise & / | (the forms
    # xarray supports element-wise).
    # NOTE(review): these are plain substring replacements -- "and"/"or"
    # occurring inside longer words would also be rewritten, so `event` is
    # assumed to be a simple comparison expression.
    eval_expr = event.replace(">", "da >").replace("<", "da <").replace("==", "da ==") \
                     .replace("=", "da ==").replace('&&', '&').replace('||', '|') \
                     .replace("and", "&").replace("or", "|")
    eval_expr = '(' + eval_expr + ').rename("event_logical")'
    return eval(eval_expr)
def compute_likelihood(da_logical, dim='ensemble'):
    """
    Return the likelihood of an event from an array of event logicals, by
    averaging over the given dimension.

    Parameters
    ----------
    da_logical : xarray DataArray
        Logical (0/1) array indicating event occurrence
    dim : str or None, optional
        Dimension to average over; if None, `da_logical` is returned
        unchanged (it is already the likelihood)

    Returns
    -------
    xarray DataArray named 'likelihood'
    """
    # Idiom fix: compare against None with `is`, not `==`
    if dim is None:
        likelihood = da_logical
    else:
        likelihood = da_logical.mean(dim=dim).rename('likelihood')
    return likelihood
def atmos_energy_cycle(temp, u, v, omega, gh, terms=None, vgradz=False, spectral=False, n_wavenumbers=20,
                       integrate=True, loop_triple_terms=False, lat_name=None, lon_name=None,
                       plevel_name=None):
    """
    Compute terms of the Lorenz atmospheric energy cycle (zonal/eddy available
    potential and kinetic energies and their conversion, generation and
    dissipation terms) from temperature, winds, vertical velocity and
    geopotential height.

    Parameters
    ----------
    temp, u, v, omega, gh : xarray DataArray
        Temperature, zonal wind, meridional wind, pressure vertical velocity
        and geopotential height on pressure levels
    terms : str or list of str, optional
        Which terms to compute (default: all of Pz, Kz, Pe, Ke, Cz, Ca, Ce,
        Ck, Gz, Ge, Dz, De)
    vgradz : bool, optional
        Use the v.grad(z) formulation for the Cz/Ce conversion terms
    spectral : bool, optional
        Compute the eddy terms per zonal wavenumber (Fourier space)
    n_wavenumbers : int, optional
        Number of wavenumbers retained in spectral mode
    integrate : bool, optional
        Additionally return each term integrated over the atmosphere
    loop_triple_terms : bool, optional
        Use the loop implementation of the triple-product sums (lower memory)
    lat_name, lon_name, plevel_name : str, optional
        Dimension names; inferred from `temp` when not given

    Returns
    -------
    xarray Dataset of the requested terms ('*_int' integrands and, when
    `integrate` is True, their atmospheric integrals).

    NOTE(review): pressure levels appear to be expected in hPa (they are
    multiplied by 100 before use) -- confirm against callers.
    """

    def _flip_n(da):
        # Mirror the wavenumber coordinate (n -> -n) and re-sort
        daf = da.copy()
        daf['n'] = -daf['n']
        return daf.sortby(daf['n'])

    def _truncate(F, n_truncate, dim):
        # Rescale the FFT frequency coordinate to integer wavenumbers, keep
        # |n| <= n_truncate, and return both the array and its mirror
        F[dim] = 360 * F[dim]
        F = F.rename({dim : 'n'})
        F = F.where(abs(F.n) <= n_truncate, drop=True)
        return F, _flip_n(F)

    def _triple_terms(A, B, C):
        # Vectorised sum over m of A(m) * B(n) * C(n - m), excluding m == 0
        Am = A.rename({'n' : 'm'})
        Cnm = C.rolling(n=len(C.n), center=True).construct('m', fill_value=0)
        Cnm['m'] = -C['n'].values
        Am = Am.where(Am['m'] != 0, drop=True)
        Cnm = Cnm.where(Cnm['m'] != 0, drop=True)
        return (B * (Am * Cnm)).sum(dim='m', skipna=False)

    def _triple_terms_loop(A, B, C):
        # Loop implementation of the same triple-product sum (lower memory)
        ms = A['n'].where(A['n'] != 0, drop=True).values
        ABC = A.copy() * 0
        for m in ms:
            Am = A.sel(n=m)
            Cnm = C.shift(n=int(m)).fillna(0)
            ABC = ABC + (Am * B * Cnm)
        return ABC

    if terms is None:
        terms = ['Pz', 'Kz', 'Pe', 'Ke', 'Cz', 'Ca', 'Ce', 'Ck', 'Gz', 'Ge', 'Dz', 'De']
    if isinstance(terms, str):
        terms = [terms]
    # Initialize some things -----
    if lat_name is None:
        lat_name = utils.get_lat_name(temp)
    if lon_name is None:
        lon_name = utils.get_lon_name(temp)
    if plevel_name is None:
        plevel_name = utils.get_plevel_name(temp)
    degtorad = utils.constants().pi / 180
    tan_lat = xr.ufuncs.tan(temp[lat_name] * degtorad)
    cos_lat = xr.ufuncs.cos(temp[lat_name] * degtorad)
    # Determine the stability parameter using Saltzman's approach -----
    kappa = utils.constants().R_d / utils.constants().C_pd
    p_kap = (1000 / temp[plevel_name]) ** kappa
    theta_A = utils.average(temp * p_kap, [lat_name, lon_name], weights=cos_lat)
    dtheta_Adp = utils.differentiate_wrt(theta_A, dim=plevel_name, x=(theta_A[plevel_name] * 100))
    gamma = - p_kap * (utils.constants().R_d) / ((temp[plevel_name] * 100) * utils.constants().C_pd) / dtheta_Adp
    energies = gamma.rename('gamma').to_dataset()
    # Zonal available potential energy -----
    if ('Pz' in terms):
        temp_A = utils.average(temp, [lat_name, lon_name], weights=cos_lat)
        temp_Z = temp.mean(dim=lon_name)
        temp_Za = temp_Z - temp_A
        Pz_int = gamma * utils.constants().C_pd / 2 * temp_Za ** 2
        energies['Pz_int'] = Pz_int
        if integrate:
            Pz = _int_over_atmos(Pz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])
            energies['Pz'] = Pz
    # Zonal kinetic energy -----
    if ('Kz' in terms):
        u_Z = u.mean(dim=lon_name)
        v_Z = v.mean(dim=lon_name)
        Kz_int = 0.5 * (u_Z ** 2 + v_Z ** 2)
        energies['Kz_int'] = Kz_int
        if integrate:
            Kz = _int_over_atmos(Kz_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])
            energies['Kz'] = Kz
    # Conversion of zonal available potential energy to zonal kinetic energy -----
    if ('Cz' in terms):
        if vgradz:
            if 'v_Z' not in locals():
                v_Z = v.mean(dim=lon_name)
            gh_Z = gh.mean(dim=lon_name)
            dghdlat = utils.differentiate_wrt(gh_Z, dim=lat_name, x=(gh_Z[lat_name] * degtorad))
            Cz_int = - (utils.constants().g / utils.constants().R_earth) * v_Z * dghdlat
            energies['Cz_int'] = Cz_int
            if integrate:
                Cz = _int_over_atmos(Cz_int, lat_name, lon_name, plevel_name, lon_dim=gh[lon_name])
                energies['Cz'] = Cz
        else:
            if 'temp_Za' not in locals():
                temp_A = utils.average(temp, [lat_name, lon_name], weights=cos_lat)
                temp_Z = temp.mean(dim=lon_name)
                temp_Za = temp_Z - temp_A
            omega_A = utils.average(omega, [lat_name, lon_name], weights=cos_lat)
            omega_Z = omega.mean(dim=lon_name)
            omega_Za = omega_Z - omega_A
            Cz_int = - (utils.constants().R_d / (temp[plevel_name] * 100)) * omega_Za * temp_Za
            energies['Cz_int'] = Cz_int
            if integrate:
                Cz = _int_over_atmos(Cz_int, lat_name, lon_name, plevel_name, lon_dim=omega[lon_name])
                energies['Cz'] = Cz
    if spectral:
        # Eddy terms computed per zonal wavenumber in Fourier space -----
        if ('Pe' in terms):
            Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) /
                               len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
            Pn_int = (gamma * utils.constants().C_pd * abs(Bp) ** 2)
            energies['Pn_int'] = Pn_int
            if integrate:
                Pn = _int_over_atmos(Pn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])
                energies['Pn'] = Pn
            Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) /
                               len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
            Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) /
                               len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
            Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) /
                               len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
            dBpdlat = utils.differentiate_wrt(Bp, dim=lat_name, x=(Bp[lat_name] * degtorad))
            dBndlat = utils.differentiate_wrt(Bn, dim=lat_name, x=(Bn[lat_name] * degtorad))
            dBpdp = utils.differentiate_wrt(Bp, dim=plevel_name, x=(Bp[plevel_name] * 100))
            dBndp = utils.differentiate_wrt(Bn, dim=plevel_name, x=(Bn[plevel_name] * 100))
            if loop_triple_terms:
                BpBnUp = _triple_terms_loop(Bp, Bn, Up)
                BpBpUn = _triple_terms_loop(Bp, Bp, Un)
                BpglBnVp = _triple_terms_loop(Bp, dBndlat, Vp)
                BpglBpVn = _triple_terms_loop(Bp, dBpdlat, Vn)
                BpgpBnOp = _triple_terms_loop(Bp, dBndp, Op)
                BpgpBpOn = _triple_terms_loop(Bp, dBpdp, On)
                BpBnOp = _triple_terms_loop(Bp, Bn, Op)
                BpBpOn = _triple_terms_loop(Bp, Bp, On)
            else:
                BpBnUp = _triple_terms(Bp, Bn, Up)
                BpBpUn = _triple_terms(Bp, Bp, Un)
                BpglBnVp = _triple_terms(Bp, dBndlat, Vp)
                BpglBpVn = _triple_terms(Bp, dBpdlat, Vn)
                BpgpBnOp = _triple_terms(Bp, dBndp, Op)
                BpgpBpOn = _triple_terms(Bp, dBpdp, On)
                BpBnOp = _triple_terms(Bp, Bn, Op)
                BpBpOn = _triple_terms(Bp, Bp, On)
            Sn_int = -gamma * utils.constants().C_pd * (1j * Bp['n']) / \
                     (utils.constants().R_earth * xr.ufuncs.cos(Bp[lat_name] * degtorad)) * \
                     (BpBnUp + BpBpUn) + \
                     gamma * utils.constants().C_pd / utils.constants().R_earth * \
                     (BpglBnVp + BpglBpVn) + \
                     gamma * utils.constants().C_pd * (BpgpBnOp + BpgpBpOn) + \
                     gamma * utils.constants().R_d / Bp[plevel_name] * \
                     (BpBnOp + BpBpOn)
            energies['Sn_int'] = Sn_int
            if integrate:
                Sn = abs(_int_over_atmos(Sn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]))
                energies['Sn'] = Sn
        if ('Ke' in terms):
            if 'U' not in locals():
                Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) /
                                   len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
            if 'V' not in locals():
                Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) /
                                   len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
            Kn_int = abs(Up) ** 2 + abs(Vp) ** 2
            energies['Kn_int'] = Kn_int
            if integrate:
                Kn = _int_over_atmos(Kn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])
                energies['Kn'] = Kn
            if 'O' not in locals():
                Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) /
                                   len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
            dUpdp = utils.differentiate_wrt(Up, dim=plevel_name, x=(Up[plevel_name] * 100))
            dVpdp = utils.differentiate_wrt(Vp, dim=plevel_name, x=(Vp[plevel_name] * 100))
            dOpdp = utils.differentiate_wrt(Op, dim=plevel_name, x=(Op[plevel_name] * 100))
            dOndp = utils.differentiate_wrt(On, dim=plevel_name, x=(On[plevel_name] * 100))
            dVpcdl = utils.differentiate_wrt(Vp * cos_lat, dim=lat_name, x=(Vp[lat_name] * degtorad))
            dVncdl = utils.differentiate_wrt(Vn * cos_lat, dim=lat_name, x=(Vn[lat_name] * degtorad))
            dUpdl = utils.differentiate_wrt(Up, dim=lat_name, x=(Up[lat_name] * degtorad))
            dVpdl = utils.differentiate_wrt(Vp, dim=lat_name, x=(Vp[lat_name] * degtorad))
            if loop_triple_terms:
                UpUnUp = _triple_terms_loop(Up, Un, Up)
                UpUpUn = _triple_terms_loop(Up, Up, Un)
                VpVnUp = _triple_terms_loop(Vp, Vn, Up)
                VpVpUn = _triple_terms_loop(Vp, Vp, Un)
                VpUnUp = _triple_terms_loop(Vp, Un, Up)
                VpUpUn = _triple_terms_loop(Vp, Up, Un)
                UpVnUp = _triple_terms_loop(Up, Vn, Up)
                UpVpUn = _triple_terms_loop(Up, Vp, Un)
                gpUpUngpOp = _triple_terms_loop(dUpdp, Un, dOpdp)
                gpUpUpgpOn = _triple_terms_loop(dUpdp, Up, dOndp)
                gpVpVngpOp = _triple_terms_loop(dVpdp, Vn, dOpdp)
                gpVpVpgpOn = _triple_terms_loop(dVpdp, Vp, dOndp)
                glUpUnglVpc = _triple_terms_loop(dUpdl, Un, dVpcdl)
                glUpUpglVnc = _triple_terms_loop(dUpdl, Up, dVncdl)
                glVpVnglVpc = _triple_terms_loop(dVpdl, Vn, dVpcdl)
                glVpVpglVnc = _triple_terms_loop(dVpdl, Vp, dVncdl)
            else:
                UpUnUp = _triple_terms(Up, Un, Up)
                UpUpUn = _triple_terms(Up, Up, Un)
                VpVnUp = _triple_terms(Vp, Vn, Up)
                VpVpUn = _triple_terms(Vp, Vp, Un)
                VpUnUp = _triple_terms(Vp, Un, Up)
                VpUpUn = _triple_terms(Vp, Up, Un)
                UpVnUp = _triple_terms(Up, Vn, Up)
                UpVpUn = _triple_terms(Up, Vp, Un)
                gpUpUngpOp = _triple_terms(dUpdp, Un, dOpdp)
                gpUpUpgpOn = _triple_terms(dUpdp, Up, dOndp)
                gpVpVngpOp = _triple_terms(dVpdp, Vn, dOpdp)
                gpVpVpgpOn = _triple_terms(dVpdp, Vp, dOndp)
                glUpUnglVpc = _triple_terms(dUpdl, Un, dVpcdl)
                glUpUpglVnc = _triple_terms(dUpdl, Up, dVncdl)
                glVpVnglVpc = _triple_terms(dVpdl, Vn, dVpcdl)
                glVpVpglVnc = _triple_terms(dVpdl, Vp, dVncdl)
            Ln_int = -(1j * Up['n']) / (utils.constants().R_earth * cos_lat) * \
                     (UpUnUp - UpUpUn) + \
                     (1j * Vp['n']) / (utils.constants().R_earth * cos_lat) * \
                     (VpVnUp - VpVpUn) - \
                     tan_lat / utils.constants().R_earth * \
                     (VpUnUp + VpUpUn) + \
                     tan_lat / utils.constants().R_earth * \
                     (UpVnUp + UpVpUn) + \
                     (gpUpUngpOp + gpUpUpgpOn) + \
                     (gpVpVngpOp + gpVpVpgpOn) + \
                     1 / (utils.constants().R_earth * cos_lat) * \
                     (glUpUnglVpc + glUpUpglVnc + glVpVnglVpc + glVpVpglVnc)
            energies['Ln_int'] = Ln_int
            if integrate:
                Ln = abs(_int_over_atmos(Ln_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name]))
                energies['Ln'] = Ln
        if ('Ca' in terms):
            if 'temp_Z' not in locals():
                temp_Z = temp.mean(dim=lon_name)
            if 'V' not in locals():
                Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) /
                                   len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
            if 'B' not in locals():
                Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) /
                                   len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
            if 'O' not in locals():
                Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) /
                                   len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
            dtemp_Zdlat = utils.differentiate_wrt(temp_Z, dim=lat_name, x=(temp_Z[lat_name] * degtorad))
            theta = temp * p_kap
            theta_Z = theta.mean(dim=lon_name)
            theta_Za = theta_Z - theta_A
            dtheta_Zadp = utils.differentiate_wrt(theta_Za, dim=plevel_name, x=(theta_Za[plevel_name] * 100))
            Rn_int = gamma * utils.constants().C_pd * ((dtemp_Zdlat / utils.constants().R_earth) * (Vp * Bn + Vn * Bp) +
                                                       (p_kap * dtheta_Zadp) * (Op * Bn + On * Bp))
            energies['Rn_int'] = Rn_int
            if integrate:
                Rn = abs(_int_over_atmos(Rn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]))
                energies['Rn'] = Rn
        if ('Ce' in terms):
            if vgradz:
                if 'U' not in locals():
                    Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) /
                                       len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
                if 'V' not in locals():
                    Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) /
                                       len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
                Ap, An = _truncate(utils.fft(gh, dim=lon_name, nfft=len(gh[lon_name]), twosided=True, shift=True) /
                                   len(gh[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
                dApdlat = utils.differentiate_wrt(Ap, dim=lat_name, x=(Ap[lat_name] * degtorad))
                dAndlat = utils.differentiate_wrt(An, dim=lat_name, x=(An[lat_name] * degtorad))
                Cn_int = (((-1j * utils.constants().g * Up['n']) / \
                           (utils.constants().R_earth * xr.ufuncs.cos(Up[lat_name] * degtorad))) * \
                          (Ap * Un - An * Up)) - \
                         ((utils.constants().g / utils.constants().R_earth) * \
                          (dApdlat * Vn + dAndlat * Vp))
                energies['Cn_int'] = Cn_int
                if integrate:
                    Cn = abs(_int_over_atmos(Cn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name]))
                    energies['Cn'] = Cn
            else:
                if 'O' not in locals():
                    Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) /
                                       len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
                if 'B' not in locals():
                    Bp, Bn = _truncate(utils.fft(temp, dim=lon_name, nfft=len(temp[lon_name]), twosided=True, shift=True) /
                                       len(temp[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
                Cn_int = - (utils.constants().R_d / (omega[plevel_name] * 100)) * (Op * Bn + On * Bp)
                energies['Cn_int'] = Cn_int
                if integrate:
                    Cn = abs(_int_over_atmos(Cn_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name]))
                    energies['Cn'] = Cn
        if ('Ck' in terms):
            if 'v_Z' not in locals():
                v_Z = v.mean(dim=lon_name)
            if 'u_Z' not in locals():
                u_Z = u.mean(dim=lon_name)
            if 'U' not in locals():
                Up, Un = _truncate(utils.fft(u, dim=lon_name, nfft=len(u[lon_name]), twosided=True, shift=True) /
                                   len(u[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
            if 'V' not in locals():
                Vp, Vn = _truncate(utils.fft(v, dim=lon_name, nfft=len(v[lon_name]), twosided=True, shift=True) /
                                   len(v[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
            if 'O' not in locals():
                Op, On = _truncate(utils.fft(omega, dim=lon_name, nfft=len(omega[lon_name]), twosided=True, shift=True) /
                                   len(omega[lon_name]), n_truncate=n_wavenumbers, dim='f_'+lon_name)
            dv_Zdlat = utils.differentiate_wrt(v_Z, dim=lat_name, x=(v[lat_name] * degtorad))
            du_Zndlat = utils.differentiate_wrt(u_Z / xr.ufuncs.cos(u[lat_name] * degtorad),
                                                dim=lat_name, x=(u[lat_name] * degtorad))
            dv_Zdp = utils.differentiate_wrt(v_Z, dim=plevel_name, x=(v[plevel_name] * 100))
            du_Zdp = utils.differentiate_wrt(u_Z, dim=plevel_name, x=(u[plevel_name] * 100))
            Mn_int = (-2 * Up * Un * v_Z * tan_lat / utils.constants().R_earth) + \
                     (2 * Vp * Vn * dv_Zdlat / utils.constants().R_earth + (Vp * On + Vn * Op) * dv_Zdp) + \
                     ((Up * On + Un * Op) * du_Zdp) + \
                     ((Up * Vn + Un * Vp) * xr.ufuncs.cos(u[lat_name] * degtorad) / \
                      utils.constants().R_earth * du_Zndlat)
            energies['Mn_int'] = Mn_int
            if integrate:
                Mn = abs(_int_over_atmos(Mn_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name]))
                energies['Mn'] = Mn
    else:
        # Eddy terms computed in real space -----
        if ('Pe' in terms):
            if 'temp_Z' not in locals():
                temp_Z = temp.mean(dim=lon_name)
            temp_z = temp - temp_Z
            Pe_int = gamma * utils.constants().C_pd / 2 * (temp_z ** 2).mean(dim=lon_name)
            energies['Pe_int'] = Pe_int
            if integrate:
                Pe = _int_over_atmos(Pe_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])
                energies['Pe'] = Pe
        if ('Ke' in terms):
            if 'u_Z' not in locals():
                u_Z = u.mean(dim=lon_name)
            if 'v_Z' not in locals():
                v_Z = v.mean(dim=lon_name)
            u_z = u - u_Z
            v_z = v - v_Z
            Ke_int = 0.5 * (u_z ** 2 + v_z ** 2).mean(dim=lon_name)
            energies['Ke_int'] = Ke_int
            if integrate:
                Ke = _int_over_atmos(Ke_int, lat_name, lon_name, plevel_name, lon_dim=u[lon_name])
                energies['Ke'] = Ke
        if ('Ca' in terms):
            if 'v_Z' not in locals():
                v_Z = v.mean(dim=lon_name)
            if 'temp_Z' not in locals():
                temp_Z = temp.mean(dim=lon_name)
            if 'omega_Z' not in locals():
                omega_Z = omega.mean(dim=lon_name)
            if 'theta_Z' not in locals():
                theta = temp * p_kap
                theta_Z = theta.mean(dim=lon_name)
            if 'dtemp_Zdlat' not in locals():
                dtemp_Zdlat = utils.differentiate_wrt(temp_Z, dim=lat_name, x=(temp_Z[lat_name] * degtorad))
            v_z = v - v_Z
            temp_z = temp - temp_Z
            omega_z = omega - omega_Z
            oT_Z = (omega_z * temp_z).mean(dim=lon_name)
            oT_A = utils.average(omega_z * temp_z, [lat_name, lon_name], weights=cos_lat)
            oT_Za = oT_Z - oT_A
            theta_Za = theta_Z - theta_A
            dtheta_Zadp = utils.differentiate_wrt(theta_Za, dim=plevel_name, x=(theta_Za[plevel_name] * 100))
            Ca_int = - gamma * utils.constants().C_pd * \
                     (((v_z * temp_z).mean(dim=lon_name) * dtemp_Zdlat / utils.constants().R_earth) + \
                      (p_kap * oT_Za * dtheta_Zadp))
            energies['Ca_int'] = Ca_int
            if integrate:
                Ca = _int_over_atmos(Ca_int, lat_name, lon_name, plevel_name, lon_dim=v[lon_name])
                energies['Ca'] = Ca
        if ('Ce' in terms):
            if 'temp_Z' not in locals():
                temp_Z = temp.mean(dim=lon_name)
            if 'omega_Z' not in locals():
                omega_Z = omega.mean(dim=lon_name)
            temp_z = temp - temp_Z
            omega_z = omega - omega_Z
            Ce_int = - (utils.constants().R_d / (temp[plevel_name] * 100)) * \
                     (omega_z * temp_z).mean(dim=lon_name)
            energies['Ce_int'] = Ce_int
            if integrate:
                Ce = _int_over_atmos(Ce_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])
                energies['Ce'] = Ce
        if ('Ck' in terms):
            if 'u_Z' not in locals():
                u_Z = u.mean(dim=lon_name)
            if 'v_Z' not in locals():
                v_Z = v.mean(dim=lon_name)
            if 'omega_Z' not in locals():
                omega_Z = omega.mean(dim=lon_name)
            u_z = u - u_Z
            v_z = v - v_Z
            omega_z = omega - omega_Z
            du_Zndlat = utils.differentiate_wrt(u_Z / cos_lat, dim=lat_name, x=(u_Z[lat_name] * degtorad))
            dv_Zdlat = utils.differentiate_wrt(v_Z, dim=lat_name, x=(v_Z[lat_name] * degtorad))
            du_Zdp = utils.differentiate_wrt(u_Z, dim=plevel_name, x=(u_Z[plevel_name] * 100))
            dv_Zdp = utils.differentiate_wrt(v_Z, dim=plevel_name, x=(v_Z[plevel_name] * 100))
            Ck_int = (u_z * v_z).mean(dim=lon_name) * cos_lat * du_Zndlat / utils.constants().R_earth + \
                     (u_z * omega_z).mean(dim=lon_name) * du_Zdp + \
                     (v_z ** 2).mean(dim=lon_name) * dv_Zdlat / utils.constants().R_earth + \
                     (v_z * omega_z).mean(dim=lon_name) * dv_Zdp - \
                     (u_z ** 2).mean(dim=lon_name) * v_Z * tan_lat / utils.constants().R_earth
            energies['Ck_int'] = Ck_int
            if integrate:
                Ck = _int_over_atmos(Ck_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])
                energies['Ck'] = Ck
    # Residual terms: generation and dissipation -----
    if ('Gz' in terms):
        if ('Cz' not in terms) | ('Ca' not in terms):
            raise ValueError('The rate of generation of zonal available potential energy, Gz, is computed from the sum of Cz and Ca. Please add these to the list, terms=[<terms>].')
        if spectral:
            warnings.warn('Rate of generation of zonal available potential energy is computed from the sum of Cz and Ca and cannot be computed in Fourier space. Returning Gz in real-space.')
            Ca_int = Rn_int.where(Rn_int.n > 0, drop=True).sum(dim='n').real
        Gz_int = Cz_int + Ca_int
        energies['Gz_int'] = Gz_int
        if integrate:
            Gz = _int_over_atmos(Gz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])
            energies['Gz'] = Gz
    if ('Ge' in terms):
        if ('Ce' not in terms) | ('Ca' not in terms):
            raise ValueError('The rate of generation of eddy available potential energy, Ge, is computed from the residual of Ce and Ca. Please add these to the list, terms=[<terms>].')
        if spectral:
            warnings.warn('The rate of generation of eddy available potential energy is computed from the residual of Ce and Ca and cannot be computed in Fourier space. Returning Ge in real-space.')
            Ce_int = Cn_int.where(Cn_int.n > 0, drop=True).sum(dim='n').real
            if 'Ca_int' not in locals():
                Ca_int = Rn_int.where(Rn_int.n > 0, drop=True).sum(dim='n').real
        Ge_int = Ce_int - Ca_int
        energies['Ge_int'] = Ge_int
        if integrate:
            Ge = _int_over_atmos(Ge_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])
            energies['Ge'] = Ge
    if ('Dz' in terms):
        if ('Cz' not in terms) | ('Ck' not in terms):
            raise ValueError('The rate of viscous dissipation of zonal kinetic energy, Dz, is computed from the residual of Cz and Ck. Please add these to the list, terms=[<terms>].')
        if spectral:
            warnings.warn('The rate of viscous dissipation of zonal kinetic energy, Dz, is computed from the residual of Cz and Ck and cannot be computed in Fourier space. Returning De in real-space.')
            Ck_int = Mn_int.where(Mn_int.n > 0, drop=True).sum(dim='n').real
        Dz_int = Cz_int - Ck_int
        energies['Dz_int'] = Dz_int
        if integrate:
            Dz = _int_over_atmos(Dz_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])
            energies['Dz'] = Dz
    if ('De' in terms):
        if ('Ce' not in terms) | ('Ck' not in terms):
            raise ValueError('The rate of viscous dissipation of eddy kinetic energy, De, is computed from the residual of Ce and Ck. Please add these to the list, terms=[<terms>].')
        if spectral:
            warnings.warn('The rate of viscous dissipation of eddy kinetic energy, De, is computed from the residual of Ce and Ck and cannot be computed in Fourier space. Returning De in real-space.')
            if 'Ce_int' not in locals():
                Ce_int = Cn_int.where(Cn_int.n > 0, drop=True).sum(dim='n').real
            if 'Ck_int' not in locals():
                Ck_int = Mn_int.where(Mn_int.n > 0, drop=True).sum(dim='n').real
        De_int = Ce_int - Ck_int
        energies['De_int'] = De_int
        if integrate:
            De = _int_over_atmos(De_int, lat_name, lon_name, plevel_name, lon_dim=temp[lon_name])
            energies['De'] = De
    return energies
def auto_merge(paths, preprocess=None, parallel=True, **kwargs):
    """
    Automatically open a collection of netCDF files and combine them into a
    single dataset, inferring the concatenation dimensions from the
    coordinates of each file.

    Parameters
    ----------
    paths : iterable of str
        Paths of the files to open and combine
    preprocess : callable, optional
        Function applied to each dataset before combination
    parallel : bool, optional
        If True, defer opening/preprocessing via dask.delayed and execute
        everything in a single dask.compute call
    **kwargs :
        Additional keyword arguments forwarded to xarray.open_dataset

    Returns
    -------
    xarray Dataset
    """
    if parallel:
        # Wrap the open/getattr/preprocess calls so the work is deferred and
        # executed in one dask.compute below
        open_ = dask.delayed(xr.open_dataset)
        getattr_ = dask.delayed(getattr)
        if preprocess is not None:
            preprocess = dask.delayed(preprocess)
    else:
        # Bug fix: this previously referenced the undefined name
        # `open_dataset`, raising NameError whenever parallel=False
        open_ = xr.open_dataset
        getattr_ = getattr

    datasets = [open_(p, **kwargs) for p in paths]
    # Keep references to the underlying file objects so they stay open.
    # NOTE(review): `_file_obj` is xarray-private API -- may break on upgrade.
    file_objs = [getattr_(ds, '_file_obj') for ds in datasets]
    if preprocess is not None:
        datasets = [preprocess(ds) for ds in datasets]

    if parallel:
        # Materialise the deferred opens/preprocessing in a single pass
        datasets, file_objs = dask.compute(datasets, file_objs)

    def _combine_along_last_dim(datasets):
        # Merge groups of datasets that differ only along their last
        # splitting dimension
        merged = []

        # Dimensions along which the datasets are split across files
        split_dims = [d for d in datasets[0].dims if
                      len(np.unique([ds[d].values[0] for ds in datasets])) > 1]

        # Concatenate along the trailing split dimension
        concat_dim = split_dims[-1]

        # Group datasets that agree on all other split dimensions
        sorted_ds = sorted(datasets, key=lambda ds: tuple(ds[d].values[0]
                                                          for d in split_dims))
        for _, group in itertools.groupby(
                sorted_ds,
                key=lambda ds: tuple(ds[d].values[0] for d in split_dims[:-1])
        ):
            merged.append(xr.auto_combine(group, concat_dim=concat_dim))
        return merged

    # Repeatedly collapse the trailing split dimension until one dataset
    # remains
    merged = datasets
    while len(merged) > 1:
        merged = _combine_along_last_dim(merged)

    return merged[0]
f723c95d5658ceeced474744cccb763f8b5be84d | 2,382 | py | Python | Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/demos/analytical_values/demo_k_ejt1.py | gonzalo-munillag/Private_AI_OpenMined | c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca | [
"MIT"
] | 5 | 2021-01-06T16:49:22.000Z | 2021-02-19T05:34:27.000Z | Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/demos/analytical_values/demo_k_ejt1.py | gonzalo-munillag/Private_AI_OpenMined | c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca | [
"MIT"
] | null | null | null | Foundations_of_Private_Computation/Split_Learning/concepts-definitions-code/ite-repo/demos/analytical_values/demo_k_ejt1.py | gonzalo-munillag/Private_AI_OpenMined | c23da9cc1c914d10646a0c0bc1a2497fe2cbaaca | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
""" Demo for exponentiated Jensen-Tsallis kernel-1 estimators.
Analytical vs estimated value is illustrated for spherical normal random
variables.
"""
from numpy import eye
from numpy.random import rand, multivariate_normal, randn
from scipy import arange, zeros, ones
import matplotlib.pyplot as plt
from ite.cost.x_factory import co_factory
from ite.cost.x_analytical_values import analytical_value_k_ejt1
def main():
# parameters:
dim = 1 # dimension of the distribution
num_of_samples_v = arange(1000, 50*1000+1, 2000)
u = 0.8 # >0, parameter of the Jensen-Tsallis kernel
cost_name = 'MKExpJT1_HT' # dim >= 1
# initialization:
alpha = 2
# fixed; parameter of the Jensen-Tsallis kernel; for alpha = 2 we have
# explicit formula for the Tsallis entropy, and hence for the
# Jensen-Tsallis kernel(-1).
distr = 'normal' # fixed
num_of_samples_max = num_of_samples_v[-1]
length = len(num_of_samples_v)
co = co_factory(cost_name, mult=True, alpha=alpha, u=u) # cost object
k_hat_v = zeros(length) # vector of estimated kernel values
# distr, dim -> samples (y1,y2), distribution parameters (par1,par2),
# analytical value (k):
if distr == 'normal':
# generate samples (y1,y2); y1~N(m1,s1^2xI), y2~N(m2,s2^2xI):
m1, s1 = randn(dim), rand(1)
m2, s2 = randn(dim), rand(1)
y1 = multivariate_normal(m1, s1**2 * eye(dim), num_of_samples_max)
y2 = multivariate_normal(m2, s2**2 * eye(dim), num_of_samples_max)
par1 = {"mean": m1, "std": s1}
par2 = {"mean": m2, "std": s2}
else:
raise Exception('Distribution=?')
k = analytical_value_k_ejt1(distr, distr, u, par1, par2)
# estimation:
for (tk, num_of_samples) in enumerate(num_of_samples_v):
k_hat_v[tk] = co.estimation(y1[0:num_of_samples],
y2[0:num_of_samples]) # broadcast
print("tk={0}/{1}".format(tk+1, length))
# plot:
plt.plot(num_of_samples_v, k_hat_v, num_of_samples_v, ones(length)*k)
plt.xlabel('Number of samples')
plt.ylabel('Exponentiated Jensen-Tsallis kernel-1')
plt.legend(('estimation', 'analytical value'), loc='best')
plt.title("Estimator: " + cost_name)
plt.show()
if __name__ == "__main__":
main()
| 33.549296 | 74 | 0.646096 |
from numpy import eye
from numpy.random import rand, multivariate_normal, randn
from scipy import arange, zeros, ones
import matplotlib.pyplot as plt
from ite.cost.x_factory import co_factory
from ite.cost.x_analytical_values import analytical_value_k_ejt1
def main():
dim = 1
num_of_samples_v = arange(1000, 50*1000+1, 2000)
u = 0.8
cost_name = 'MKExpJT1_HT'
alpha = 2
distr = 'normal'
num_of_samples_max = num_of_samples_v[-1]
length = len(num_of_samples_v)
co = co_factory(cost_name, mult=True, alpha=alpha, u=u)
k_hat_v = zeros(length)
if distr == 'normal':
m1, s1 = randn(dim), rand(1)
m2, s2 = randn(dim), rand(1)
y1 = multivariate_normal(m1, s1**2 * eye(dim), num_of_samples_max)
y2 = multivariate_normal(m2, s2**2 * eye(dim), num_of_samples_max)
par1 = {"mean": m1, "std": s1}
par2 = {"mean": m2, "std": s2}
else:
raise Exception('Distribution=?')
k = analytical_value_k_ejt1(distr, distr, u, par1, par2)
for (tk, num_of_samples) in enumerate(num_of_samples_v):
k_hat_v[tk] = co.estimation(y1[0:num_of_samples],
y2[0:num_of_samples])
print("tk={0}/{1}".format(tk+1, length))
plt.plot(num_of_samples_v, k_hat_v, num_of_samples_v, ones(length)*k)
plt.xlabel('Number of samples')
plt.ylabel('Exponentiated Jensen-Tsallis kernel-1')
plt.legend(('estimation', 'analytical value'), loc='best')
plt.title("Estimator: " + cost_name)
plt.show()
if __name__ == "__main__":
main()
| true | true |
f723ca086eef0104ef4640b52705cddbd642cc36 | 2,063 | py | Python | parsifal/reviews/migrations/0046_auto_20190717_2301.py | glauberferreira/parsifal-mec | 66f85e0d48a270bddd1170caa2131bc74872462d | [
"MIT"
] | 1 | 2019-06-13T16:09:26.000Z | 2019-06-13T16:09:26.000Z | parsifal/reviews/migrations/0046_auto_20190717_2301.py | glauberferreira/parsifal-mec | 66f85e0d48a270bddd1170caa2131bc74872462d | [
"MIT"
] | null | null | null | parsifal/reviews/migrations/0046_auto_20190717_2301.py | glauberferreira/parsifal-mec | 66f85e0d48a270bddd1170caa2131bc74872462d | [
"MIT"
] | 3 | 2019-10-05T04:16:59.000Z | 2021-04-20T05:00:50.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('reviews', '0045_articlefile'),
]
operations = [
migrations.CreateModel(
name='ArticleEvaluation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.CharField(default='U', max_length=1, choices=[('U', 'Unclassified'), ('R', 'Rejected'), ('A', 'Accepted'), ('D', 'Duplicated')])),
('comments', models.TextField(max_length=2000, null=True, blank=True)),
],
),
migrations.RemoveField(
model_name='article',
name='comments',
),
migrations.RemoveField(
model_name='article',
name='selection_criteria',
),
migrations.RemoveField(
model_name='article',
name='status',
),
migrations.AddField(
model_name='articleevaluation',
name='article',
field=models.ForeignKey(related_name='evaluation_article', to='reviews.Article'),
),
migrations.AddField(
model_name='articleevaluation',
name='review',
field=models.ForeignKey(related_name='evaluation_review', to='reviews.Review'),
),
migrations.AddField(
model_name='articleevaluation',
name='selection_criteria',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='reviews.SelectionCriteria', null=True),
),
migrations.AddField(
model_name='articleevaluation',
name='user',
field=models.ForeignKey(related_name='evaluation_user', to=settings.AUTH_USER_MODEL),
),
]
| 35.568966 | 164 | 0.596704 |
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('reviews', '0045_articlefile'),
]
operations = [
migrations.CreateModel(
name='ArticleEvaluation',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.CharField(default='U', max_length=1, choices=[('U', 'Unclassified'), ('R', 'Rejected'), ('A', 'Accepted'), ('D', 'Duplicated')])),
('comments', models.TextField(max_length=2000, null=True, blank=True)),
],
),
migrations.RemoveField(
model_name='article',
name='comments',
),
migrations.RemoveField(
model_name='article',
name='selection_criteria',
),
migrations.RemoveField(
model_name='article',
name='status',
),
migrations.AddField(
model_name='articleevaluation',
name='article',
field=models.ForeignKey(related_name='evaluation_article', to='reviews.Article'),
),
migrations.AddField(
model_name='articleevaluation',
name='review',
field=models.ForeignKey(related_name='evaluation_review', to='reviews.Review'),
),
migrations.AddField(
model_name='articleevaluation',
name='selection_criteria',
field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL, blank=True, to='reviews.SelectionCriteria', null=True),
),
migrations.AddField(
model_name='articleevaluation',
name='user',
field=models.ForeignKey(related_name='evaluation_user', to=settings.AUTH_USER_MODEL),
),
]
| true | true |
f723ca1a6623fc6b30625bd969dcb19ab9192111 | 358 | py | Python | Lessons/simpleWebBrowser.py | Luderio/Scientific-Computing-with-Python | c7eebcc3b46b68b3d5c08ad25fb802ae9ff42f7f | [
"MIT"
] | null | null | null | Lessons/simpleWebBrowser.py | Luderio/Scientific-Computing-with-Python | c7eebcc3b46b68b3d5c08ad25fb802ae9ff42f7f | [
"MIT"
] | null | null | null | Lessons/simpleWebBrowser.py | Luderio/Scientific-Computing-with-Python | c7eebcc3b46b68b3d5c08ad25fb802ae9ff42f7f | [
"MIT"
] | null | null | null | #Simple We Browser using sockets
import socket
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysock.connect(('data.pr4e.org', 80))
cmd = 'GET http://data.pr4e.org/romeo.txt HTTP/1.0\r\n\r\n'.encode()
mysock.send(cmd)
while True :
data = mysock.recv(512)
if len(data) < 1 :
break
print(data.decode())
mysock.close() | 23.866667 | 68 | 0.664804 |
import socket
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysock.connect(('data.pr4e.org', 80))
cmd = 'GET http://data.pr4e.org/romeo.txt HTTP/1.0\r\n\r\n'.encode()
mysock.send(cmd)
while True :
data = mysock.recv(512)
if len(data) < 1 :
break
print(data.decode())
mysock.close() | true | true |
f723ca2e7cf32da6dc08be2afefe8b8e7395f254 | 287 | py | Python | app.py | IIVIIIII/2020_Weathering | 0759f2848ca912c8f1f9875f18c8e0aa604948f5 | [
"MIT"
] | null | null | null | app.py | IIVIIIII/2020_Weathering | 0759f2848ca912c8f1f9875f18c8e0aa604948f5 | [
"MIT"
] | null | null | null | app.py | IIVIIIII/2020_Weathering | 0759f2848ca912c8f1f9875f18c8e0aa604948f5 | [
"MIT"
] | null | null | null | from flask import Flask, jsonify
import data4app
app = Flask(__name__)
@app.route("/")
def home():
return "Lets goooo!!!"
@app.route("/<var>")
def jsonified(var):
data = data4app.get_data(var)
return jsonify(data)
if __name__ == "__main__":
app.run(debug=True)
| 15.105263 | 33 | 0.648084 | from flask import Flask, jsonify
import data4app
app = Flask(__name__)
@app.route("/")
def home():
return "Lets goooo!!!"
@app.route("/<var>")
def jsonified(var):
data = data4app.get_data(var)
return jsonify(data)
if __name__ == "__main__":
app.run(debug=True)
| true | true |
f723ca8574f5c324eb54931b511ea756f5d6462f | 18,973 | py | Python | sdk/python/tekton_pipeline/models/pod_template.py | jmcshane/experimental | 3c47c7e87bcdadc6172941169f3f24fc3f159ae0 | [
"Apache-2.0"
] | null | null | null | sdk/python/tekton_pipeline/models/pod_template.py | jmcshane/experimental | 3c47c7e87bcdadc6172941169f3f24fc3f159ae0 | [
"Apache-2.0"
] | null | null | null | sdk/python/tekton_pipeline/models/pod_template.py | jmcshane/experimental | 3c47c7e87bcdadc6172941169f3f24fc3f159ae0 | [
"Apache-2.0"
] | 1 | 2020-07-30T15:55:45.000Z | 2020-07-30T15:55:45.000Z | # Copyright 2020 The Tekton Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
"""
Tekton
Tekton Pipeline # noqa: E501
The version of the OpenAPI document: v0.17.2
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from tekton_pipeline.configuration import Configuration
class PodTemplate(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'affinity': 'V1Affinity',
'automount_service_account_token': 'bool',
'dns_config': 'V1PodDNSConfig',
'dns_policy': 'str',
'enable_service_links': 'bool',
'host_network': 'bool',
'image_pull_secrets': 'list[V1LocalObjectReference]',
'node_selector': 'dict(str, str)',
'priority_class_name': 'str',
'runtime_class_name': 'str',
'scheduler_name': 'str',
'security_context': 'V1PodSecurityContext',
'tolerations': 'list[V1Toleration]',
'volumes': 'list[V1Volume]'
}
attribute_map = {
'affinity': 'affinity',
'automount_service_account_token': 'automountServiceAccountToken',
'dns_config': 'dnsConfig',
'dns_policy': 'dnsPolicy',
'enable_service_links': 'enableServiceLinks',
'host_network': 'hostNetwork',
'image_pull_secrets': 'imagePullSecrets',
'node_selector': 'nodeSelector',
'priority_class_name': 'priorityClassName',
'runtime_class_name': 'runtimeClassName',
'scheduler_name': 'schedulerName',
'security_context': 'securityContext',
'tolerations': 'tolerations',
'volumes': 'volumes'
}
def __init__(self, affinity=None, automount_service_account_token=None, dns_config=None, dns_policy=None, enable_service_links=None, host_network=None, image_pull_secrets=None, node_selector=None, priority_class_name=None, runtime_class_name=None, scheduler_name=None, security_context=None, tolerations=None, volumes=None, local_vars_configuration=None): # noqa: E501
"""PodTemplate - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._affinity = None
self._automount_service_account_token = None
self._dns_config = None
self._dns_policy = None
self._enable_service_links = None
self._host_network = None
self._image_pull_secrets = None
self._node_selector = None
self._priority_class_name = None
self._runtime_class_name = None
self._scheduler_name = None
self._security_context = None
self._tolerations = None
self._volumes = None
self.discriminator = None
if affinity is not None:
self.affinity = affinity
if automount_service_account_token is not None:
self.automount_service_account_token = automount_service_account_token
if dns_config is not None:
self.dns_config = dns_config
if dns_policy is not None:
self.dns_policy = dns_policy
if enable_service_links is not None:
self.enable_service_links = enable_service_links
if host_network is not None:
self.host_network = host_network
if image_pull_secrets is not None:
self.image_pull_secrets = image_pull_secrets
if node_selector is not None:
self.node_selector = node_selector
if priority_class_name is not None:
self.priority_class_name = priority_class_name
if runtime_class_name is not None:
self.runtime_class_name = runtime_class_name
if scheduler_name is not None:
self.scheduler_name = scheduler_name
if security_context is not None:
self.security_context = security_context
if tolerations is not None:
self.tolerations = tolerations
if volumes is not None:
self.volumes = volumes
@property
def affinity(self):
"""Gets the affinity of this PodTemplate. # noqa: E501
:return: The affinity of this PodTemplate. # noqa: E501
:rtype: V1Affinity
"""
return self._affinity
@affinity.setter
def affinity(self, affinity):
"""Sets the affinity of this PodTemplate.
:param affinity: The affinity of this PodTemplate. # noqa: E501
:type: V1Affinity
"""
self._affinity = affinity
@property
def automount_service_account_token(self):
"""Gets the automount_service_account_token of this PodTemplate. # noqa: E501
AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. # noqa: E501
:return: The automount_service_account_token of this PodTemplate. # noqa: E501
:rtype: bool
"""
return self._automount_service_account_token
@automount_service_account_token.setter
def automount_service_account_token(self, automount_service_account_token):
"""Sets the automount_service_account_token of this PodTemplate.
AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. # noqa: E501
:param automount_service_account_token: The automount_service_account_token of this PodTemplate. # noqa: E501
:type: bool
"""
self._automount_service_account_token = automount_service_account_token
@property
def dns_config(self):
"""Gets the dns_config of this PodTemplate. # noqa: E501
:return: The dns_config of this PodTemplate. # noqa: E501
:rtype: V1PodDNSConfig
"""
return self._dns_config
@dns_config.setter
def dns_config(self, dns_config):
"""Sets the dns_config of this PodTemplate.
:param dns_config: The dns_config of this PodTemplate. # noqa: E501
:type: V1PodDNSConfig
"""
self._dns_config = dns_config
@property
def dns_policy(self):
"""Gets the dns_policy of this PodTemplate. # noqa: E501
Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. # noqa: E501
:return: The dns_policy of this PodTemplate. # noqa: E501
:rtype: str
"""
return self._dns_policy
@dns_policy.setter
def dns_policy(self, dns_policy):
"""Sets the dns_policy of this PodTemplate.
Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. # noqa: E501
:param dns_policy: The dns_policy of this PodTemplate. # noqa: E501
:type: str
"""
self._dns_policy = dns_policy
@property
def enable_service_links(self):
"""Gets the enable_service_links of this PodTemplate. # noqa: E501
EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true. # noqa: E501
:return: The enable_service_links of this PodTemplate. # noqa: E501
:rtype: bool
"""
return self._enable_service_links
@enable_service_links.setter
def enable_service_links(self, enable_service_links):
"""Sets the enable_service_links of this PodTemplate.
EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true. # noqa: E501
:param enable_service_links: The enable_service_links of this PodTemplate. # noqa: E501
:type: bool
"""
self._enable_service_links = enable_service_links
@property
def host_network(self):
"""Gets the host_network of this PodTemplate. # noqa: E501
HostNetwork specifies whether the pod may use the node network namespace # noqa: E501
:return: The host_network of this PodTemplate. # noqa: E501
:rtype: bool
"""
return self._host_network
@host_network.setter
def host_network(self, host_network):
"""Sets the host_network of this PodTemplate.
HostNetwork specifies whether the pod may use the node network namespace # noqa: E501
:param host_network: The host_network of this PodTemplate. # noqa: E501
:type: bool
"""
self._host_network = host_network
@property
def image_pull_secrets(self):
"""Gets the image_pull_secrets of this PodTemplate. # noqa: E501
ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified # noqa: E501
:return: The image_pull_secrets of this PodTemplate. # noqa: E501
:rtype: list[V1LocalObjectReference]
"""
return self._image_pull_secrets
@image_pull_secrets.setter
def image_pull_secrets(self, image_pull_secrets):
"""Sets the image_pull_secrets of this PodTemplate.
ImagePullSecrets gives the name of the secret used by the pod to pull the image if specified # noqa: E501
:param image_pull_secrets: The image_pull_secrets of this PodTemplate. # noqa: E501
:type: list[V1LocalObjectReference]
"""
self._image_pull_secrets = image_pull_secrets
@property
def node_selector(self):
"""Gets the node_selector of this PodTemplate. # noqa: E501
NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # noqa: E501
:return: The node_selector of this PodTemplate. # noqa: E501
:rtype: dict(str, str)
"""
return self._node_selector
@node_selector.setter
def node_selector(self, node_selector):
"""Sets the node_selector of this PodTemplate.
NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ # noqa: E501
:param node_selector: The node_selector of this PodTemplate. # noqa: E501
:type: dict(str, str)
"""
self._node_selector = node_selector
@property
def priority_class_name(self):
"""Gets the priority_class_name of this PodTemplate. # noqa: E501
If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. # noqa: E501
:return: The priority_class_name of this PodTemplate. # noqa: E501
:rtype: str
"""
return self._priority_class_name
@priority_class_name.setter
def priority_class_name(self, priority_class_name):
"""Sets the priority_class_name of this PodTemplate.
If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default. # noqa: E501
:param priority_class_name: The priority_class_name of this PodTemplate. # noqa: E501
:type: str
"""
self._priority_class_name = priority_class_name
@property
def runtime_class_name(self):
"""Gets the runtime_class_name of this PodTemplate. # noqa: E501
RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14. # noqa: E501
:return: The runtime_class_name of this PodTemplate. # noqa: E501
:rtype: str
"""
return self._runtime_class_name
@runtime_class_name.setter
def runtime_class_name(self, runtime_class_name):
"""Sets the runtime_class_name of this PodTemplate.
RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14. # noqa: E501
:param runtime_class_name: The runtime_class_name of this PodTemplate. # noqa: E501
:type: str
"""
self._runtime_class_name = runtime_class_name
@property
def scheduler_name(self):
"""Gets the scheduler_name of this PodTemplate. # noqa: E501
SchedulerName specifies the scheduler to be used to dispatch the Pod # noqa: E501
:return: The scheduler_name of this PodTemplate. # noqa: E501
:rtype: str
"""
return self._scheduler_name
@scheduler_name.setter
def scheduler_name(self, scheduler_name):
"""Sets the scheduler_name of this PodTemplate.
SchedulerName specifies the scheduler to be used to dispatch the Pod # noqa: E501
:param scheduler_name: The scheduler_name of this PodTemplate. # noqa: E501
:type: str
"""
self._scheduler_name = scheduler_name
@property
def security_context(self):
"""Gets the security_context of this PodTemplate. # noqa: E501
:return: The security_context of this PodTemplate. # noqa: E501
:rtype: V1PodSecurityContext
"""
return self._security_context
@security_context.setter
def security_context(self, security_context):
"""Sets the security_context of this PodTemplate.
:param security_context: The security_context of this PodTemplate. # noqa: E501
:type: V1PodSecurityContext
"""
self._security_context = security_context
@property
def tolerations(self):
"""Gets the tolerations of this PodTemplate. # noqa: E501
If specified, the pod's tolerations. # noqa: E501
:return: The tolerations of this PodTemplate. # noqa: E501
:rtype: list[V1Toleration]
"""
return self._tolerations
@tolerations.setter
def tolerations(self, tolerations):
"""Sets the tolerations of this PodTemplate.
If specified, the pod's tolerations. # noqa: E501
:param tolerations: The tolerations of this PodTemplate. # noqa: E501
:type: list[V1Toleration]
"""
self._tolerations = tolerations
@property
def volumes(self):
"""Gets the volumes of this PodTemplate. # noqa: E501
List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes # noqa: E501
:return: The volumes of this PodTemplate. # noqa: E501
:rtype: list[V1Volume]
"""
return self._volumes
@volumes.setter
def volumes(self, volumes):
"""Sets the volumes of this PodTemplate.
List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes # noqa: E501
:param volumes: The volumes of this PodTemplate. # noqa: E501
:type: list[V1Volume]
"""
self._volumes = volumes
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PodTemplate):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, PodTemplate):
return True
return self.to_dict() != other.to_dict()
| 38.329293 | 485 | 0.669056 |
import pprint
import re
import six
from tekton_pipeline.configuration import Configuration
class PodTemplate(object):
openapi_types = {
'affinity': 'V1Affinity',
'automount_service_account_token': 'bool',
'dns_config': 'V1PodDNSConfig',
'dns_policy': 'str',
'enable_service_links': 'bool',
'host_network': 'bool',
'image_pull_secrets': 'list[V1LocalObjectReference]',
'node_selector': 'dict(str, str)',
'priority_class_name': 'str',
'runtime_class_name': 'str',
'scheduler_name': 'str',
'security_context': 'V1PodSecurityContext',
'tolerations': 'list[V1Toleration]',
'volumes': 'list[V1Volume]'
}
attribute_map = {
'affinity': 'affinity',
'automount_service_account_token': 'automountServiceAccountToken',
'dns_config': 'dnsConfig',
'dns_policy': 'dnsPolicy',
'enable_service_links': 'enableServiceLinks',
'host_network': 'hostNetwork',
'image_pull_secrets': 'imagePullSecrets',
'node_selector': 'nodeSelector',
'priority_class_name': 'priorityClassName',
'runtime_class_name': 'runtimeClassName',
'scheduler_name': 'schedulerName',
'security_context': 'securityContext',
'tolerations': 'tolerations',
'volumes': 'volumes'
}
def __init__(self, affinity=None, automount_service_account_token=None, dns_config=None, dns_policy=None, enable_service_links=None, host_network=None, image_pull_secrets=None, node_selector=None, priority_class_name=None, runtime_class_name=None, scheduler_name=None, security_context=None, tolerations=None, volumes=None, local_vars_configuration=None):
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._affinity = None
self._automount_service_account_token = None
self._dns_config = None
self._dns_policy = None
self._enable_service_links = None
self._host_network = None
self._image_pull_secrets = None
self._node_selector = None
self._priority_class_name = None
self._runtime_class_name = None
self._scheduler_name = None
self._security_context = None
self._tolerations = None
self._volumes = None
self.discriminator = None
if affinity is not None:
self.affinity = affinity
if automount_service_account_token is not None:
self.automount_service_account_token = automount_service_account_token
if dns_config is not None:
self.dns_config = dns_config
if dns_policy is not None:
self.dns_policy = dns_policy
if enable_service_links is not None:
self.enable_service_links = enable_service_links
if host_network is not None:
self.host_network = host_network
if image_pull_secrets is not None:
self.image_pull_secrets = image_pull_secrets
if node_selector is not None:
self.node_selector = node_selector
if priority_class_name is not None:
self.priority_class_name = priority_class_name
if runtime_class_name is not None:
self.runtime_class_name = runtime_class_name
if scheduler_name is not None:
self.scheduler_name = scheduler_name
if security_context is not None:
self.security_context = security_context
if tolerations is not None:
self.tolerations = tolerations
if volumes is not None:
self.volumes = volumes
@property
def affinity(self):
return self._affinity
@affinity.setter
def affinity(self, affinity):
self._affinity = affinity
@property
def automount_service_account_token(self):
return self._automount_service_account_token
@automount_service_account_token.setter
def automount_service_account_token(self, automount_service_account_token):
self._automount_service_account_token = automount_service_account_token
@property
def dns_config(self):
return self._dns_config
@dns_config.setter
def dns_config(self, dns_config):
self._dns_config = dns_config
@property
def dns_policy(self):
return self._dns_policy
@dns_policy.setter
def dns_policy(self, dns_policy):
self._dns_policy = dns_policy
@property
def enable_service_links(self):
return self._enable_service_links
@enable_service_links.setter
def enable_service_links(self, enable_service_links):
self._enable_service_links = enable_service_links
@property
def host_network(self):
return self._host_network
@host_network.setter
def host_network(self, host_network):
self._host_network = host_network
@property
def image_pull_secrets(self):
return self._image_pull_secrets
@image_pull_secrets.setter
def image_pull_secrets(self, image_pull_secrets):
self._image_pull_secrets = image_pull_secrets
@property
def node_selector(self):
return self._node_selector
@node_selector.setter
def node_selector(self, node_selector):
self._node_selector = node_selector
@property
def priority_class_name(self):
return self._priority_class_name
@priority_class_name.setter
def priority_class_name(self, priority_class_name):
self._priority_class_name = priority_class_name
@property
def runtime_class_name(self):
return self._runtime_class_name
@runtime_class_name.setter
def runtime_class_name(self, runtime_class_name):
self._runtime_class_name = runtime_class_name
@property
def scheduler_name(self):
return self._scheduler_name
@scheduler_name.setter
def scheduler_name(self, scheduler_name):
self._scheduler_name = scheduler_name
@property
def security_context(self):
return self._security_context
@security_context.setter
def security_context(self, security_context):
self._security_context = security_context
@property
def tolerations(self):
return self._tolerations
@tolerations.setter
def tolerations(self, tolerations):
self._tolerations = tolerations
@property
def volumes(self):
return self._volumes
@volumes.setter
def volumes(self, volumes):
self._volumes = volumes
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
    """Return the string representation of the model (pretty-printed dict)."""
    return pprint.pformat(self.to_dict())

def __repr__(self):
    # repr mirrors to_str() so `print`/debugger output shows the full
    # serialized state rather than a bare object address.
    return self.to_str()

def __eq__(self, other):
    """True when *other* is a PodTemplate with identical serialized state."""
    if not isinstance(other, PodTemplate):
        return False
    return self.to_dict() == other.to_dict()

def __ne__(self, other):
    # Explicit __ne__: Python 2 does not derive it from __eq__ (generated
    # client code keeps it for 2/3 compatibility).
    if not isinstance(other, PodTemplate):
        return True
    return self.to_dict() != other.to_dict()
| true | true |
f723cb619eaf3108159317787a8063eb46bfbdab | 413 | py | Python | experiments/fdtd-2d/tmp_files/144.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | experiments/fdtd-2d/tmp_files/144.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | experiments/fdtd-2d/tmp_files/144.py | LoopTilingBenchmark/benchmark | 52a3d2e70216552a498fd91de02a2fa9cb62122c | [
"BSD-2-Clause"
] | null | null | null | from chill import *
# CHiLL loop-transformation recipe (auto-generated for a loop-tiling
# autotuning experiment): reads the Polybench fdtd-2d stencil kernel,
# applies a fixed set of tilings, and writes the transformed C source out.
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/144.c')
# Target the first loop nest of kernel_fdtd_2d.
procedure('kernel_fdtd_2d')
loop(0)
# Declare the problem sizes positive so the transformations are legal.
known(' nx > 1 ')
known(' ny > 1 ')
# Two tile() calls per statement (statements 1-3), presumably tiling two
# loop levels with sizes 128 and 32 -- TODO confirm the exact argument
# order (statement, level, tile size, outer level) against the CHiLL docs.
tile(1,2,128,2)
tile(1,4,32,4)
tile(2,2,128,2)
tile(2,4,32,4)
tile(3,2,128,2)
tile(3,4,32,4)
| 22.944444 | 116 | 0.72155 | from chill import *
source('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/fdtd-2d/kernel.c')
destination('/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/experiments/fdtd-2d/tmp_files/144.c')
procedure('kernel_fdtd_2d')
loop(0)
known(' nx > 1 ')
known(' ny > 1 ')
tile(1,2,128,2)
tile(1,4,32,4)
tile(2,2,128,2)
tile(2,4,32,4)
tile(3,2,128,2)
tile(3,4,32,4)
| true | true |
f723cba44ec9074d909b91e8b3bb8d8e84df91e5 | 506 | py | Python | tests/test_base.py | not-nexus/shelf | ea59703082402ad3b6454482f0487418295fbd19 | [
"MIT"
] | 4 | 2016-11-07T13:02:18.000Z | 2019-09-03T02:04:05.000Z | tests/test_base.py | not-nexus/shelf | ea59703082402ad3b6454482f0487418295fbd19 | [
"MIT"
] | 21 | 2016-11-30T20:44:52.000Z | 2017-05-02T15:38:56.000Z | tests/test_base.py | not-nexus/shelf | ea59703082402ad3b6454482f0487418295fbd19 | [
"MIT"
] | 2 | 2017-01-24T14:36:04.000Z | 2020-01-13T16:10:05.000Z | import pyproctor
class TestBase(pyproctor.TestBase):
    """Shared base class for the test suite (extends ``pyproctor.TestBase``)."""

    @classmethod
    def setUpClass(cls):
        """
        Historically configured root logging to stream to stdout so every
        test logs there (making any later ``logging.basicConfig`` call a
        no-op).  NOTE(review): the actual ``basicConfig`` call is currently
        commented out below because of an HTTPretty incompatibility, so
        this hook is presently a no-op — see the linked issue.
        """
        # AGI-731
        # See jira for more information
        #
        # https://github.com/gabrielfalcao/HTTPretty/issues/280
        #
        # logging.basicConfig(
        #     stream=sys.stdout
        # )
| 25.3 | 64 | 0.561265 | import pyproctor
class TestBase(pyproctor.TestBase):
@classmethod
def setUpClass(cls):
| true | true |
f723cc233a58a2785fb04f6b38b7dd40a29e256b | 97 | py | Python | tests/test_import.py | Quiltomics/indexd | 95274d40f16de881492c2db70a969eb77c8f5e7c | [
"Apache-2.0"
] | 2 | 2019-06-10T15:30:51.000Z | 2020-01-18T23:24:13.000Z | tests/test_import.py | lookcrabs/indexd | 646a7f336148496b07462ce3d3f8e930fa08a06c | [
"Apache-2.0"
] | 15 | 2019-03-19T21:57:31.000Z | 2021-08-11T21:01:33.000Z | tests/test_import.py | NCI-GDC/indexd | d159a82e7da100c807621bc41f2626dae64b4be9 | [
"Apache-2.0"
] | 1 | 2020-11-05T15:03:24.000Z | 2020-11-05T15:03:24.000Z | def test_import_index():
'''
Try to import the indexd package.
'''
import indexd
| 16.166667 | 37 | 0.608247 | def test_import_index():
import indexd
| true | true |
f723ccb1d235fd916b20e25c9c162899696706eb | 4,292 | py | Python | tests/python/contrib/test_gemm_acc32_vnni.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 9 | 2019-12-17T08:03:54.000Z | 2022-01-19T02:34:23.000Z | tests/python/contrib/test_gemm_acc32_vnni.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 2 | 2020-06-18T21:15:42.000Z | 2020-06-24T17:38:37.000Z | tests/python/contrib/test_gemm_acc32_vnni.py | jiangzoi/incubator-tvm | 144c6f45f7217b9df2f5605e06f0903e470ac11c | [
"Apache-2.0"
] | 3 | 2020-10-04T20:30:18.000Z | 2022-01-24T18:03:52.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=import-self, invalid-name, unused-argument, too-many-lines, len-as-condition
import tvm
from tvm import te
import numpy as np
from topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32_cascadelake
from topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32
import pytest
@pytest.mark.skip("skip because feature not enabled")
def test_fc_int8_acc32():
    """Benchmark/correctness test of a uint8 x int8 -> int32 dense matmul
    tensorized with the AVX512-VNNI dot-product intrinsic.

    Builds a 1024x1024x1024 GEMM schedule whose innermost 16-lane block is
    replaced by ``dot_16x1x16_uint8_int8_int32_cascadelake``, times it, and
    checks the result against ``np.dot``.  Requires a Cascade Lake (or
    newer) CPU and LLVM >= 8.0 — hence the skip marker.
    """
    # Square GEMM problem: (m, k) x (k, n)^T -> (m, n).
    m = 1024
    n = 1024
    k = 1024
    X = te.placeholder((m, k), name='X', dtype="uint8")
    # NOTE(review): W is unused below — the kernel consumes the pre-packed
    # ``packedW`` layout instead; kept here for reference.
    W = te.placeholder((n, k), name='W', dtype="int8")

    # Assumed machine peak in Gops/s used only for the efficiency report
    # -- TODO confirm for the actual target host.
    peak = 280
    print("Peak {} Gops/s".format(peak))
    memory_ops = m * k + n * k + 2 * m * n  # byte-traffic estimate (currently unreported)
    gops_per_mm = 2 * m * n * k  # multiply+add operations in one full GEMM

    # For LLVM < 8.0, it shows "'cascadelake' is not a recognized processor for this target
    # (ignoring processor)" error with the following setting. After LLVM 8.0 is enabled in the
    # test, we should use cascadelake setting.
    def verify(target="llvm -mcpu=cascadelake"):
        if not tvm.runtime.enabled(target):
            print("skip because %s is not enabled..." % target)
            return

        ctx = tvm.context(target, 0)
        pc = dot_16x1x16_uint8_int8_int32_cascadelake()
        ak = te.reduce_axis((0, k), name='k')
        # Weights pre-packed into the (n/16, 16*(k/4), 4) blocked layout the
        # intrinsic expects (16 output lanes x 4-element dot products).
        packedW = te.placeholder(
            (n // 16, 16 * (k // 4), 4), name='packedW', dtype="int8")

        t_fc = te.compute((m, n), lambda i, j: te.sum(X[i, ak].astype(
            "int32") * packedW[j / 16, (ak / 4) * 16 + j % 16, ak % 4].astype("int32"), axis=ak), name="F")
        t_sch = te.create_schedule(t_fc.op)
        a_x, a_y = t_fc.op.axis
        a_k, = t_fc.op.reduce_axis
        # Split output columns by 16 (intrinsic lanes) and rows by 32; split
        # the reduction twice (4 x 4) so the innermost block matches the
        # tensor intrinsic's shape before tensorizing.
        a_yo, a_yi = t_sch[t_fc].split(a_y, factor=16)
        a_xo, a_xi = t_sch[t_fc].split(a_x, factor=32)
        a_ko, a_ki = t_sch[t_fc].split(a_k, factor=4)
        a_koo, a_koi = t_sch[t_fc].split(a_ko, factor=4)
        t_sch[t_fc].reorder(a_yo, a_xo, a_xi, a_koo, a_koi, a_yi, a_ki)
        t_sch[t_fc].unroll(a_koi)
        t_sch[t_fc].tensorize(a_yi, pc)

        t_func = tvm.build(t_sch, [X, packedW, t_fc], target, name="intrinsic")
        t_evaluator = t_func.time_evaluator(t_func.entry_name, ctx, number=10)

        # generate the plain data
        a_ = np.random.uniform(1, 10, size=(m, k)).astype("uint8")
        b_ = np.random.uniform(1, 10, size=(n, k)).astype("int8")
        packW = np.random.uniform(1, 10, size=(
            n // 16, 16 * (k // 4), 4)).astype("int8")

        # This occurs in pre_compute stage
        # (repack the row-major b_ weights into the blocked layout above;
        # the random initial values of packW are fully overwritten here)
        for r_idx in range(n // 16):
            for s_idx in range(16 * (k // 4)):
                for t_idx in range(4):
                    packW[r_idx][s_idx][t_idx] = b_[r_idx * 16 + s_idx %
                                                    16][(s_idx // 16) * 4 + t_idx]

        x = tvm.nd.array(a_, ctx)
        w = tvm.nd.array(packW, ctx)
        y = tvm.nd.array(np.zeros((m, n), dtype="int32"), ctx)
        result = t_evaluator(x, w, y)
        gops_per_sec = gops_per_mm / result.mean / 1e9
        # verify the correctness
        tvm.testing.assert_allclose(y.asnumpy(), np.dot(a_, b_.T), rtol=0)
        # ("effiency" typo below lives inside a runtime string; left as-is)
        print('Tensorization: running time: {:.3f} ms, {:.2f} Gops/s, effiency: {:.2f}'.format(
            result.mean * 1000, gops_per_sec, gops_per_sec / peak))
        t_func.export_library("tensorize_acc32.o")

    verify()
if __name__ == "__main__":
    # The test requires Cascade Lake and newer Intel machines to generate the
    # correct AVX512 VNNI instruction. So, disabling the test.
    # test_fc_int8_acc32()
    pass  # intentionally a no-op until the hardware/LLVM requirement is met
| 40.11215 | 107 | 0.627213 |
import tvm
from tvm import te
import numpy as np
from topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32_cascadelake
from topi.x86.tensor_intrin import dot_16x1x16_uint8_int8_int32
import pytest
@pytest.mark.skip("skip because feature not enabled")
def test_fc_int8_acc32():
m = 1024
n = 1024
k = 1024
X = te.placeholder((m, k), name='X', dtype="uint8")
W = te.placeholder((n, k), name='W', dtype="int8")
peak = 280
print("Peak {} Gops/s".format(peak))
memory_ops = m * k + n * k + 2 * m * n
gops_per_mm = 2 * m * n * k
# (ignoring processor)" error with the following setting. After LLVM 8.0 is enabled in the
def verify(target="llvm -mcpu=cascadelake"):
if not tvm.runtime.enabled(target):
print("skip because %s is not enabled..." % target)
return
ctx = tvm.context(target, 0)
pc = dot_16x1x16_uint8_int8_int32_cascadelake()
ak = te.reduce_axis((0, k), name='k')
packedW = te.placeholder(
(n // 16, 16 * (k // 4), 4), name='packedW', dtype="int8")
t_fc = te.compute((m, n), lambda i, j: te.sum(X[i, ak].astype(
"int32") * packedW[j / 16, (ak / 4) * 16 + j % 16, ak % 4].astype("int32"), axis=ak), name="F")
t_sch = te.create_schedule(t_fc.op)
a_x, a_y = t_fc.op.axis
a_k, = t_fc.op.reduce_axis
a_yo, a_yi = t_sch[t_fc].split(a_y, factor=16)
a_xo, a_xi = t_sch[t_fc].split(a_x, factor=32)
a_ko, a_ki = t_sch[t_fc].split(a_k, factor=4)
a_koo, a_koi = t_sch[t_fc].split(a_ko, factor=4)
t_sch[t_fc].reorder(a_yo, a_xo, a_xi, a_koo, a_koi, a_yi, a_ki)
t_sch[t_fc].unroll(a_koi)
t_sch[t_fc].tensorize(a_yi, pc)
t_func = tvm.build(t_sch, [X, packedW, t_fc], target, name="intrinsic")
t_evaluator = t_func.time_evaluator(t_func.entry_name, ctx, number=10)
a_ = np.random.uniform(1, 10, size=(m, k)).astype("uint8")
b_ = np.random.uniform(1, 10, size=(n, k)).astype("int8")
packW = np.random.uniform(1, 10, size=(
n // 16, 16 * (k // 4), 4)).astype("int8")
for r_idx in range(n // 16):
for s_idx in range(16 * (k // 4)):
for t_idx in range(4):
packW[r_idx][s_idx][t_idx] = b_[r_idx * 16 + s_idx %
16][(s_idx // 16) * 4 + t_idx]
x = tvm.nd.array(a_, ctx)
w = tvm.nd.array(packW, ctx)
y = tvm.nd.array(np.zeros((m, n), dtype="int32"), ctx)
result = t_evaluator(x, w, y)
gops_per_sec = gops_per_mm / result.mean / 1e9
tvm.testing.assert_allclose(y.asnumpy(), np.dot(a_, b_.T), rtol=0)
print('Tensorization: running time: {:.3f} ms, {:.2f} Gops/s, effiency: {:.2f}'.format(
result.mean * 1000, gops_per_sec, gops_per_sec / peak))
t_func.export_library("tensorize_acc32.o")
verify()
if __name__ == "__main__":
pass
| true | true |
f723ccc97ea70d87a17b104645eede99a1853ac3 | 2,675 | py | Python | wings/api_client.py | KnowledgeCaptureAndDiscovery/wings-client | af1d068f4adc07d9060afa94dc99e0b2565be088 | [
"Apache-2.0"
] | null | null | null | wings/api_client.py | KnowledgeCaptureAndDiscovery/wings-client | af1d068f4adc07d9060afa94dc99e0b2565be088 | [
"Apache-2.0"
] | 8 | 2019-07-28T17:04:38.000Z | 2019-08-06T23:57:08.000Z | wings/api_client.py | KnowledgeCaptureAndDiscovery/wings-client | af1d068f4adc07d9060afa94dc99e0b2565be088 | [
"Apache-2.0"
] | 1 | 2019-07-29T22:53:41.000Z | 2019-07-29T22:53:41.000Z | import atexit
import importlib
import logging
import requests
class ApiClient:
    """Session holder and lazy service factory for the WINGS REST API.

    Construction logs the user in immediately (raising ``ValueError`` on
    failure) and registers an automatic :meth:`logout` at interpreter exit.
    Configuration values (``server``, ``username``, ``domain``,
    ``export_url``, ...) arrive as keyword arguments and are resolved
    lazily through :meth:`__getattr__`; unknown attribute names are
    treated as sub-service modules and instantiated on first access.
    """

    def __init__(self, **kwargs):
        # Keep the raw kwargs: __getattr__ resolves lookups against them.
        self.kwargs = kwargs
        self.session = requests.Session()
        # Namespace URIs commonly needed by the service helpers.
        self.libns = self.get_export_url() + "components/library.owl#"
        self.dcdom = self.get_export_url() + "data/ontology.owl#"
        self.dclib = self.get_export_url() + "data/library.owl#"
        self.xsdns = "http://www.w3.org/2001/XMLSchema#"
        self.topcls = "http://www.wings-workflows.org/ontology/component.owl#Component"
        if self.login(kwargs["password"]) is False:
            raise ValueError("Login failed")
        # NOTE: registering a bound method keeps this instance (and its
        # HTTP session) alive until interpreter shutdown.
        atexit.register(self.logout)

    def get_server(self):
        """Return the configured server base URL."""
        return self.server

    def get_username(self):
        """Return the configured user name."""
        return self.username

    def login(self, password):
        """Authenticate against the container's j_security_check endpoint.

        Returns True on HTTP 200 or 403, False otherwise.  NOTE(review):
        403 is accepted as success — presumably the container answers with
        it when the session is already authenticated; confirm against the
        server's behavior.
        """
        # Priming GET so the container establishes a session cookie first.
        self.session.get(self.server + "/sparql")
        data = {"j_username": self.username, "j_password": password}
        response = self.session.post(self.server + "/j_security_check", data)
        if response.status_code == 403 or response.status_code == 200:
            return True
        return False

    def logout(self):
        """Log out on the server and close the underlying HTTP session."""
        self.session.get(self.server + "/jsp/login/logout.jsp")
        self.session.close()

    def session(self):
        # NOTE(review): dead code in practice — __init__ assigns the
        # ``session`` instance attribute, which shadows this method on
        # every instance.  Kept unchanged for interface compatibility.
        return self.session

    def _initialize(self, name):
        """Import sibling module *name* and return an instance of its
        title-cased class wired to this client, or None on failure."""
        try:
            module_ = importlib.import_module(".%s" % name, __package__)
            try:
                class_ = getattr(module_, name.title())
                return class_(api_client=self)
            except AttributeError:
                logging.error("Class does not exist")
        except ImportError:
            logging.error("Module does not exist %s", name)

    def close(self):
        """
        Shutdown sessions across all instantiated services
        """
        self.logout()

    def __getattr__(self, attr):
        # Resolve unknown attribute names from the constructor kwargs
        # first; otherwise treat the name as a service module and build it
        # via _initialize.  Either way the result is cached on the
        # instance with setattr, so __getattr__ runs once per name.
        try:
            setattr(self, attr, self.kwargs[attr])
            return getattr(self, attr)
        except KeyError:
            setattr(self, attr, self._initialize(attr))
            return getattr(self, attr)

    def get_request_url(self):
        """Base URL for authenticated, user/domain-scoped API requests."""
        return self.server + "/users/" + self.username + "/" + self.domain + "/"

    def get_export_url(self):
        """Base URL for exported (public) user/domain resources."""
        return (
            self.export_url + "/export/users/" + self.username + "/" + self.domain + "/"
        )

    @staticmethod
    def check_request(resp):
        """Return *resp* unchanged, or re-raise its HTTP error.

        Fix: the previous implementation raised brand-new, message-less
        ``HTTPError``/``RequestException`` instances from each handler,
        discarding the server's status code and error text.  A bare
        ``raise`` preserves the original exception and traceback while
        keeping the exception types callers catch (``HTTPError`` is a
        subclass of ``RequestException``).
        """
        try:
            resp.raise_for_status()
        except requests.exceptions.RequestException:
            raise
        return resp
| 31.104651 | 88 | 0.603738 | import atexit
import importlib
import logging
import requests
class ApiClient:
def __init__(self, **kwargs):
self.kwargs = kwargs
self.session = requests.Session()
self.libns = self.get_export_url() + "components/library.owl#"
self.dcdom = self.get_export_url() + "data/ontology.owl#"
self.dclib = self.get_export_url() + "data/library.owl#"
self.xsdns = "http://www.w3.org/2001/XMLSchema#"
self.topcls = "http://www.wings-workflows.org/ontology/component.owl#Component"
if self.login(kwargs["password"]) is False:
raise ValueError("Login failed")
atexit.register(self.logout)
def get_server(self):
return self.server
def get_username(self):
return self.username
def login(self, password):
self.session.get(self.server + "/sparql")
data = {"j_username": self.username, "j_password": password}
response = self.session.post(self.server + "/j_security_check", data)
if response.status_code == 403 or response.status_code == 200:
return True
return False
def logout(self):
self.session.get(self.server + "/jsp/login/logout.jsp")
self.session.close()
def session(self):
return self.session
def _initialize(self, name):
try:
module_ = importlib.import_module(".%s" % name, __package__)
try:
class_ = getattr(module_, name.title())
return class_(api_client=self)
except AttributeError:
logging.error("Class does not exist")
except ImportError:
logging.error("Module does not exist %s", name)
def close(self):
self.logout()
def __getattr__(self, attr):
try:
setattr(self, attr, self.kwargs[attr])
return getattr(self, attr)
except KeyError:
setattr(self, attr, self._initialize(attr))
return getattr(self, attr)
def get_request_url(self):
return self.server + "/users/" + self.username + "/" + self.domain + "/"
def get_export_url(self):
return (
self.export_url + "/export/users/" + self.username + "/" + self.domain + "/"
)
@staticmethod
def check_request(resp):
try:
resp.raise_for_status()
except requests.exceptions.HTTPError:
raise requests.exceptions.HTTPError
except requests.exceptions.RequestException:
raise requests.exceptions.RequestException
return resp
| true | true |
f723ce701eeae1b756748f8b4615052fa8ad1b50 | 4,318 | py | Python | ramp-database/ramp_database/tests/test_testing.py | agramfort/ramp-board | 1c2cfe7af486e57ee0d4fb017b5266bb8ad152e3 | [
"BSD-3-Clause"
] | null | null | null | ramp-database/ramp_database/tests/test_testing.py | agramfort/ramp-board | 1c2cfe7af486e57ee0d4fb017b5266bb8ad152e3 | [
"BSD-3-Clause"
] | null | null | null | ramp-database/ramp_database/tests/test_testing.py | agramfort/ramp-board | 1c2cfe7af486e57ee0d4fb017b5266bb8ad152e3 | [
"BSD-3-Clause"
] | null | null | null | import os
import shutil
import pytest
from ramp_utils import read_config
from ramp_utils import generate_ramp_config
from ramp_utils.testing import database_config_template
from ramp_utils.testing import ramp_config_template
from ramp_database.utils import setup_db
from ramp_database.utils import session_scope
from ramp_database.model import Model
from ramp_database.exceptions import NameClashError
from ramp_database.tools.user import get_user_by_name
from ramp_database.tools.event import get_problem
from ramp_database.testing import create_test_db
from ramp_database.testing import add_events
from ramp_database.testing import add_users
from ramp_database.testing import add_problems
from ramp_database.testing import ramp_config_boston_housing
from ramp_database.testing import ramp_config_iris
from ramp_database.testing import setup_ramp_kit_ramp_data
from ramp_database.testing import sign_up_teams_to_events
from ramp_database.testing import submit_all_starting_kits
@pytest.fixture(scope='module')
def database_config():
return read_config(database_config_template())
@pytest.fixture(scope='module')
def ramp_config():
return ramp_config_template()
@pytest.fixture
def session_scope_function(database_config, ramp_config):
try:
deployment_dir = create_test_db(database_config, ramp_config)
with session_scope(database_config['sqlalchemy']) as session:
yield session
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
db, _ = setup_db(database_config['sqlalchemy'])
Model.metadata.drop_all(db)
def test_ramp_kit_ramp_data(session_scope_function, ramp_config):
internal_ramp_config = generate_ramp_config(read_config(ramp_config))
setup_ramp_kit_ramp_data(internal_ramp_config, 'iris')
msg_err = 'The RAMP kit repository was previously cloned.'
with pytest.raises(ValueError, match=msg_err):
setup_ramp_kit_ramp_data(internal_ramp_config, 'iris')
# retrieve the path to the ramp kit to remove it
shutil.rmtree(internal_ramp_config['ramp_kit_dir'])
msg_err = 'The RAMP data repository was previously cloned.'
with pytest.raises(ValueError, match=msg_err):
setup_ramp_kit_ramp_data(internal_ramp_config, 'iris')
setup_ramp_kit_ramp_data(internal_ramp_config, 'iris', force=True)
def test_add_users(session_scope_function):
add_users(session_scope_function)
users = get_user_by_name(session_scope_function, None)
for user in users:
assert user.name in ('test_user', 'test_user_2', 'test_iris_admin')
err_msg = 'username is already in use'
with pytest.raises(NameClashError, match=err_msg):
add_users(session_scope_function)
def test_add_problems(session_scope_function):
add_problems(session_scope_function)
problems = get_problem(session_scope_function, None)
for problem in problems:
assert problem.name in ('iris', 'boston_housing')
# trying to add twice the same problem will raise a git error since the
# repositories already exist.
msg_err = 'The RAMP kit repository was previously cloned.'
with pytest.raises(ValueError, match=msg_err):
add_problems(session_scope_function)
def test_add_events(session_scope_function):
add_problems(session_scope_function)
add_events(session_scope_function)
with pytest.raises(ValueError):
add_events(session_scope_function)
def test_sign_up_team_to_events(session_scope_function):
add_users(session_scope_function)
add_problems(session_scope_function)
add_events(session_scope_function)
sign_up_teams_to_events(session_scope_function)
def test_submit_all_starting_kits(session_scope_function):
add_users(session_scope_function)
add_problems(session_scope_function)
add_events(session_scope_function)
sign_up_teams_to_events(session_scope_function)
submit_all_starting_kits(session_scope_function)
def test_ramp_config_iris():
filename = ramp_config_iris()
assert os.path.join('tests', 'data', 'ramp_config_iris.yml') in filename
def test_ramp_config_boston_housing():
filename = ramp_config_boston_housing()
expected_path = os.path.join('tests', 'data',
'ramp_config_boston_housing.yml')
assert expected_path in filename
| 34.822581 | 76 | 0.789717 | import os
import shutil
import pytest
from ramp_utils import read_config
from ramp_utils import generate_ramp_config
from ramp_utils.testing import database_config_template
from ramp_utils.testing import ramp_config_template
from ramp_database.utils import setup_db
from ramp_database.utils import session_scope
from ramp_database.model import Model
from ramp_database.exceptions import NameClashError
from ramp_database.tools.user import get_user_by_name
from ramp_database.tools.event import get_problem
from ramp_database.testing import create_test_db
from ramp_database.testing import add_events
from ramp_database.testing import add_users
from ramp_database.testing import add_problems
from ramp_database.testing import ramp_config_boston_housing
from ramp_database.testing import ramp_config_iris
from ramp_database.testing import setup_ramp_kit_ramp_data
from ramp_database.testing import sign_up_teams_to_events
from ramp_database.testing import submit_all_starting_kits
@pytest.fixture(scope='module')
def database_config():
return read_config(database_config_template())
@pytest.fixture(scope='module')
def ramp_config():
return ramp_config_template()
@pytest.fixture
def session_scope_function(database_config, ramp_config):
try:
deployment_dir = create_test_db(database_config, ramp_config)
with session_scope(database_config['sqlalchemy']) as session:
yield session
finally:
shutil.rmtree(deployment_dir, ignore_errors=True)
db, _ = setup_db(database_config['sqlalchemy'])
Model.metadata.drop_all(db)
def test_ramp_kit_ramp_data(session_scope_function, ramp_config):
internal_ramp_config = generate_ramp_config(read_config(ramp_config))
setup_ramp_kit_ramp_data(internal_ramp_config, 'iris')
msg_err = 'The RAMP kit repository was previously cloned.'
with pytest.raises(ValueError, match=msg_err):
setup_ramp_kit_ramp_data(internal_ramp_config, 'iris')
shutil.rmtree(internal_ramp_config['ramp_kit_dir'])
msg_err = 'The RAMP data repository was previously cloned.'
with pytest.raises(ValueError, match=msg_err):
setup_ramp_kit_ramp_data(internal_ramp_config, 'iris')
setup_ramp_kit_ramp_data(internal_ramp_config, 'iris', force=True)
def test_add_users(session_scope_function):
add_users(session_scope_function)
users = get_user_by_name(session_scope_function, None)
for user in users:
assert user.name in ('test_user', 'test_user_2', 'test_iris_admin')
err_msg = 'username is already in use'
with pytest.raises(NameClashError, match=err_msg):
add_users(session_scope_function)
def test_add_problems(session_scope_function):
add_problems(session_scope_function)
problems = get_problem(session_scope_function, None)
for problem in problems:
assert problem.name in ('iris', 'boston_housing')
msg_err = 'The RAMP kit repository was previously cloned.'
with pytest.raises(ValueError, match=msg_err):
add_problems(session_scope_function)
def test_add_events(session_scope_function):
add_problems(session_scope_function)
add_events(session_scope_function)
with pytest.raises(ValueError):
add_events(session_scope_function)
def test_sign_up_team_to_events(session_scope_function):
add_users(session_scope_function)
add_problems(session_scope_function)
add_events(session_scope_function)
sign_up_teams_to_events(session_scope_function)
def test_submit_all_starting_kits(session_scope_function):
add_users(session_scope_function)
add_problems(session_scope_function)
add_events(session_scope_function)
sign_up_teams_to_events(session_scope_function)
submit_all_starting_kits(session_scope_function)
def test_ramp_config_iris():
filename = ramp_config_iris()
assert os.path.join('tests', 'data', 'ramp_config_iris.yml') in filename
def test_ramp_config_boston_housing():
filename = ramp_config_boston_housing()
expected_path = os.path.join('tests', 'data',
'ramp_config_boston_housing.yml')
assert expected_path in filename
| true | true |
f723cf6dae464950f3d8b77edb889dcb5492b6ce | 4,453 | py | Python | python/ingestor/business.py | agahchen/RSBC-DataHub-API | d3742a09851d5753809e8eb8e1f7f6ca10b121ad | [
"Apache-2.0"
] | null | null | null | python/ingestor/business.py | agahchen/RSBC-DataHub-API | d3742a09851d5753809e8eb8e1f7f6ca10b121ad | [
"Apache-2.0"
] | null | null | null | python/ingestor/business.py | agahchen/RSBC-DataHub-API | d3742a09851d5753809e8eb8e1f7f6ca10b121ad | [
"Apache-2.0"
] | null | null | null | import python.common.middleware as middleware
import python.common.actions as actions
import python.common.rsi_email as rsi_email
import python.common.rest as rest
def get_available_time_slots() -> list:
"""
An application is ready for scheduling when all the payment rules are satisfied plus:
- the application has been paid
- the window to schedule the review has not elapsed
"""
return [
{"try": middleware.create_correlation_id, "fail": []},
{"try": middleware.determine_current_datetime, "fail": []},
{"try": middleware.clean_prohibition_number, "fail": []},
{"try": middleware.validate_prohibition_number, "fail": []},
{"try": middleware.get_vips_status, "fail": []},
{"try": middleware.prohibition_exists_in_vips, "fail": []},
{"try": middleware.user_submitted_last_name_matches_vips, "fail": []},
{"try": middleware.application_has_been_saved_to_vips, "fail": []},
{"try": middleware.get_payment_status, "fail": []},
{"try": middleware.received_valid_payment_status, "fail": []},
{"try": middleware.paid_not_more_than_24hrs_ago, "fail": []},
{"try": middleware.application_has_been_paid, "fail": []},
{"try": middleware.review_has_not_been_scheduled, "fail": []},
{"try": middleware.get_application_details, "fail": []},
{"try": middleware.valid_application_received_from_vips, "fail": []},
{"try": middleware.get_invoice_details, "fail": []},
{"try": middleware.calculate_schedule_window, "fail": []},
{"try": middleware.query_review_times_available, "fail": []},
{"try": middleware.does_applicant_have_enough_review_options, "fail": [
{"try": middleware.query_for_additional_review_times, "fail": []},
{"try": middleware.does_applicant_have_enough_review_options, "fail": [
{"try": rsi_email.insufficient_reviews_available, "fail": []},
]}
]},
]
def ingest_form() -> list:
    """Build the pipeline that validates an incoming XML form POST and
    queues it for processing.

    Request-validation failures answer ``rest.failed_validation``;
    conversion/queueing problems answer ``rest.server_error``; success
    ends with ``rest.okay``.
    """
    def step(action, fail=None):
        # One pipeline record; a fresh failure list is created per call.
        return {"try": action, "fail": [] if fail is None else fail}

    def reject():
        # 4xx-style response for a request that fails validation.
        return [step(rest.failed_validation)]

    def server_error():
        # 5xx-style response for an internal processing failure.
        return [step(rest.server_error)]

    return [
        step(middleware.content_type_is_xml, reject()),
        step(middleware.content_length_within_bounds, reject()),
        step(middleware.form_name_provided, reject()),
        step(middleware.validate_form_name, reject()),
        step(middleware.add_encrypt_at_rest_attribute),
        step(middleware.convert_xml_to_dictionary_object, server_error()),
        step(middleware.get_xml_from_request),
        step(middleware.base_64_encode_xml),
        step(middleware.create_form_payload),
        step(middleware.encode_payload),
        step(middleware.get_queue_name_from_parameters),
        step(actions.add_to_rabbitmq_queue, server_error()),
        # Useful for debugging: step(rsi_email.send_form_xml_to_admin),
        step(rest.okay),
    ]
def is_okay_to_submit_evidence() -> list:
    """Build the pipeline that decides whether an applicant may upload
    or submit evidence.

    The applicant must have submitted an application, paid for a review,
    and scheduled a review date; the final step enforces that the review
    is still more than 48 hours in the future.  (NOTE(review): the
    original docstring said 24 hours while the step used checks 48 —
    presumably the rule changed; confirm with the business owner.)
    """
    def step(action):
        # Every record in this pipeline has an empty fallback list.
        return {"try": action, "fail": []}

    checks = [
        middleware.create_correlation_id,
        middleware.determine_current_datetime,
        middleware.clean_prohibition_number,
        middleware.validate_prohibition_number,
        middleware.get_vips_status,
        middleware.prohibition_exists_in_vips,
        middleware.user_submitted_last_name_matches_vips,
        middleware.application_has_been_saved_to_vips,
        middleware.application_has_been_paid,
        middleware.review_has_been_scheduled,
        middleware.is_review_more_than_48_hours_in_the_future,
    ]
    return [step(check) for check in checks]
| 46.385417 | 89 | 0.614193 | import python.common.middleware as middleware
import python.common.actions as actions
import python.common.rsi_email as rsi_email
import python.common.rest as rest
def get_available_time_slots() -> list:
return [
{"try": middleware.create_correlation_id, "fail": []},
{"try": middleware.determine_current_datetime, "fail": []},
{"try": middleware.clean_prohibition_number, "fail": []},
{"try": middleware.validate_prohibition_number, "fail": []},
{"try": middleware.get_vips_status, "fail": []},
{"try": middleware.prohibition_exists_in_vips, "fail": []},
{"try": middleware.user_submitted_last_name_matches_vips, "fail": []},
{"try": middleware.application_has_been_saved_to_vips, "fail": []},
{"try": middleware.get_payment_status, "fail": []},
{"try": middleware.received_valid_payment_status, "fail": []},
{"try": middleware.paid_not_more_than_24hrs_ago, "fail": []},
{"try": middleware.application_has_been_paid, "fail": []},
{"try": middleware.review_has_not_been_scheduled, "fail": []},
{"try": middleware.get_application_details, "fail": []},
{"try": middleware.valid_application_received_from_vips, "fail": []},
{"try": middleware.get_invoice_details, "fail": []},
{"try": middleware.calculate_schedule_window, "fail": []},
{"try": middleware.query_review_times_available, "fail": []},
{"try": middleware.does_applicant_have_enough_review_options, "fail": [
{"try": middleware.query_for_additional_review_times, "fail": []},
{"try": middleware.does_applicant_have_enough_review_options, "fail": [
{"try": rsi_email.insufficient_reviews_available, "fail": []},
]}
]},
]
def ingest_form() -> list:
return [
{"try": middleware.content_type_is_xml, "fail": [
{"try": rest.failed_validation, "fail": []},
]},
{"try": middleware.content_length_within_bounds, "fail": [
{"try": rest.failed_validation, "fail": []},
]},
{"try": middleware.form_name_provided, "fail": [
{"try": rest.failed_validation, "fail": []},
]},
{"try": middleware.validate_form_name, "fail": [
{"try": rest.failed_validation, "fail": []},
]},
{"try": middleware.add_encrypt_at_rest_attribute, "fail": []},
{"try": middleware.convert_xml_to_dictionary_object, "fail": [
{"try": rest.server_error, "fail": []},
]},
{"try": middleware.get_xml_from_request, "fail": []},
{"try": middleware.base_64_encode_xml, "fail": []},
{"try": middleware.create_form_payload, "fail": []},
{"try": middleware.encode_payload, "fail": []},
{"try": middleware.get_queue_name_from_parameters, "fail": []},
{"try": actions.add_to_rabbitmq_queue, "fail": [
{"try": rest.server_error, "fail": []},
]},
{"try": rest.okay, "fail": []}
]
def is_okay_to_submit_evidence() -> list:
return [
{"try": middleware.create_correlation_id, "fail": []},
{"try": middleware.determine_current_datetime, "fail": []},
{"try": middleware.clean_prohibition_number, "fail": []},
{"try": middleware.validate_prohibition_number, "fail": []},
{"try": middleware.get_vips_status, "fail": []},
{"try": middleware.prohibition_exists_in_vips, "fail": []},
{"try": middleware.user_submitted_last_name_matches_vips, "fail": []},
{"try": middleware.application_has_been_saved_to_vips, "fail": []},
{"try": middleware.application_has_been_paid, "fail": []},
{"try": middleware.review_has_been_scheduled, "fail": []},
{"try": middleware.is_review_more_than_48_hours_in_the_future, "fail": []}
]
| true | true |
f723cf8e92377defaba93082120d82ab6f27e07b | 343 | py | Python | tests/test_models.py | devopsmakers/dj-prosftpd | fcef6b4a5b8b872c9830eaf0315f0467dd1944ff | [
"MIT"
] | null | null | null | tests/test_models.py | devopsmakers/dj-prosftpd | fcef6b4a5b8b872c9830eaf0315f0467dd1944ff | [
"MIT"
] | null | null | null | tests/test_models.py | devopsmakers/dj-prosftpd | fcef6b4a5b8b872c9830eaf0315f0467dd1944ff | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_dj-prosftpd
------------
Tests for `dj-prosftpd` models module.
"""
from django.test import TestCase
from dj_prosftpd import models
class TestDj_prosftpd(TestCase):
    """Placeholder test case for the dj-prosftpd models module.

    All hooks are currently no-ops; real assertions against
    ``dj_prosftpd.models`` still need to be written.
    """

    def setUp(self):
        # No fixtures required yet.
        pass

    def test_something(self):
        # TODO: replace with real assertions against dj_prosftpd.models.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass
| 13.192308 | 38 | 0.620991 |
from django.test import TestCase
from dj_prosftpd import models
class TestDj_prosftpd(TestCase):
    """Placeholder test case for the ``dj_prosftpd.models`` module."""
    def setUp(self):
        # No fixtures needed yet.
        pass
    def test_something(self):
        # TODO: add real assertions against dj_prosftpd.models.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
| true | true |
f723cfc4543d31b1df927182b8dd2e714298c92e | 14,427 | py | Python | fermi_blind_search/database.py | giacomov/fermi_blind_search | f8d52cb8b61519223918d197682b4f70c78cce10 | [
"BSD-3-Clause"
] | null | null | null | fermi_blind_search/database.py | giacomov/fermi_blind_search | f8d52cb8b61519223918d197682b4f70c78cce10 | [
"BSD-3-Clause"
] | null | null | null | fermi_blind_search/database.py | giacomov/fermi_blind_search | f8d52cb8b61519223918d197682b4f70c78cce10 | [
"BSD-3-Clause"
] | 1 | 2017-04-01T10:42:07.000Z | 2017-04-01T10:42:07.000Z | #!/usr/bin/env python
from contextlib import contextmanager
import argparse
import sys
import sshtunnel
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from fermi_blind_search.configuration import get_config
from fermi_blind_search import myLogging
_logger = myLogging.log.getLogger("database")
# will store the engine that will connect to the database
_engine = None
# we need this to handle the tables
Base = declarative_base()
# defines the class that will connect to the database
Session = sessionmaker()
@contextmanager
def database_connection(config):
    """Context manager that yields a connected :class:`Database` and
    guarantees it is closed on exit.

    If the ``[SSH db tunnel] remote_host`` option of *config* is non-empty,
    an SSH tunnel is opened around the database connection and torn down
    after the database is closed.  (The real-time search currently relies
    on an external autossh tunnel instead; this code path is kept in case a
    tunnel ever needs to be established from Python again.)
    """
    if config.get("SSH db tunnel", "remote_host") != '':
        with sshtunnel.SSHTunnelForwarder(config.get("SSH db tunnel", "remote_host"),
                                          ssh_username=config.get("SSH db tunnel", "username"),
                                          host_pkey_directories=[
                                              config.get("SSH db tunnel", "key_directory")],
                                          remote_bind_address=('127.0.0.1',
                                                               int(config.get("SSH db tunnel", "tunnel_port"))),
                                          local_bind_address=('localhost',
                                                              int(config.get('Real time', 'db_port'))),
                                          ):
            db_instance = Database(config)
            try:
                yield db_instance
            finally:
                # try/finally (rather than except: raise) is the idiomatic
                # cleanup shape for @contextmanager generators.
                db_instance.close()
    else:
        db_instance = Database(config)
        try:
            yield db_instance
        finally:
            db_instance.close()
class Database(object):
    """Wrapper around the real-time-search database.

    Owns the module-level SQLAlchemy engine and provides CRUD helpers for
    the ``analysis`` and ``results`` tables (see the :class:`Analysis` and
    :class:`Results` ORM classes in this module).
    """

    def __init__(self, config):
        """Create the engine from *config* and bind ``Base``/``Session`` to it.

        Supports either a local sqlite file (``is_sqlite`` = "True") or a
        ``dialect://user:password@host:port/path`` URL assembled from the
        [Real time] section of the configuration.
        """
        global Base
        global Session
        global _engine

        # Build the engine URL from the configuration.
        if config.get("Real time", "is_sqlite") == "True":
            engine_url = "sqlite:///" + config.get("Real time", "db_path")
        else:
            engine_url = config.get("Real time", "db_dialect") + "://" + config.get("Real time", "db_username") + ":" + \
                         config.get("Real time", "db_password") + "@" + config.get("Real time", "db_host") + ":" + \
                         config.get("Real time", "db_port") + "/" + config.get("Real time", "db_path")

        _logger.debug("Database engine URL: %s" % engine_url)

        _engine = create_engine(engine_url)

        # Bind the engine to the declarative base and the session factory so
        # table creation and all future sessions use this connection.
        Base.metadata.bind = _engine
        Session.configure(bind=_engine)

        self._config = config

    def create_tables(self):
        """Create the Analysis and Results tables."""
        Base.metadata.create_all(_engine)
        _logger.info("Successfully created database tables")

    def delete_analysis_table(self):
        """Drop the analysis table, re-raising if both drop signatures fail."""
        try:
            Analysis.__table__.drop()
        except Exception:
            try:
                # Fall back to passing the engine explicitly (needed when the
                # table's metadata is not bound to an engine).
                Analysis.__table__.drop(_engine)
            except Exception:
                _logger.error('ERROR: Could not delete Analysis Table')
                raise
        else:
            _logger.info("Successfully deleted Analysis table")

    def delete_results_table(self):
        """Drop the results table, re-raising if both drop signatures fail."""
        try:
            Results.__table__.drop()
        except Exception:
            try:
                # Fall back to passing the engine explicitly.
                Results.__table__.drop(_engine)
            except Exception:
                _logger.error('ERROR: Could not delete Results Table')
                raise
        else:
            _logger.info("Successfully delete Results table")

    def add_analysis(self, analysis_vals):
        """Insert a new row into the analysis table.

        *analysis_vals* must provide ``met_start``, ``duration``,
        ``counts`` (int) and ``directory``; a missing key raises KeyError.
        Commit errors propagate to the caller.
        """
        assert (analysis_vals['met_start'] is not None and analysis_vals['duration'] is not None and
                analysis_vals['counts'] is not None and analysis_vals['directory'] is not None), \
            "One of the parameters to enter the analysis into the database is missing. Parameters are met_start, " \
            "duration, counts, and directory"
        assert isinstance(analysis_vals["counts"], int), "Counts is not an integer"

        try:
            new_analysis = Analysis(met_start=analysis_vals['met_start'], duration=analysis_vals['duration'],
                                    counts=analysis_vals['counts'], directory=analysis_vals['directory'])
            _logger.info("Adding this Analysis to the database: %s" % new_analysis)
        except KeyError:
            _logger.error('ERROR: The analysis you want to add does not have the proper fields!')
            raise

        # Open a session, add the analysis to the table and commit.
        session = Session()
        session.add(new_analysis)
        session.commit()
        _logger.debug("Successfully added analysis to db")

    def update_analysis_counts(self, met_start, duration, new_counts):
        """Set the counts column of the (met_start, duration) analysis row."""
        session = Session()

        # Get the analysis to be updated.
        results = session.query(Analysis).filter(Analysis.met_start == met_start).filter(
            Analysis.duration == duration).all()

        # Exactly one analysis must match: (met_start, duration) is the
        # table's composite primary key.
        assert len(results) != 0, "Cannot update this analysis because it does not exist"
        assert len(results) == 1, 'More than one analysis exists with these parameters! This should never happen'

        analysis = results[0]
        _logger.info("Updating this analysis: %s to have %s counts" % (analysis, new_counts))

        analysis.counts = new_counts
        session.commit()
        _logger.debug("Successfully updated analysis")

    def add_candidate(self, candidate_vals):
        """Insert a new row into the results table and return the new object.

        *candidate_vals* must provide ``ra``, ``dec``, ``met_start``,
        ``interval`` and ``email``; a missing key raises KeyError.
        """
        assert (candidate_vals['ra'] is not None and candidate_vals['dec'] is not None and
                candidate_vals['met_start'] is not None and candidate_vals['interval'] is not None and
                candidate_vals['email'] is not None), \
            "One of the parameters to enter the candidate into the database is missing. Parameters are ra, dec, " \
            "met_start, interval, email"

        try:
            new_candidate = Results(ra=candidate_vals['ra'], dec=candidate_vals['dec'],
                                    met_start=candidate_vals['met_start'], interval=candidate_vals['interval'],
                                    email=candidate_vals['email'])
            _logger.info("Adding this result to the database %s" % new_candidate)
        except KeyError:
            _logger.error('ERROR: The result you want to add does not have the proper fields')
            raise

        # Open a session, add the result to the table and commit.
        session = Session()
        session.add(new_candidate)
        session.commit()
        _logger.debug("Successfully added result to database")

        return new_candidate

    def get_analysis_between_times(self, start, stop):
        """Return analyses whose start OR end time falls in [start, stop]."""
        _logger.info("Fetching analyses using data between %s and %s" % (start, stop))

        session = Session()

        return session.query(Analysis).filter(or_(and_(Analysis.met_start >= start, Analysis.met_start <= stop),
                                                  and_(Analysis.met_start + Analysis.duration >= start,
                                                       Analysis.met_start + Analysis.duration <= stop))).all()

    def get_exact_analysis(self, start, stop):
        """Return analyses whose start and end times exactly match."""
        _logger.info("Fetching analysis with met_start = %s and met_start + duration = %s" % (start, stop))

        session = Session()

        return session.query(Analysis).filter(and_(Analysis.met_start == start,
                                                   Analysis.met_start + Analysis.duration == stop)).all()

    def get_results(self, candidate_vals):
        """Return stored results matching *candidate_vals* within the
        (ra, dec, met_start, interval) tolerances from the configuration."""
        assert (candidate_vals['ra'] is not None and candidate_vals['dec'] is not None and
                candidate_vals['met_start'] is not None and candidate_vals['interval'] is not None), \
            "One of the parameters to enter the candidate into the database is missing. Parameters are ra, dec, " \
            "met_start, interval"

        session = Session()

        # Tolerance ranges for deciding whether two candidates match.
        ra_tol = float(self._config.get("Real time", "ra_tol"))
        dec_tol = float(self._config.get("Real time", "dec_tol"))
        start_tol = float(self._config.get("Real time", "start_tol"))
        int_tol = float(self._config.get("Real time", "int_tol"))

        _logger.info("Fetching results within %s of ra, %s of dec, %s of met_start, and %s of interval of %s" %
                     (ra_tol, dec_tol, start_tol, int_tol, candidate_vals))

        return session.query(Results).filter(and_(candidate_vals['ra'] - ra_tol <= Results.ra,
                                                  Results.ra <= candidate_vals['ra'] + ra_tol,
                                                  candidate_vals['dec'] - dec_tol <= Results.dec,
                                                  Results.dec <= candidate_vals['dec'] + dec_tol,
                                                  candidate_vals['met_start'] - start_tol <= Results.met_start,
                                                  Results.met_start <= candidate_vals['met_start'] + start_tol,
                                                  candidate_vals['interval'] - int_tol <= Results.interval,
                                                  Results.interval <= candidate_vals['interval'] + int_tol)).all()

    def get_results_to_email(self):
        """Return all results that have not been emailed out yet."""
        _logger.info("Fetching results with email = False (0 in database)")

        session = Session()

        return session.query(Results).filter(Results.email == 0).all()

    def update_result_email(self, candidate, email_val=False):
        """Set the email flag of *candidate* to *email_val* and commit."""
        _logger.info("Updating result: %s to have email value: %s" % (candidate, email_val))

        session = Session()

        candidate.email = email_val
        session.commit()
        _logger.debug("Successfully updated result")

    def close(self):
        """Close every open session."""
        global _logger
        _logger.info("Closing database")
        # NOTE(review): Session.close_all() is deprecated in modern
        # SQLAlchemy (close_all_sessions() replaces it); kept here for
        # compatibility with the pinned version.
        Session.close_all()
class Analysis(Base):
    """ORM mapping for the 'analysis' table.

    One row per blind-search analysis run; (met_start, duration) is the
    composite primary key (see Database.update_analysis_counts).
    """
    # give the table a name
    __tablename__ = 'analysis'
    # define the columns of the table
    # met_start: start time of the analyzed interval (MET; units presumably seconds -- confirm)
    met_start = Column(Float(32), Sequence('analysis_met_start_seq'), primary_key=True)
    # duration: length of the analyzed interval, same units as met_start
    duration = Column(Float(32), Sequence('analysis_duration_seq'), primary_key=True)
    # counts: integer counts value stored for this analysis (validated as int in add_analysis)
    counts = Column(Integer)
    # directory: filesystem path associated with this analysis
    directory = Column(String(250))
    def __repr__(self):
        # formatting string so that printing rows from the table is more readable
        return "<Analysis(met_start= %s, duration= %s, counts= %s, directory= %s)>" % \
               (self.met_start, self.duration, self.counts, self.directory)
class Results(Base):
    """ORM mapping for the 'results' table.

    One row per transient candidate; (met_start, interval) is the
    composite primary key, and ``email`` flags whether the candidate has
    already been emailed (False/0 rows are returned by
    Database.get_results_to_email).
    """
    # give the table a name
    __tablename__ = 'results'
    # define the columns of the table
    # ra / dec: candidate sky position (degrees assumed -- TODO confirm against producer)
    ra = Column(Float(32))
    dec = Column(Float(32))
    # met_start: start time of the candidate interval (MET)
    met_start = Column(Float(32), Sequence('results_met_start_seq'), primary_key=True)
    # interval: duration of the candidate interval
    interval = Column(Float(32), Sequence('results_interval_seq'), primary_key=True)
    # email: set True once a notification email has been sent
    email = Column(Boolean)
    def __repr__(self):
        # formatting string so that printing rows from the table is more readable
        return "<Results(ra= %s, dec= %s, met_start= %s, interval= %s, email=%s)>" % (self.ra, self.dec,
                                                                                      self.met_start,
                                                                                      self.interval, self.email)
if __name__ == "__main__":
# Allows you to quickly delete and re-create the database.
parser = argparse.ArgumentParser()
parser.add_argument('--config', help='Path to config file', type=get_config, required=True)
parser.add_argument('--clear', help="If set, delete the database tables, and recreate them", action="store_true")
args = parser.parse_args()
configuration = args.config
# start db connection
db = Database(configuration)
if args.clear:
# delete the tables
db.delete_analysis_table()
db.delete_results_table()
# re-create the tables
db.create_tables()
| 37.27907 | 121 | 0.582311 |
from contextlib import contextmanager
import argparse
import sys
import sshtunnel
from sqlalchemy import *
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from fermi_blind_search.configuration import get_config
from fermi_blind_search import myLogging
_logger = myLogging.log.getLogger("database")
_engine = None
Base = declarative_base()
Session = sessionmaker()
@contextmanager
def database_connection(config):
if config.get("SSH db tunnel", "remote_host") != '':
with sshtunnel.SSHTunnelForwarder(config.get("SSH db tunnel", "remote_host"),
ssh_username=config.get("SSH db tunnel", "username"),
host_pkey_directories=[
config.get("SSH db tunnel", "key_directory")],
remote_bind_address=('127.0.0.1',
int(config.get("SSH db tunnel", "tunnel_port"))),
local_bind_address=('localhost',
int(config.get('Real time', 'db_port'))),
):
db_instance = Database(config)
try:
yield db_instance
except:
raise
finally:
db_instance.close()
else:
db_instance = Database(config)
try:
yield db_instance
except:
raise
finally:
db_instance.close()
class Database(object):
def __init__(self, config):
global Base
global Session
global _engine
if config.get("Real time", "is_sqlite") == "True":
engine_url = "sqlite:///" + config.get("Real time", "db_path")
else:
engine_url = config.get("Real time", "db_dialect") + "://" + config.get("Real time", "db_username") + ":" + \
config.get("Real time", "db_password") + "@" + config.get("Real time", "db_host") + ":" + \
config.get("Real time", "db_port") + "/" + config.get("Real time", "db_path")
_logger.debug("Database engine URL: %s" % engine_url)
_engine = create_engine(engine_url)
Base.metadata.bind = _engine
Session.configure(bind=_engine)
self._config = config
def create_tables(self):
Base.metadata.create_all(_engine)
_logger.info("Successfully created database tables")
def delete_analysis_table(self):
try:
Analysis.__table__.drop()
except:
try:
Analysis.__table__.drop(_engine)
except:
_logger.error('ERROR: Could not delete Analysis Table')
raise
else:
_logger.info("Successfully deleted Analysis table")
def delete_results_table(self):
try:
Results.__table__.drop()
except:
try:
Results.__table__.drop(_engine)
except:
_logger.error('ERROR: Could not delete Results Table')
raise
else:
_logger.info("Successfully delete Results table")
def add_analysis(self, analysis_vals):
assert (analysis_vals['met_start'] is not None and analysis_vals['duration'] is not None and
analysis_vals['counts'] is not None and analysis_vals['directory'] is not None), \
"One of the parameters to enter the analysis into the database is missing. Parameters are met_start, " \
"duration, counts, and directory"
assert isinstance(analysis_vals["counts"], int), "Counts is not an integer"
try:
# set the values of the analysis to be added to the table
new_analysis = Analysis(met_start=analysis_vals['met_start'], duration=analysis_vals['duration'],
counts=analysis_vals['counts'], directory=analysis_vals['directory'])
_logger.info("Adding this Analysis to the database: %s" % new_analysis)
except KeyError:
_logger.error('ERROR: The analysis you want to add does not have the proper fields!')
raise
except:
raise
else:
# open a session, add the analysis to the table, close the session
session = Session()
session.add(new_analysis)
try:
session.commit()
except:
raise
else:
_logger.debug("Successfully added analysis to db")
def update_analysis_counts(self, met_start, duration, new_counts):
# open a session with the DB
session = Session()
# get the analysis to be updated
results = session.query(Analysis).filter(Analysis.met_start == met_start).filter(
Analysis.duration == duration).all()
# check that there is only one analysis that matches these parameters
assert len(results) != 0, "Cannot update this analysis because it does not exist"
assert len(results) == 1, 'More than one analysis exists with these parameters! This should never happen'
analysis = results[0]
_logger.info("Updating this analysis: %s to have %s counts" % (analysis, new_counts))
# update the counts column of the analysis in question
analysis.counts = new_counts
try:
# commit the change
session.commit()
except:
raise
else:
_logger.debug("Successfully updated analysis")
def add_candidate(self, candidate_vals):
# TODO: which check that condidate_vals contains the correct field?
# TODO: do we want to add a check that the candidate doesn't already exist?
assert (candidate_vals['ra'] is not None and candidate_vals['dec'] is not None and
candidate_vals['met_start'] is not None and candidate_vals['interval'] is not None and
candidate_vals['email'] is not None), \
"One of the parameters to enter the candidate into the database is missing. Parameters are ra, dec, " \
"met_start, interval, email"
try:
new_candidate = Results(ra=candidate_vals['ra'], dec=candidate_vals['dec'],
met_start=candidate_vals['met_start'], interval=candidate_vals['interval'],
email=candidate_vals['email'])
_logger.info("Adding this result to the database %s" % new_candidate)
except KeyError:
_logger.error('ERROR: The result you want to add does not have the proper fields')
raise
except:
raise
else:
session = Session()
session.add(new_candidate)
try:
session.commit()
except:
raise
else:
_logger.debug("Successfully added result to database")
return new_candidate
def get_analysis_between_times(self, start, stop):
_logger.info("Fetching analyses using data between %s and %s" % (start, stop))
session = Session()
return session.query(Analysis).filter(or_(and_(Analysis.met_start >= start, Analysis.met_start <= stop),
and_(Analysis.met_start + Analysis.duration >= start,
Analysis.met_start + Analysis.duration <= stop))).all()
def get_exact_analysis(self, start, stop):
_logger.info("Fetching analysis with met_start = %s and met_start + duration = %s" % (start, stop))
session = Session()
return session.query(Analysis).filter(and_(Analysis.met_start == start,
Analysis.met_start + Analysis.duration == stop)).all()
def get_results(self, candidate_vals):
assert (candidate_vals['ra'] is not None and candidate_vals['dec'] is not None and
candidate_vals['met_start'] is not None and candidate_vals['interval'] is not None), \
"One of the parameters to enter the candidate into the database is missing. Parameters are ra, dec, " \
"met_start, interval"
session = Session()
ra_tol = float(self._config.get("Real time", "ra_tol"))
dec_tol = float(self._config.get("Real time", "dec_tol"))
start_tol = float(self._config.get("Real time", "start_tol"))
int_tol = float(self._config.get("Real time", "int_tol"))
_logger.info("Fetching results within %s of ra, %s of dec, %s of met_start, and %s of interval of %s" %
(ra_tol, dec_tol, start_tol, int_tol, candidate_vals))
return session.query(Results).filter(and_(candidate_vals['ra'] - ra_tol <= Results.ra,
Results.ra <= candidate_vals['ra'] + ra_tol,
candidate_vals['dec'] - dec_tol <= Results.dec,
Results.dec <= candidate_vals['dec'] + dec_tol,
candidate_vals['met_start'] - start_tol <= Results.met_start,
Results.met_start <= candidate_vals['met_start'] + start_tol,
candidate_vals['interval'] - int_tol <= Results.interval,
Results.interval <= candidate_vals['interval'] + int_tol)).all()
def get_results_to_email(self):
_logger.info("Fetching results with email = False (0 in database)")
session = Session()
return session.query(Results).filter(Results.email == 0).all()
def update_result_email(self, candidate, email_val=False):
_logger.info("Updating result: %s to have email value: %s" % (candidate, email_val))
session = Session()
candidate.email = email_val
try:
session.commit()
except:
raise
else:
_logger.debug("Successfully updated result")
def close(self):
global _logger
_logger.info("Closing database")
Session.close_all()
class Analysis(Base):
__tablename__ = 'analysis'
met_start = Column(Float(32), Sequence('analysis_met_start_seq'), primary_key=True)
duration = Column(Float(32), Sequence('analysis_duration_seq'), primary_key=True)
counts = Column(Integer)
directory = Column(String(250))
def __repr__(self):
return "<Analysis(met_start= %s, duration= %s, counts= %s, directory= %s)>" % \
(self.met_start, self.duration, self.counts, self.directory)
class Results(Base):
__tablename__ = 'results'
ra = Column(Float(32))
dec = Column(Float(32))
met_start = Column(Float(32), Sequence('results_met_start_seq'), primary_key=True)
interval = Column(Float(32), Sequence('results_interval_seq'), primary_key=True)
email = Column(Boolean)
def __repr__(self):
return "<Results(ra= %s, dec= %s, met_start= %s, interval= %s, email=%s)>" % (self.ra, self.dec,
self.met_start,
self.interval, self.email)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--config', help='Path to config file', type=get_config, required=True)
parser.add_argument('--clear', help="If set, delete the database tables, and recreate them", action="store_true")
args = parser.parse_args()
configuration = args.config
db = Database(configuration)
if args.clear:
db.delete_analysis_table()
db.delete_results_table()
db.create_tables()
| true | true |
f723cfd084712344629373ab39822ddba59ccffa | 2,538 | py | Python | irrigator_pro/farms/migrations/0012_copy_field_list_to_field_for_waterhistory_and_probe.py | warnes/irrigatorpro | 4838f8832bdbf87f394a0298adc5dabfc26e82e8 | [
"MIT"
] | null | null | null | irrigator_pro/farms/migrations/0012_copy_field_list_to_field_for_waterhistory_and_probe.py | warnes/irrigatorpro | 4838f8832bdbf87f394a0298adc5dabfc26e82e8 | [
"MIT"
] | null | null | null | irrigator_pro/farms/migrations/0012_copy_field_list_to_field_for_waterhistory_and_probe.py | warnes/irrigatorpro | 4838f8832bdbf87f394a0298adc5dabfc26e82e8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import sys
from django.db import connection
def convert_probe_field_list_to_field(apps, schema_editor):
"""
For each current Probe, copy field_list[0] into field, and create
new records for field_list[1:]
** For some reason, probe.field_list.all() doesn't work here, so
use direct SQL instead. **
"""
Probe = apps.get_model("farms", "Probe")
cursor = connection.cursor()
cursor.execute("select * from farms_probe_field_list")
previous_id = -1
for (id, probe_id, field_id) in cursor.fetchall():
#print "Working on record #%d: Probe #%d, Field #%d" %(id, probe_id, field_id)
"""
If there are multiple fields assigned just preserve the first one
otherwise a unique constraing on the pair (cdop_season_id, radio_id)
will be violated.
This is necessary only for Alain's test server.
"""
if probe_id != previous_id:
previous_id = probe_id
probe = Probe.objects.get(id=probe_id)
if probe.field_id is None:
probe.field_id = field_id
else:
probe.id = None
probe.pk = None
probe.field_id = field_id
probe.save()
def convert_waterhistory_field_list_to_field(apps, schema_editor):
"""
For each current WaterHistory object, copy field_list[0] into
field, and create new records for field_list[1:]
** For some reason, waterhistory.field_list.all() doesn't work
here, so use direct SQL instead. **
"""
WaterHistory = apps.get_model("farms", "WaterHistory")
cursor = connection.cursor()
cursor.execute("select * from farms_waterhistory_field_list")
for (id, waterhistory_id, field_id) in cursor.fetchall():
#print "Working on ", (id, waterhistory_id, field_id)
waterhistory = WaterHistory.objects.get(id=waterhistory_id)
if waterhistory.field_id is None:
waterhistory.field_id = field_id
else:
waterhistory.id = None
waterhistory.pk = None
waterhistory.field_id = field_id
waterhistory.save()
class Migration(migrations.Migration):
dependencies = [
('farms', '0011_add_ForeignKey_for_field_to_waterhistory_and_probe'),
]
operations = [
migrations.RunPython( convert_probe_field_list_to_field ),
migrations.RunPython( convert_waterhistory_field_list_to_field),
]
| 29.511628 | 84 | 0.670607 |
from __future__ import unicode_literals
from django.db import models, migrations
import django.utils.timezone
import sys
from django.db import connection
def convert_probe_field_list_to_field(apps, schema_editor):
Probe = apps.get_model("farms", "Probe")
cursor = connection.cursor()
cursor.execute("select * from farms_probe_field_list")
previous_id = -1
for (id, probe_id, field_id) in cursor.fetchall():
if probe_id != previous_id:
previous_id = probe_id
probe = Probe.objects.get(id=probe_id)
if probe.field_id is None:
probe.field_id = field_id
else:
probe.id = None
probe.pk = None
probe.field_id = field_id
probe.save()
def convert_waterhistory_field_list_to_field(apps, schema_editor):
WaterHistory = apps.get_model("farms", "WaterHistory")
cursor = connection.cursor()
cursor.execute("select * from farms_waterhistory_field_list")
for (id, waterhistory_id, field_id) in cursor.fetchall():
waterhistory = WaterHistory.objects.get(id=waterhistory_id)
if waterhistory.field_id is None:
waterhistory.field_id = field_id
else:
waterhistory.id = None
waterhistory.pk = None
waterhistory.field_id = field_id
waterhistory.save()
class Migration(migrations.Migration):
dependencies = [
('farms', '0011_add_ForeignKey_for_field_to_waterhistory_and_probe'),
]
operations = [
migrations.RunPython( convert_probe_field_list_to_field ),
migrations.RunPython( convert_waterhistory_field_list_to_field),
]
| true | true |
f723d086e096878de8624880928432132d608eb2 | 7,000 | py | Python | rllib/agents/marwil/tests/test_marwil.py | kifarid/ray | 43c97c2afb979987be82fa50048674e9b6776d5d | [
"Apache-2.0"
] | 3 | 2021-08-29T20:41:21.000Z | 2022-01-31T18:47:51.000Z | rllib/agents/marwil/tests/test_marwil.py | kifarid/ray | 43c97c2afb979987be82fa50048674e9b6776d5d | [
"Apache-2.0"
] | 61 | 2021-06-05T07:05:08.000Z | 2022-03-19T07:14:56.000Z | rllib/agents/marwil/tests/test_marwil.py | kifarid/ray | 43c97c2afb979987be82fa50048674e9b6776d5d | [
"Apache-2.0"
] | null | null | null | import numpy as np
import os
from pathlib import Path
import unittest
import ray
import ray.rllib.agents.marwil as marwil
from ray.rllib.evaluation.postprocessing import compute_advantages
from ray.rllib.offline import JsonReader
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.test_utils import check, check_compute_single_action, \
framework_iterator
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
class TestMARWIL(unittest.TestCase):
    """MARWIL trainer tests: end-to-end offline learning and an exact
    reconstruction of the MARWIL loss function."""
    @classmethod
    def setUpClass(cls):
        # One shared ray cluster for all tests in this class.
        ray.init(num_cpus=4)
    @classmethod
    def tearDownClass(cls):
        ray.shutdown()
    def test_marwil_compilation_and_learning_from_offline_file(self):
        """Test whether a MARWILTrainer can be built with all frameworks.

        Learns from a historic-data file.
        To generate this data, first run:
        $ ./train.py --run=PPO --env=CartPole-v0 \
          --stop='{"timesteps_total": 50000}' \
          --config='{"output": "/tmp/out", "batch_mode": "complete_episodes"}'
        """
        # Locate the expert data file shipped with the rllib test suite.
        rllib_dir = Path(__file__).parent.parent.parent.parent
        print("rllib dir={}".format(rllib_dir))
        data_file = os.path.join(rllib_dir, "tests/data/cartpole/large.json")
        print("data_file={} exists={}".format(data_file,
                                              os.path.isfile(data_file)))
        config = marwil.DEFAULT_CONFIG.copy()
        config["num_workers"] = 2
        config["evaluation_num_workers"] = 1
        config["evaluation_interval"] = 2
        # Evaluate on actual environment.
        config["evaluation_config"] = {"input": "sampler"}
        # Learn from offline data.
        config["input"] = [data_file]
        num_iterations = 350
        min_reward = 70.0
        # Test for all frameworks.
        for _ in framework_iterator(config, frameworks=("tf", "torch")):
            trainer = marwil.MARWILTrainer(config=config, env="CartPole-v0")
            learnt = False
            for i in range(num_iterations):
                eval_results = trainer.train().get("evaluation")
                if eval_results:
                    print("iter={} R={} ".format(
                        i, eval_results["episode_reward_mean"]))
                    # Learn until some reward is reached on an actual live env.
                    if eval_results["episode_reward_mean"] > min_reward:
                        print("learnt!")
                        learnt = True
                        break
            if not learnt:
                raise ValueError(
                    "MARWILTrainer did not reach {} reward from expert "
                    "offline data!".format(min_reward))
            # Smoke-test single-action inference on the trained policy.
            check_compute_single_action(
                trainer, include_prev_action_reward=True)
            trainer.stop()
    def test_marwil_loss_function(self):
        """Recompute the MARWIL loss by hand and compare it against the
        policy's own loss output for tf, tf-eager and torch.

        To generate the historic data used in this test case, first run:
        $ ./train.py --run=PPO --env=CartPole-v0 \
          --stop='{"timesteps_total": 50000}' \
          --config='{"output": "/tmp/out", "batch_mode": "complete_episodes"}'
        """
        rllib_dir = Path(__file__).parent.parent.parent.parent
        print("rllib dir={}".format(rllib_dir))
        data_file = os.path.join(rllib_dir, "tests/data/cartpole/small.json")
        print("data_file={} exists={}".format(data_file,
                                              os.path.isfile(data_file)))
        config = marwil.DEFAULT_CONFIG.copy()
        config["num_workers"] = 0  # Run locally.
        # Learn from offline data.
        config["input"] = [data_file]
        for fw, sess in framework_iterator(config, session=True):
            # Pull one batch of expert data straight from the json file.
            reader = JsonReader(inputs=[data_file])
            batch = reader.next()
            trainer = marwil.MARWILTrainer(config=config, env="CartPole-v0")
            policy = trainer.get_policy()
            model = policy.model
            # Calculate our own expected values (to then compare against the
            # agent's loss output).
            cummulative_rewards = compute_advantages(
                batch, 0.0, config["gamma"], 1.0, False, False)["advantages"]
            if fw == "torch":
                cummulative_rewards = torch.tensor(cummulative_rewards)
            if fw != "tf":
                batch = policy._lazy_tensor_dict(batch)
            model_out, _ = model.from_batch(batch)
            vf_estimates = model.value_function()
            if fw == "tf":
                # Static-graph tf: evaluate the tensors in the session.
                model_out, vf_estimates = \
                    policy.get_session().run([model_out, vf_estimates])
            adv = cummulative_rewards - vf_estimates
            if fw == "torch":
                adv = adv.detach().cpu().numpy()
            adv_squared = np.mean(np.square(adv))
            # NOTE(review): 100.0 looks like the initial value of the policy's
            # moving average of squared advantages (used to normalize adv);
            # confirm against marwil_tf_policy / marwil_torch_policy.
            c_2 = 100.0 + 1e-8 * (adv_squared - 100.0)
            c = np.sqrt(c_2)
            exp_advs = np.exp(config["beta"] * (adv / c))
            dist = policy.dist_class(model_out, model)
            logp = dist.logp(batch["actions"])
            if fw == "torch":
                logp = logp.detach().cpu().numpy()
            elif fw == "tf":
                logp = sess.run(logp)
            # Calculate all expected loss components.
            expected_vf_loss = 0.5 * adv_squared
            expected_pol_loss = -1.0 * np.mean(exp_advs * logp)
            expected_loss = \
                expected_pol_loss + config["vf_coeff"] * expected_vf_loss
            # Calculate the algorithm's loss (to check against our own
            # calculation above).
            batch.set_get_interceptor(None)
            postprocessed_batch = policy.postprocess_trajectory(batch)
            loss_func = marwil.marwil_tf_policy.marwil_loss if fw != "torch" \
                else marwil.marwil_torch_policy.marwil_loss
            if fw != "tf":
                policy._lazy_tensor_dict(postprocessed_batch)
                loss_out = loss_func(policy, model, policy.dist_class,
                                     postprocessed_batch)
            else:
                loss_out, v_loss, p_loss = policy.get_session().run(
                    [policy._loss, policy.loss.v_loss, policy.loss.p_loss],
                    feed_dict=policy._get_loss_inputs_dict(
                        postprocessed_batch, shuffle=False))
            # Check all components.
            if fw == "torch":
                check(policy.v_loss, expected_vf_loss, decimals=4)
                check(policy.p_loss, expected_pol_loss, decimals=4)
            elif fw == "tf":
                check(v_loss, expected_vf_loss, decimals=4)
                check(p_loss, expected_pol_loss, decimals=4)
            else:
                check(policy.loss.v_loss, expected_vf_loss, decimals=4)
                check(policy.loss.p_loss, expected_pol_loss, decimals=4)
            check(loss_out, expected_loss, decimals=3)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| 41.666667 | 79 | 0.576857 | import numpy as np
import os
from pathlib import Path
import unittest
import ray
import ray.rllib.agents.marwil as marwil
from ray.rllib.evaluation.postprocessing import compute_advantages
from ray.rllib.offline import JsonReader
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.test_utils import check, check_compute_single_action, \
framework_iterator
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
class TestMARWIL(unittest.TestCase):
@classmethod
def setUpClass(cls):
ray.init(num_cpus=4)
@classmethod
def tearDownClass(cls):
ray.shutdown()
def test_marwil_compilation_and_learning_from_offline_file(self):
rllib_dir = Path(__file__).parent.parent.parent.parent
print("rllib dir={}".format(rllib_dir))
data_file = os.path.join(rllib_dir, "tests/data/cartpole/large.json")
print("data_file={} exists={}".format(data_file,
os.path.isfile(data_file)))
config = marwil.DEFAULT_CONFIG.copy()
config["num_workers"] = 2
config["evaluation_num_workers"] = 1
config["evaluation_interval"] = 2
config["evaluation_config"] = {"input": "sampler"}
config["input"] = [data_file]
num_iterations = 350
min_reward = 70.0
for _ in framework_iterator(config, frameworks=("tf", "torch")):
trainer = marwil.MARWILTrainer(config=config, env="CartPole-v0")
learnt = False
for i in range(num_iterations):
eval_results = trainer.train().get("evaluation")
if eval_results:
print("iter={} R={} ".format(
i, eval_results["episode_reward_mean"]))
if eval_results["episode_reward_mean"] > min_reward:
print("learnt!")
learnt = True
break
if not learnt:
raise ValueError(
"MARWILTrainer did not reach {} reward from expert "
"offline data!".format(min_reward))
check_compute_single_action(
trainer, include_prev_action_reward=True)
trainer.stop()
def test_marwil_loss_function(self):
rllib_dir = Path(__file__).parent.parent.parent.parent
print("rllib dir={}".format(rllib_dir))
data_file = os.path.join(rllib_dir, "tests/data/cartpole/small.json")
print("data_file={} exists={}".format(data_file,
os.path.isfile(data_file)))
config = marwil.DEFAULT_CONFIG.copy()
config["num_workers"] = 0
config["input"] = [data_file]
for fw, sess in framework_iterator(config, session=True):
reader = JsonReader(inputs=[data_file])
batch = reader.next()
trainer = marwil.MARWILTrainer(config=config, env="CartPole-v0")
policy = trainer.get_policy()
model = policy.model
cummulative_rewards = compute_advantages(
batch, 0.0, config["gamma"], 1.0, False, False)["advantages"]
if fw == "torch":
cummulative_rewards = torch.tensor(cummulative_rewards)
if fw != "tf":
batch = policy._lazy_tensor_dict(batch)
model_out, _ = model.from_batch(batch)
vf_estimates = model.value_function()
if fw == "tf":
model_out, vf_estimates = \
policy.get_session().run([model_out, vf_estimates])
adv = cummulative_rewards - vf_estimates
if fw == "torch":
adv = adv.detach().cpu().numpy()
adv_squared = np.mean(np.square(adv))
c_2 = 100.0 + 1e-8 * (adv_squared - 100.0)
c = np.sqrt(c_2)
exp_advs = np.exp(config["beta"] * (adv / c))
dist = policy.dist_class(model_out, model)
logp = dist.logp(batch["actions"])
if fw == "torch":
logp = logp.detach().cpu().numpy()
elif fw == "tf":
logp = sess.run(logp)
# Calculate all expected loss components.
expected_vf_loss = 0.5 * adv_squared
expected_pol_loss = -1.0 * np.mean(exp_advs * logp)
expected_loss = \
expected_pol_loss + config["vf_coeff"] * expected_vf_loss
# Calculate the algorithm's loss (to check against our own
batch.set_get_interceptor(None)
postprocessed_batch = policy.postprocess_trajectory(batch)
loss_func = marwil.marwil_tf_policy.marwil_loss if fw != "torch" \
else marwil.marwil_torch_policy.marwil_loss
if fw != "tf":
policy._lazy_tensor_dict(postprocessed_batch)
loss_out = loss_func(policy, model, policy.dist_class,
postprocessed_batch)
else:
loss_out, v_loss, p_loss = policy.get_session().run(
[policy._loss, policy.loss.v_loss, policy.loss.p_loss],
feed_dict=policy._get_loss_inputs_dict(
postprocessed_batch, shuffle=False))
if fw == "torch":
check(policy.v_loss, expected_vf_loss, decimals=4)
check(policy.p_loss, expected_pol_loss, decimals=4)
elif fw == "tf":
check(v_loss, expected_vf_loss, decimals=4)
check(p_loss, expected_pol_loss, decimals=4)
else:
check(policy.loss.v_loss, expected_vf_loss, decimals=4)
check(policy.loss.p_loss, expected_pol_loss, decimals=4)
check(loss_out, expected_loss, decimals=3)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| true | true |
f723d0fdbd1846134fc03f67f0123e5aedd5f7e6 | 1,938 | py | Python | config.py | LucaMalavolta/q2 | d4cd62c3ea898c99334ea84e2b41ec75db9558f7 | [
"BSD-2-Clause"
] | null | null | null | config.py | LucaMalavolta/q2 | d4cd62c3ea898c99334ea84e2b41ec75db9558f7 | [
"BSD-2-Clause"
] | null | null | null | config.py | LucaMalavolta/q2 | d4cd62c3ea898c99334ea84e2b41ec75db9558f7 | [
"BSD-2-Clause"
] | null | null | null | import os
import logging
import matplotlib.pyplot as plt
logger = logging.getLogger(__name__)
# Paths to the bundled q2 data directories, resolved relative to this file.
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(path, 'Data')
COLORTEFF_PATH = os.path.join(path, 'ColorTeff')
MODATM_PATH = os.path.join(path, 'ModelAtmospheres')
ISOCHRONES_PATH = os.path.join(path, 'Isochrones')
OTHER_PATH = os.path.join(path, 'Other')
# Global matplotlib styling applied to all q2 plots at import time.
plt.rc("font", family='serif', serif='Ubuntu', monospace='Ubuntu Mono', \
    size=14)
plt.rc("axes", labelsize=15, titlesize=12)
plt.rc("xtick", top=True, direction='in', labelsize=14)
plt.rc("xtick.major", size=8, width=1)
plt.rc("ytick", right=True, direction='in', labelsize=14)
plt.rc("ytick.major", size=8, width=1)
plt.rc("lines", markersize=10, markeredgewidth=2)
plt.rc("lines", linewidth=3)
def moog_is_available():
    """Check whether the ``MOOGSILENT`` executable is on the PATH.

    q2 drives MOOG by invoking ``MOOGSILENT`` from the command line; if
    it cannot be found, the MOOG-dependent features of q2 will fail.

    Returns:
        bool: True if ``MOOGSILENT`` was found, False otherwise.
    """
    # shutil.which is portable (also works on Windows) and does not
    # spawn a shell the way os.system('which MOOGSILENT') did.
    import shutil
    if shutil.which('MOOGSILENT') is None:
        logger.warning("MOOGSILENT is not available")
        return False
    logger.info("MOOGSILENT is available")
    return True
def data_are_available():
    """Check that the q2 ``Data`` folder is present.

    q2 needs data files with model atmosphere and isochrone grids.
    They can be downloaded from:
    http://www.astrochasqui.com/projects/astro/share/q2Data.tar.gz
    and must be extracted inside the q2 directory
    ('tar xvfz q2Data.tar.gz' creates the Data folder).
    """
    # Guard clause: warn and bail out early when the folder is missing.
    if not os.path.exists(path):
        logger.warning("Data folder does not exist. See the 'Data' section "\
                       "at https://github.com/astroChasqui/q2")
        return False
    logger.info("Data folder exists")
    return True
| 36.566038 | 78 | 0.663055 | import os
import logging
import matplotlib.pyplot as plt
logger = logging.getLogger(__name__)
path = os.path.dirname(os.path.realpath(__file__))
path = os.path.join(path, 'Data')
COLORTEFF_PATH = os.path.join(path, 'ColorTeff')
MODATM_PATH = os.path.join(path, 'ModelAtmospheres')
ISOCHRONES_PATH = os.path.join(path, 'Isochrones')
OTHER_PATH = os.path.join(path, 'Other')
plt.rc("font", family='serif', serif='Ubuntu', monospace='Ubuntu Mono', \
size=14)
plt.rc("axes", labelsize=15, titlesize=12)
plt.rc("xtick", top=True, direction='in', labelsize=14)
plt.rc("xtick.major", size=8, width=1)
plt.rc("ytick", right=True, direction='in', labelsize=14)
plt.rc("ytick.major", size=8, width=1)
plt.rc("lines", markersize=10, markeredgewidth=2)
plt.rc("lines", linewidth=3)
def moog_is_available():
if os.system('which MOOGSILENT >/dev/null'):
logger.warning("MOOGSILENT is not available")
return False
else:
logger.info("MOOGSILENT is available")
return True
def data_are_available():
if os.path.exists(path):
logger.info("Data folder exists")
return True
else:
logger.warning("Data folder does not exist. See the 'Data' section "\
"at https://github.com/astroChasqui/q2")
return False
| true | true |
f723d13c5a9085305d4224504015e2a6d6c0eef1 | 1,341 | py | Python | concepts/recursion/fibonacci_sesies.py | dnootana/Python | 2881bafe8bc378fa3cae50a747fcea1a55630c63 | [
"MIT"
] | 1 | 2021-02-19T11:00:11.000Z | 2021-02-19T11:00:11.000Z | concepts/recursion/fibonacci_sesies.py | dnootana/Python | 2881bafe8bc378fa3cae50a747fcea1a55630c63 | [
"MIT"
] | null | null | null | concepts/recursion/fibonacci_sesies.py | dnootana/Python | 2881bafe8bc378fa3cae50a747fcea1a55630c63 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.8
"""
given a number N generate N no of fibonacci numbers
"""
from memoize import memoize
number = 25
fib_list = [None] * (number)
def fibonacci(num):
    """
    fibonacci series using iteration

    Fills the module-level ``fib_list`` in place with the first ``num``
    Fibonacci numbers and returns the (num-1)-th Fibonacci number
    (0-indexed, F(0) = 0).
    """
    a, b = 0, 1
    for i in range(num-1):
        # Write the current pair; fib_list[i+1] is rewritten with the
        # same value on the next iteration, which is harmless.
        fib_list[i], fib_list[i+1] = a, b
        a, b = b, a+b
    return a
fibonacci(number)
print("fibonacci series using iteration : ", fib_list)
fib_list = [None] * (number)
def fibonacci_a(num):
    """Fill the module-level ``fib_list`` in place with the first
    ``num`` Fibonacci numbers by index-wise iteration (returns None).
    """
    # Slice assignment mutates the global list, so no 'global' needed.
    fib_list[0:2] = [0,1]
    for i in range(2,num):
        fib_list[i] = fib_list[i-2] + fib_list[i-1]
fibonacci_a(number)
print("fibonacci series using iteration : ", fib_list)
fib_list = [None] * (number)
def fibonacci1(num):
    """
    fibonacci series using recursion

    Uses the module-level ``fib_list`` as a memo table (None marks an
    uncomputed entry) and returns the ``num``-th Fibonacci number.
    """
    global fib_list
    # Only recurse when this entry has not been computed yet.
    if fib_list[num] is None:
        if num == 0 or num == 1:
            fib_list[num] = num
        else:
            fib_list[num] = fibonacci1(num-1) + fibonacci1(num-2)
    return fib_list[num]
fibonacci1(number-1)
print("fibonacci series using recursion : ",fib_list)
fib_list = [None] * (number)
def fibonacci2(num):
    """
    fibonacci series using recursion with memoization

    Wraps a recursive helper with ``memoize`` so each Fibonacci number
    is computed only once, then copies the first ``num`` values into
    the module-level ``fib_list``.
    """
    global fib_list
    def fib(N):
        if N==0 or N==1:
            return N
        else:
            return fib(N-1) + fib(N-2)
    # Rebinding 'fib' makes the recursive calls resolve to the memoized
    # wrapper as well, so intermediate results are cached.
    fib = memoize(fib)
    for i in range(num):
        fib_list[i] = fib(i)
fibonacci2(number)
print("fibonacci series using recursion with memoization : ", fib_list) | 19.434783 | 71 | 0.671141 |
from memoize import memoize
number = 25
fib_list = [None] * (number)
def fibonacci(num):
a, b = 0, 1
for i in range(num-1):
fib_list[i], fib_list[i+1] = a, b
a, b = b, a+b
return a
fibonacci(number)
print("fibonacci series using iteration : ", fib_list)
fib_list = [None] * (number)
def fibonacci_a(num):
fib_list[0:2] = [0,1]
for i in range(2,num):
fib_list[i] = fib_list[i-2] + fib_list[i-1]
fibonacci_a(number)
print("fibonacci series using iteration : ", fib_list)
fib_list = [None] * (number)
def fibonacci1(num):
global fib_list
if fib_list[num] is None:
if num == 0 or num == 1:
fib_list[num] = num
else:
fib_list[num] = fibonacci1(num-1) + fibonacci1(num-2)
return fib_list[num]
fibonacci1(number-1)
print("fibonacci series using recursion : ",fib_list)
fib_list = [None] * (number)
def fibonacci2(num):
global fib_list
def fib(N):
if N==0 or N==1:
return N
else:
return fib(N-1) + fib(N-2)
fib = memoize(fib)
for i in range(num):
fib_list[i] = fib(i)
fibonacci2(number)
print("fibonacci series using recursion with memoization : ", fib_list) | true | true |
f723d38041e06dcc3fe6b444f4b546e4e28b9d49 | 754 | py | Python | pydatastructs/linear_data_structures/__init__.py | hpnightowl/pydatastructs | ec69ef887fee200390bff41ab6859a4ab0b26fbf | [
"BSD-3-Clause"
] | null | null | null | pydatastructs/linear_data_structures/__init__.py | hpnightowl/pydatastructs | ec69ef887fee200390bff41ab6859a4ab0b26fbf | [
"BSD-3-Clause"
] | null | null | null | pydatastructs/linear_data_structures/__init__.py | hpnightowl/pydatastructs | ec69ef887fee200390bff41ab6859a4ab0b26fbf | [
"BSD-3-Clause"
] | 1 | 2021-02-05T04:49:55.000Z | 2021-02-05T04:49:55.000Z | __all__ = []
from pydatastructs.linear_data_structures import arrays, linked_lists, algorithms
from pydatastructs.linear_data_structures.arrays import OneDimensionalArray, DynamicOneDimensionalArray, \
MultiDimensionalArray
from pydatastructs.linear_data_structures.algorithms import merge_sort_parallel, brick_sort, brick_sort_parallel, \
heapsort, matrix_multiply_parallel, counting_sort, bucket_sort, cocktail_shaker_sort, quick_sort, \
longest_common_subsequence
from pydatastructs.linear_data_structures.linked_lists import SinglyLinkedList, DoublyLinkedList, \
SinglyCircularLinkedList, DoublyCircularLinkedList, SkipList
__all__.extend(arrays.__all__)
__all__.extend(linked_lists.__all__)
__all__.extend(algorithms.__all__)
| 37.7 | 115 | 0.855438 | __all__ = []
from pydatastructs.linear_data_structures import arrays, linked_lists, algorithms
from pydatastructs.linear_data_structures.arrays import OneDimensionalArray, DynamicOneDimensionalArray, \
MultiDimensionalArray
from pydatastructs.linear_data_structures.algorithms import merge_sort_parallel, brick_sort, brick_sort_parallel, \
heapsort, matrix_multiply_parallel, counting_sort, bucket_sort, cocktail_shaker_sort, quick_sort, \
longest_common_subsequence
from pydatastructs.linear_data_structures.linked_lists import SinglyLinkedList, DoublyLinkedList, \
SinglyCircularLinkedList, DoublyCircularLinkedList, SkipList
__all__.extend(arrays.__all__)
__all__.extend(linked_lists.__all__)
__all__.extend(algorithms.__all__)
| true | true |
f723d3bddf9b5128923113de82df064942d27440 | 391 | py | Python | Epitome/wsgi.py | pbout/ept | 7da64e606c9c163ffc8285cd8c77288807f4f477 | [
"RSA-MD"
] | null | null | null | Epitome/wsgi.py | pbout/ept | 7da64e606c9c163ffc8285cd8c77288807f4f477 | [
"RSA-MD"
] | null | null | null | Epitome/wsgi.py | pbout/ept | 7da64e606c9c163ffc8285cd8c77288807f4f477 | [
"RSA-MD"
] | null | null | null | """
WSGI config for Epitome project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Epitome.settings")
application = get_wsgi_application()
| 23 | 78 | 0.785166 |
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Epitome.settings")
application = get_wsgi_application()
| true | true |
f723d4daf4d337355a5c29418d6ad94c37865a78 | 3,109 | py | Python | meiduotest/meiduotest/settings.py | littrell0/meiduo_project_test | 3d2f6a6e528e6c45a5ffc1db0ddc5de9b4e52bf8 | [
"MIT"
] | null | null | null | meiduotest/meiduotest/settings.py | littrell0/meiduo_project_test | 3d2f6a6e528e6c45a5ffc1db0ddc5de9b4e52bf8 | [
"MIT"
] | null | null | null | meiduotest/meiduotest/settings.py | littrell0/meiduo_project_test | 3d2f6a6e528e6c45a5ffc1db0ddc5de9b4e52bf8 | [
"MIT"
] | null | null | null | """
Django settings for meiduotest project.
Generated by 'django-admin startproject' using Django 1.11.11.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '8zj*1u%iy2m+d0hg$@#5d(nrr5_-2u))cqkf^_3wj&f3ayr)o='  # NOTE(review): load from an env var for production; never commit a real key
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True  # must be False in production deployments
ALLOWED_HOSTS = []  # list the served hostnames here before running with DEBUG=False
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'meiduotest.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'meiduotest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| 25.694215 | 91 | 0.69733 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '8zj*1u%iy2m+d0hg$@#5d(nrr5_-2u))cqkf^_3wj&f3ayr)o='
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'meiduotest.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'meiduotest.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
| true | true |
f723d5a84455b1eb99d7bfc740ea30ad3707a3a0 | 4,334 | py | Python | mmdet/models/seg_heads/panoptic_fpn_head.py | SeHwanJoo/mmdetection_body | 1e1cadc6df91926fc99c4afbae383df0ea9cfed3 | [
"Apache-2.0"
] | 7 | 2021-08-08T08:34:30.000Z | 2022-01-10T18:37:47.000Z | mmdet/models/seg_heads/panoptic_fpn_head.py | SeHwanJoo/mmdetection_body | 1e1cadc6df91926fc99c4afbae383df0ea9cfed3 | [
"Apache-2.0"
] | null | null | null | mmdet/models/seg_heads/panoptic_fpn_head.py | SeHwanJoo/mmdetection_body | 1e1cadc6df91926fc99c4afbae383df0ea9cfed3 | [
"Apache-2.0"
] | 2 | 2021-08-11T05:57:50.000Z | 2022-01-04T11:13:32.000Z | import torch
import torch.nn as nn
from mmcv.runner import ModuleList
from ..builder import HEADS
from ..utils import ConvUpsample
from .base_semantic_head import BaseSemanticHead
@HEADS.register_module()
class PanopticFPNHead(BaseSemanticHead):
    """PanopticFPNHead used in Panoptic FPN.
    Args:
        num_classes (int): Number of classes, including all stuff
            classes and one thing class.
        in_channels (int): Number of channels in the input feature
            map.
        inner_channels (int): Number of channels in inner features.
        start_level (int): The start level of the input features
            used in PanopticFPN.
        end_level (int): The end level of the used features, the
            `end_level`-th layer will not be used.
        fg_range (tuple): Range of the foreground (thing) class ids.
        bg_range (tuple): Range of the background (stuff) class ids.
        conv_cfg (dict): Dictionary to construct and config
            conv layer. Default: None.
        norm_cfg (dict): Dictionary to construct and config norm layer.
            Use ``GN`` by default.
        init_cfg (dict or list[dict], optional): Initialization config dict.
        loss_seg (dict): the loss of the semantic head.
    """
    def __init__(self,
                 num_classes,
                 in_channels=256,
                 inner_channels=128,
                 start_level=0,
                 end_level=4,
                 fg_range=(1, 80),
                 bg_range=(81, 133),
                 conv_cfg=None,
                 norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
                 init_cfg=None,
                 loss_seg=dict(
                     type='CrossEntropyLoss', ignore_index=-1,
                     loss_weight=1.0)):
        super(PanopticFPNHead, self).__init__(num_classes, init_cfg, loss_seg)
        self.fg_range = fg_range
        self.bg_range = bg_range
        # Ranges are inclusive, hence the +1 for the class counts.
        self.fg_nums = self.fg_range[1] - self.fg_range[0] + 1
        self.bg_nums = self.bg_range[1] - self.bg_range[0] + 1
        # Used feature layers are [start_level, end_level)
        self.start_level = start_level
        self.end_level = end_level
        self.num_stages = end_level - start_level
        self.inner_channels = inner_channels
        # One ConvUpsample subnet per used FPN level; deeper levels get
        # more conv layers / upsampling steps so outputs share one scale.
        self.conv_upsample_layers = ModuleList()
        for i in range(start_level, end_level):
            self.conv_upsample_layers.append(
                ConvUpsample(
                    in_channels,
                    inner_channels,
                    num_layers=i if i > 0 else 1,
                    num_upsample=i if i > 0 else 0,
                    conv_cfg=conv_cfg,
                    norm_cfg=norm_cfg,
                ))
        self.conv_logits = nn.Conv2d(inner_channels, num_classes, 1)
    def _set_things_to_void(self, gt_semantic_seg):
        """Merge thing classes to one class.

        Stuff labels are shifted down by ``fg_nums``; every thing label
        collapses to the single id ``bg_nums + 1``; all other pixels
        become 0.
        """
        gt_semantic_seg = gt_semantic_seg.int()
        fg_mask = (gt_semantic_seg >= self.fg_range[0]) * (
            gt_semantic_seg <= self.fg_range[1])
        bg_mask = (gt_semantic_seg >= self.bg_range[0]) * (
            gt_semantic_seg <= self.bg_range[1])
        new_gt_seg = fg_mask.int() * (self.bg_nums + 1)
        new_gt_seg = torch.where(bg_mask, gt_semantic_seg - self.fg_nums,
                                 new_gt_seg)
        return new_gt_seg
    def loss(self, seg_preds, gt_semantic_seg, label_bias=-1):
        """The loss of PanopticFPN head.
        Things classes will be merged to one class in PanopticFPN.
        """
        gt_semantic_seg = self._set_things_to_void(gt_semantic_seg)
        return super().loss(seg_preds, gt_semantic_seg, label_bias)
    def init_weights(self):
        super().init_weights()
        nn.init.normal_(self.conv_logits.weight.data, 0, 0.01)
        self.conv_logits.bias.data.zero_()
    def forward(self, x):
        # the number of subnets must be not more than
        # the length of features.
        assert self.num_stages <= len(x)
        # Upsample every used level to a common resolution, then sum.
        feats = []
        for i, layer in enumerate(self.conv_upsample_layers):
            f = layer(x[self.start_level + i])
            feats.append(f)
        feats = torch.sum(torch.stack(feats, dim=0), dim=0)
        seg_preds = self.conv_logits(feats)
        out = dict(seg_preds=seg_preds, feats=feats)
        return out
| 38.696429 | 78 | 0.601292 | import torch
import torch.nn as nn
from mmcv.runner import ModuleList
from ..builder import HEADS
from ..utils import ConvUpsample
from .base_semantic_head import BaseSemanticHead
@HEADS.register_module()
class PanopticFPNHead(BaseSemanticHead):
def __init__(self,
num_classes,
in_channels=256,
inner_channels=128,
start_level=0,
end_level=4,
fg_range=(1, 80),
bg_range=(81, 133),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
init_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=-1,
loss_weight=1.0)):
super(PanopticFPNHead, self).__init__(num_classes, init_cfg, loss_seg)
self.fg_range = fg_range
self.bg_range = bg_range
self.fg_nums = self.fg_range[1] - self.fg_range[0] + 1
self.bg_nums = self.bg_range[1] - self.bg_range[0] + 1
self.start_level = start_level
self.end_level = end_level
self.num_stages = end_level - start_level
self.inner_channels = inner_channels
self.conv_upsample_layers = ModuleList()
for i in range(start_level, end_level):
self.conv_upsample_layers.append(
ConvUpsample(
in_channels,
inner_channels,
num_layers=i if i > 0 else 1,
num_upsample=i if i > 0 else 0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
))
self.conv_logits = nn.Conv2d(inner_channels, num_classes, 1)
def _set_things_to_void(self, gt_semantic_seg):
gt_semantic_seg = gt_semantic_seg.int()
fg_mask = (gt_semantic_seg >= self.fg_range[0]) * (
gt_semantic_seg <= self.fg_range[1])
bg_mask = (gt_semantic_seg >= self.bg_range[0]) * (
gt_semantic_seg <= self.bg_range[1])
new_gt_seg = fg_mask.int() * (self.bg_nums + 1)
new_gt_seg = torch.where(bg_mask, gt_semantic_seg - self.fg_nums,
new_gt_seg)
return new_gt_seg
def loss(self, seg_preds, gt_semantic_seg, label_bias=-1):
gt_semantic_seg = self._set_things_to_void(gt_semantic_seg)
return super().loss(seg_preds, gt_semantic_seg, label_bias)
def init_weights(self):
super().init_weights()
nn.init.normal_(self.conv_logits.weight.data, 0, 0.01)
self.conv_logits.bias.data.zero_()
def forward(self, x):
assert self.num_stages <= len(x)
feats = []
for i, layer in enumerate(self.conv_upsample_layers):
f = layer(x[self.start_level + i])
feats.append(f)
feats = torch.sum(torch.stack(feats, dim=0), dim=0)
seg_preds = self.conv_logits(feats)
out = dict(seg_preds=seg_preds, feats=feats)
return out
| true | true |
f723d8eb5daadf4e81d3c79aabb857dd8a2823c2 | 276 | py | Python | dashboard/apps.py | lynetteoh/COVID19dashboard | 61193d35acf004999443c47a30f0b9f9c6220c03 | [
"MIT"
] | null | null | null | dashboard/apps.py | lynetteoh/COVID19dashboard | 61193d35acf004999443c47a30f0b9f9c6220c03 | [
"MIT"
] | null | null | null | dashboard/apps.py | lynetteoh/COVID19dashboard | 61193d35acf004999443c47a30f0b9f9c6220c03 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
import pandas as pd
import sys
class DashboardConfig(AppConfig):
    """App configuration for the 'dashboard' Django app."""
    name = 'dashboard'
    def ready(self):
        """Hook run once the Django app registry is ready.

        Skips the model import unless the dev server is starting
        ('runserver' in sys.argv), e.g. during migrations.
        NOTE(review): Django ignores ready()'s return value.
        """
        if 'runserver' not in sys.argv:
            return True
        # Imported lazily so models are only touched after apps load.
        from dashboard.models import Case, State, Country
| 21.230769 | 57 | 0.65942 | from django.apps import AppConfig
import pandas as pd
import sys
class DashboardConfig(AppConfig):
name = 'dashboard'
def ready(self):
if 'runserver' not in sys.argv:
return True
from dashboard.models import Case, State, Country
| true | true |
f723d9b3d877c48369368b51d3310d0d2b722ba2 | 372 | py | Python | exercicios-turtle/.history/flower_20210624115429.py | Aleff13/poo-ufsc | bc1574df26f840a3c0fd5b1e0c72e5d69f61493d | [
"MIT"
] | 1 | 2021-11-28T18:49:21.000Z | 2021-11-28T18:49:21.000Z | exercicios-turtle/.history/flower_20210624115429.py | Aleff13/poo-ufsc | bc1574df26f840a3c0fd5b1e0c72e5d69f61493d | [
"MIT"
] | null | null | null | exercicios-turtle/.history/flower_20210624115429.py | Aleff13/poo-ufsc | bc1574df26f840a3c0fd5b1e0c72e5d69f61493d | [
"MIT"
] | null | null | null | import turtle
tortuguita = turtle.Turtle()
tortuguita.color('blue')
tortuguita.speed(100)
for i in range (18):
tortuguita.circle(200,100)
tortuguita.left(110)
tortuguita.up()
tortuguita.left(35)
tortuguita.forward(160)
tortuguita.down()
tortuguita.dot(70,"black")
tortuguita.left(35)
tortuguita.up()
tortuguita.color('green')
tortuguita.forward()
turtle.done() | 17.714286 | 30 | 0.752688 | import turtle
tortuguita = turtle.Turtle()
tortuguita.color('blue')
tortuguita.speed(100)
for i in range (18):
tortuguita.circle(200,100)
tortuguita.left(110)
tortuguita.up()
tortuguita.left(35)
tortuguita.forward(160)
tortuguita.down()
tortuguita.dot(70,"black")
tortuguita.left(35)
tortuguita.up()
tortuguita.color('green')
tortuguita.forward()
turtle.done() | true | true |
f723d9cabe29840070a01c4f2c8c2878b8b99f27 | 5,505 | py | Python | ccc/elasticsearch.py | mliepold/cc-utils | 3f8c4b0d11d6a52d1605026f478371411daab81e | [
"BSD-3-Clause"
] | 15 | 2018-04-18T13:25:30.000Z | 2022-03-04T09:25:41.000Z | ccc/elasticsearch.py | mliepold/cc-utils | 3f8c4b0d11d6a52d1605026f478371411daab81e | [
"BSD-3-Clause"
] | 221 | 2018-04-12T06:29:43.000Z | 2022-03-27T03:01:40.000Z | ccc/elasticsearch.py | mliepold/cc-utils | 3f8c4b0d11d6a52d1605026f478371411daab81e | [
"BSD-3-Clause"
] | 29 | 2018-04-11T14:42:23.000Z | 2021-11-09T16:26:32.000Z | # Copyright (c) 2019-2020 SAP SE or an SAP affiliate company. All rights reserved. This file is
# licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import functools
import os
import json
import elasticsearch
import ci.util
import concourse.util
import model.elasticsearch
def default_client_if_available():
    """Return an ElasticSearchClient built from the current CI config set.

    Returns None when not running on CI (no config factory available).
    """
    if not ci.util._running_on_ci():
        return None
    cfg_factory = ci.util.ctx().cfg_factory()
    cfg_set = cfg_factory.cfg_set(ci.util.current_config_set_name())
    es_config = cfg_set.elasticsearch()
    return from_cfg(elasticsearch_cfg=es_config)
def from_cfg(
    elasticsearch_cfg:model.elasticsearch.ElasticSearchConfig
):
    """Create an ElasticSearchClient from the given config object."""
    api_client = _from_cfg(elasticsearch_cfg=elasticsearch_cfg)
    return ElasticSearchClient(elasticsearch=api_client)
def _from_cfg(
    elasticsearch_cfg:model.elasticsearch.ElasticSearchConfig
):
    """Build the low-level ``elasticsearch.Elasticsearch`` API client."""
    creds = elasticsearch_cfg.credentials()
    basic_auth = (creds.username(), creds.passwd())
    return elasticsearch.Elasticsearch(
        elasticsearch_cfg.endpoints(),
        http_auth=basic_auth,
    )
@functools.lru_cache()
def _metadata_dict():
    """Collect concourse build metadata for the current CI run.

    Returns an empty dict when not running on CI. The result is cached
    per process (lru_cache), so e.g. 'creation_date' reflects the first
    call. NOTE(review): every caller receives the *same* cached dict
    object - it must not be mutated.
    """
    # XXX mv to concourse package; deduplicate with notify step
    if not ci.util._running_on_ci():
        return {}
    build = concourse.util.find_own_running_build()
    pipeline_metadata = concourse.util.get_pipeline_metadata()
    config_set = ci.util.ctx().cfg_factory().cfg_set(pipeline_metadata.current_config_set_name)
    concourse_cfg = config_set.concourse()
    meta_dict = {
        'build-id': build.id(),
        'build-name': build.build_number(),
        'build-job-name': pipeline_metadata.job_name,
        'build-team-name': pipeline_metadata.team_name,
        'build-pipeline-name': pipeline_metadata.pipeline_name,
        'atc-external-url': concourse_cfg.external_url(),
    }
    # XXX deduplicate; mv to concourse package
    # URL of this build's page in the concourse web UI.
    meta_dict['concourse_url'] = ci.util.urljoin(
        meta_dict['atc-external-url'],
        'teams',
        meta_dict['build-team-name'],
        'pipelines',
        meta_dict['build-pipeline-name'],
        'jobs',
        meta_dict['build-job-name'],
        'builds',
        meta_dict['build-name'],
    )
    # XXX do not hard-code env variables
    meta_dict['effective_version'] = os.environ.get('EFFECTIVE_VERSION')
    meta_dict['component_name'] = os.environ.get('COMPONENT_NAME')
    meta_dict['creation_date'] = datetime.datetime.now().isoformat()
    return meta_dict
class ElasticSearchClient:
    """Thin convenience wrapper around ``elasticsearch.Elasticsearch``.

    Adds optional injection of concourse build metadata (under the
    'cc_meta' key) into stored documents.
    """
    def __init__(
        self,
        elasticsearch: elasticsearch.Elasticsearch,
    ):
        # keep a reference to the low-level API client
        self._api = elasticsearch
    def store_document(
        self,
        index: str,
        body: dict,
        inject_metadata=True,
        *args,
        **kwargs,
    ):
        """Store a single document in the given index.

        NOTE(review): when metadata injection applies, the caller's
        ``body`` dict is mutated in place (a 'cc_meta' key is added).
        """
        ci.util.check_type(index, str)
        ci.util.check_type(body, dict)
        if 'doc_type' in kwargs:
            raise ValueError(
                '''
                doc_type attribute has been deprecated - see:
                https://www.elastic.co/guide/en/elasticsearch/reference/6.0/removal-of-types.html
                '''
            )
        if inject_metadata and _metadata_dict():
            md = _metadata_dict()
            body['cc_meta'] = md
        return self._api.index(
            index=index,
            doc_type='_doc',
            body=body,
            *args,
            **kwargs,
        )
    def store_documents(
        self,
        index: str,
        body: list,
        inject_metadata=True,
        *args,
        **kwargs,
    ):
        """Store multiple documents (a list of dicts) via one bulk request."""
        # Bulk-loading uses a special format: A json specifying index name and doc-type
        # (always _doc) followed by the actual document json. These pairs (one for each document)
        # are then converted to newline delimited json
        # The index json does not change for bulk-loading into a single index.
        index_json = json.dumps({
            'index': {
                '_index': index,
                '_type': '_doc'
            }
        })
        return self.store_bulk(
            body='\n'.join([f'{index_json}\n{json.dumps(d)}' for d in body]),
            inject_metadata=inject_metadata,
            *args,
            **kwargs,
        )
    def store_bulk(
        self,
        body: str,
        inject_metadata=True,
        *args,
        **kwargs,
    ):
        """Send a pre-formatted newline-delimited-json bulk body.

        Metadata is injected into every document line; bulk action
        lines (those containing an 'index' key) are left untouched.
        """
        ci.util.check_type(body, str)
        if inject_metadata and _metadata_dict():
            def inject_meta(line):
                # action lines carry an 'index' key and must not be patched
                parsed = json.loads(line)
                if 'index' not in parsed:
                    parsed['cc_meta'] = md
                return json.dumps(parsed)
            md = _metadata_dict()
            patched_body = '\n'.join([inject_meta(line) for line in body.splitlines()])
            body = patched_body
        return self._api.bulk(
            body=body,
            *args,
            **kwargs,
        )
| 29.438503 | 97 | 0.622525 |
import datetime
import functools
import os
import json
import elasticsearch
import ci.util
import concourse.util
import model.elasticsearch
def default_client_if_available():
    '''Return an `ElasticSearchClient` built from the current CI config set.

    Returns None when not running inside a CI job (no configuration is
    available in that case).
    '''
    if not ci.util._running_on_ci():
        return None
    config_set = ci.util.ctx().cfg_factory().cfg_set(
        ci.util.current_config_set_name()
    )
    return from_cfg(elasticsearch_cfg=config_set.elasticsearch())
def from_cfg(
    elasticsearch_cfg: model.elasticsearch.ElasticSearchConfig
):
    '''Create an `ElasticSearchClient` for the given ElasticSearch config.'''
    api_client = _from_cfg(elasticsearch_cfg=elasticsearch_cfg)
    return ElasticSearchClient(elasticsearch=api_client)
def _from_cfg(
    elasticsearch_cfg: model.elasticsearch.ElasticSearchConfig
):
    '''Build the raw `elasticsearch.Elasticsearch` client with basic auth.'''
    creds = elasticsearch_cfg.credentials()
    basic_auth = (creds.username(), creds.passwd())
    return elasticsearch.Elasticsearch(
        elasticsearch_cfg.endpoints(),
        http_auth=basic_auth,
    )
@functools.lru_cache()
def _metadata_dict():
    '''Assemble the CI build metadata attached to stored documents.

    Cached for the process lifetime (the build context does not change
    within one job run). Returns an empty dict outside of CI.
    '''
    if not ci.util._running_on_ci():
        return {}
    build = concourse.util.find_own_running_build()
    pipeline_metadata = concourse.util.get_pipeline_metadata()
    config_set = ci.util.ctx().cfg_factory().cfg_set(
        pipeline_metadata.current_config_set_name
    )
    concourse_cfg = config_set.concourse()
    atc_url = concourse_cfg.external_url()
    build_number = build.build_number()
    # XXX deduplicate; mv to concourse package
    build_url = ci.util.urljoin(
        atc_url,
        'teams',
        pipeline_metadata.team_name,
        'pipelines',
        pipeline_metadata.pipeline_name,
        'jobs',
        pipeline_metadata.job_name,
        'builds',
        build_number,
    )
    # XXX do not hard-code env variables
    return {
        'build-id': build.id(),
        'build-name': build_number,
        'build-job-name': pipeline_metadata.job_name,
        'build-team-name': pipeline_metadata.team_name,
        'build-pipeline-name': pipeline_metadata.pipeline_name,
        'atc-external-url': atc_url,
        'concourse_url': build_url,
        'effective_version': os.environ.get('EFFECTIVE_VERSION'),
        'component_name': os.environ.get('COMPONENT_NAME'),
        'creation_date': datetime.datetime.now().isoformat(),
    }
class ElasticSearchClient:
    '''Wrapper around `elasticsearch.Elasticsearch` used to store documents.

    Documents may optionally be enriched with CI build metadata (attribute
    `cc_meta`) obtained from the module-level `_metadata_dict()` helper.
    '''
    def __init__(
        self,
        elasticsearch: elasticsearch.Elasticsearch,
    ):
        # underlying low-level API client
        self._api = elasticsearch

    def store_document(
        self,
        index: str,
        body: dict,
        inject_metadata=True,
        *args,
        **kwargs,
    ):
        '''Index a single document into `index`.

        Raises ValueError when the deprecated `doc_type` argument is passed.
        If `inject_metadata` is truthy and metadata is available, `body` is
        mutated to carry it under the `cc_meta` key.
        '''
        ci.util.check_type(index, str)
        ci.util.check_type(body, dict)
        if 'doc_type' in kwargs:
            raise ValueError(
                '''
                doc_type attribute has been deprecated - see:
                https://www.elastic.co/guide/en/elasticsearch/reference/6.0/removal-of-types.html
                '''
            )
        if inject_metadata and _metadata_dict():
            # _metadata_dict() is lru_cached, so the double call is cheap
            md = _metadata_dict()
            body['cc_meta'] = md
        return self._api.index(
            index=index,
            doc_type='_doc',
            body=body,
            *args,
            **kwargs,
        )

    def store_documents(
        self,
        index: str,
        body: [dict],
        inject_metadata=True,
        *args,
        **kwargs,
    ):
        '''Bulk-load all documents in `body` into a single index.

        Bulk-loading uses a special newline-delimited format: an action json
        (index name, doc type - always `_doc`) followed by the document json,
        one pair per document. Since all documents target the same index, the
        action json is rendered only once and reused for every document.
        '''
        index_json = json.dumps({
            'index': {
                '_index': index,
                '_type': '_doc'
            }
        })
        return self.store_bulk(
            body='\n'.join([f'{index_json}\n{json.dumps(d)}' for d in body]),
            inject_metadata=inject_metadata,
            *args,
            **kwargs,
        )

    def store_bulk(
        self,
        body: str,
        inject_metadata=True,
        *args,
        **kwargs,
    ):
        '''Submit a pre-rendered newline-delimited bulk body.

        When metadata injection is enabled, every document line (any line
        whose json does not contain an `index` key, i.e. is not an action
        line) is patched to include the CI metadata under `cc_meta`.
        '''
        ci.util.check_type(body, str)
        if inject_metadata and _metadata_dict():
            def inject_meta(line):
                parsed = json.loads(line)
                if 'index' not in parsed:
                    # document line - attach metadata and re-serialize
                    parsed['cc_meta'] = md
                    return json.dumps(parsed)
                # action line - pass through unchanged
                return line
            md = _metadata_dict()
            patched_body = '\n'.join([inject_meta(line) for line in body.splitlines()])
            body = patched_body
        return self._api.bulk(
            body=body,
            *args,
            **kwargs,
        )
| true | true |
f723da3d31fc0d1c210b41c7779cc8ee4f4cd08c | 6,876 | py | Python | src/wallet/wallet_block_store.py | DONG-Jason/chia-blockchain | 27b28d62f6b315e45bc00231e007c775f07a414a | [
"Apache-2.0"
] | null | null | null | src/wallet/wallet_block_store.py | DONG-Jason/chia-blockchain | 27b28d62f6b315e45bc00231e007c775f07a414a | [
"Apache-2.0"
] | null | null | null | src/wallet/wallet_block_store.py | DONG-Jason/chia-blockchain | 27b28d62f6b315e45bc00231e007c775f07a414a | [
"Apache-2.0"
] | null | null | null | from typing import Dict, Optional, Tuple, List
import aiosqlite
from src.consensus.sub_block_record import SubBlockRecord
from src.types.header_block import HeaderBlock
from src.util.ints import uint32, uint64
from src.wallet.block_record import HeaderBlockRecord
from src.types.sized_bytes import bytes32
class WalletBlockStore:
    """
    This object handles HeaderBlocks and SubBlocks stored in DB used by wallet.
    """

    db: aiosqlite.Connection

    @classmethod
    async def create(cls, connection: aiosqlite.Connection):
        """Create the store, (idempotently) setting up tables and indexes."""
        self = cls()
        self.db = connection
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS header_blocks(header_hash text PRIMARY KEY, sub_height int, height int,"
            " timestamp int, block blob)"
        )
        await self.db.execute("CREATE INDEX IF NOT EXISTS header_hash on header_blocks(header_hash)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS timestamp on header_blocks(timestamp)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS sub_height on header_blocks(sub_height)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS height on header_blocks(height)")

        # Sub block records
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS sub_block_records(header_hash "
            "text PRIMARY KEY, prev_hash text, sub_height bigint, height int, weight bigint, total_iters text,"
            "sub_block blob, is_peak tinyint)"
        )

        # Height indexes so we can look up in order of height for sync purposes.
        # SQLite index names share a single per-database namespace, so this
        # index must not reuse the name "height" (already taken by the
        # header_blocks index above) - with "IF NOT EXISTS", the statement
        # would otherwise be silently skipped and the index never created.
        await self.db.execute("CREATE INDEX IF NOT EXISTS sub_block_height on sub_block_records(sub_height)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS sub_block_records_height on sub_block_records(height)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS hh on sub_block_records(header_hash)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS peak on sub_block_records(is_peak)")
        await self.db.commit()
        return self

    async def _clear_database(self):
        """Delete all stored header blocks."""
        cursor_2 = await self.db.execute("DELETE FROM header_blocks")
        await cursor_2.close()
        await self.db.commit()

    async def rollback_lca_to_block(self, block_index):
        # TODO
        pass

    async def add_block_record(self, block_record: HeaderBlockRecord, sub_block: SubBlockRecord):
        """
        Adds a block record to the database. This block record is assumed to be connected
        to the chain, but it may or may not be in the LCA path.
        """
        # Sub blocks without a foliage block carry no timestamp; store 0.
        if block_record.header.foliage_block is not None:
            timestamp = block_record.header.foliage_block.timestamp
        else:
            timestamp = uint64(0)
        cursor = await self.db.execute(
            "INSERT OR REPLACE INTO header_blocks VALUES(?, ?, ?, ?, ?)",
            (
                block_record.header_hash.hex(),
                block_record.sub_block_height,
                sub_block.height,
                timestamp,
                bytes(block_record),
            ),
        )
        await cursor.close()
        cursor_2 = await self.db.execute(
            "INSERT OR REPLACE INTO sub_block_records VALUES(?, ?, ?, ?, ?, ?, ?, ?)",
            (
                block_record.header.header_hash.hex(),
                block_record.header.prev_header_hash.hex(),
                block_record.header.sub_block_height,
                block_record.header.height,
                # weight and total_iters are stored as fixed-width (16-byte)
                # big-endian hex strings
                block_record.header.weight.to_bytes(128 // 8, "big", signed=False).hex(),
                block_record.header.total_iters.to_bytes(128 // 8, "big", signed=False).hex(),
                bytes(sub_block),
                False,
            ),
        )
        await cursor_2.close()
        await self.db.commit()

    async def get_header_block(self, header_hash: bytes32) -> Optional[HeaderBlock]:
        """Gets the header block with the given hash from the database, if present."""
        cursor = await self.db.execute("SELECT * from header_blocks WHERE header_hash=?", (header_hash.hex(),))
        row = await cursor.fetchone()
        await cursor.close()
        if row is None:
            return None
        # column 4 is the serialized HeaderBlockRecord blob
        return HeaderBlockRecord.from_bytes(row[4]).header

    async def get_header_block_at(self, sub_heights: List[uint32]) -> List[HeaderBlock]:
        """Gets the header blocks at the given sub-heights, if present."""
        if not sub_heights:
            return []
        heights_db = tuple(sub_heights)
        # only the "?" placeholders are interpolated; values are bound safely
        formatted_str = f'SELECT block from header_blocks WHERE sub_height in ({"?," * (len(heights_db) - 1)}?)'
        cursor = await self.db.execute(formatted_str, heights_db)
        rows = await cursor.fetchall()
        await cursor.close()
        return [HeaderBlock.from_bytes(row[0]) for row in rows]

    async def get_header_block_record(self, header_hash: bytes32) -> Optional[HeaderBlockRecord]:
        """Gets the full header block record with the given hash, if present."""
        cursor = await self.db.execute("SELECT * from header_blocks WHERE header_hash=?", (header_hash.hex(),))
        row = await cursor.fetchone()
        await cursor.close()
        if row is None:
            return None
        return HeaderBlockRecord.from_bytes(row[4])

    async def get_sub_block_record(self, header_hash: bytes32) -> Optional[SubBlockRecord]:
        """Gets the sub block record with the given header hash, if present."""
        cursor = await self.db.execute(
            "SELECT sub_block from sub_block_records WHERE header_hash=?",
            (header_hash.hex(),),
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is None:
            return None
        return SubBlockRecord.from_bytes(row[0])

    async def get_sub_block_records(
        self,
    ) -> Tuple[Dict[bytes32, SubBlockRecord], Optional[bytes32]]:
        """
        Returns a dictionary with all sub blocks, as well as the header hash of the peak,
        if present.
        """
        cursor = await self.db.execute("SELECT * from sub_block_records")
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, SubBlockRecord] = {}
        peak: Optional[bytes32] = None
        for row in rows:
            header_hash = bytes.fromhex(row[0])
            # column 6 is the serialized SubBlockRecord, column 7 the is_peak flag
            ret[header_hash] = SubBlockRecord.from_bytes(row[6])
            if row[7]:
                assert peak is None  # Sanity check, only one peak
                peak = header_hash
        return ret, peak

    async def set_peak(self, header_hash: bytes32) -> None:
        """Marks the given header hash as the unique peak, clearing any previous peak."""
        cursor_1 = await self.db.execute("UPDATE sub_block_records SET is_peak=0 WHERE is_peak=1")
        await cursor_1.close()
        cursor_2 = await self.db.execute(
            "UPDATE sub_block_records SET is_peak=1 WHERE header_hash=?",
            (header_hash.hex(),),
        )
        await cursor_2.close()
        await self.db.commit()
| 39.745665 | 112 | 0.627981 | from typing import Dict, Optional, Tuple, List
import aiosqlite
from src.consensus.sub_block_record import SubBlockRecord
from src.types.header_block import HeaderBlock
from src.util.ints import uint32, uint64
from src.wallet.block_record import HeaderBlockRecord
from src.types.sized_bytes import bytes32
class WalletBlockStore:
    """Stores HeaderBlocks and SubBlockRecords for the wallet in an aiosqlite DB."""

    # shared database connection, assigned in `create`
    db: aiosqlite.Connection

    @classmethod
    async def create(cls, connection: aiosqlite.Connection):
        """Create the store, (idempotently) setting up tables and indexes."""
        self = cls()
        self.db = connection
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS header_blocks(header_hash text PRIMARY KEY, sub_height int, height int,"
            " timestamp int, block blob)"
        )
        await self.db.execute("CREATE INDEX IF NOT EXISTS header_hash on header_blocks(header_hash)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS timestamp on header_blocks(timestamp)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS sub_height on header_blocks(sub_height)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS height on header_blocks(height)")
        # sub block records table
        await self.db.execute(
            "CREATE TABLE IF NOT EXISTS sub_block_records(header_hash "
            "text PRIMARY KEY, prev_hash text, sub_height bigint, height int, weight bigint, total_iters text,"
            "sub_block blob, is_peak tinyint)"
        )
        await self.db.execute("CREATE INDEX IF NOT EXISTS sub_block_height on sub_block_records(sub_height)")
        # NOTE(review): SQLite index names share one per-database namespace, so
        # this "height" index is silently skipped ("IF NOT EXISTS") because the
        # header_blocks index above already uses that name - the intended index
        # on sub_block_records(height) is never created. Consider renaming.
        await self.db.execute("CREATE INDEX IF NOT EXISTS height on sub_block_records(height)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS hh on sub_block_records(header_hash)")
        await self.db.execute("CREATE INDEX IF NOT EXISTS peak on sub_block_records(is_peak)")
        await self.db.commit()
        # NOTE(review): duplicated commit - the second call is a no-op
        await self.db.commit()
        return self

    async def _clear_database(self):
        """Delete all stored header blocks."""
        cursor_2 = await self.db.execute("DELETE FROM header_blocks")
        await cursor_2.close()
        await self.db.commit()

    async def rollback_lca_to_block(self, block_index):
        # not implemented yet
        pass

    async def add_block_record(self, block_record: HeaderBlockRecord, sub_block: SubBlockRecord):
        """Insert (or replace) a header block and its sub block record."""
        # sub blocks without a foliage block carry no timestamp; store 0
        if block_record.header.foliage_block is not None:
            timestamp = block_record.header.foliage_block.timestamp
        else:
            timestamp = uint64(0)
        cursor = await self.db.execute(
            "INSERT OR REPLACE INTO header_blocks VALUES(?, ?, ?, ?, ?)",
            (
                block_record.header_hash.hex(),
                block_record.sub_block_height,
                sub_block.height,
                timestamp,
                bytes(block_record),
            ),
        )
        await cursor.close()
        cursor_2 = await self.db.execute(
            "INSERT OR REPLACE INTO sub_block_records VALUES(?, ?, ?, ?, ?, ?, ?, ?)",
            (
                block_record.header.header_hash.hex(),
                block_record.header.prev_header_hash.hex(),
                block_record.header.sub_block_height,
                block_record.header.height,
                # weight and total_iters stored as fixed-width (16-byte)
                # big-endian hex strings
                block_record.header.weight.to_bytes(128 // 8, "big", signed=False).hex(),
                block_record.header.total_iters.to_bytes(128 // 8, "big", signed=False).hex(),
                bytes(sub_block),
                False,
            ),
        )
        await cursor_2.close()
        await self.db.commit()

    async def get_header_block(self, header_hash: bytes32) -> Optional[HeaderBlock]:
        """Return the header block with the given hash, or None if absent."""
        cursor = await self.db.execute("SELECT * from header_blocks WHERE header_hash=?", (header_hash.hex(),))
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            # column 4 is the serialized HeaderBlockRecord blob
            hbr = HeaderBlockRecord.from_bytes(row[4])
            return hbr.header
        else:
            return None

    async def get_header_block_at(self, sub_heights: List[uint32]) -> List[HeaderBlock]:
        """Return header blocks stored at the given sub-heights."""
        if len(sub_heights) == 0:
            return []
        heights_db = tuple(sub_heights)
        # only the "?" placeholders are interpolated; values are bound safely
        formatted_str = f'SELECT block from header_blocks WHERE sub_height in ({"?," * (len(heights_db) - 1)}?)'
        cursor = await self.db.execute(formatted_str, heights_db)
        rows = await cursor.fetchall()
        await cursor.close()
        return [HeaderBlock.from_bytes(row[0]) for row in rows]

    async def get_header_block_record(self, header_hash: bytes32) -> Optional[HeaderBlockRecord]:
        """Return the full HeaderBlockRecord with the given hash, or None."""
        cursor = await self.db.execute("SELECT * from header_blocks WHERE header_hash=?", (header_hash.hex(),))
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            hbr = HeaderBlockRecord.from_bytes(row[4])
            return hbr
        else:
            return None

    async def get_sub_block_record(self, header_hash: bytes32) -> Optional[SubBlockRecord]:
        """Return the SubBlockRecord with the given header hash, or None."""
        cursor = await self.db.execute(
            "SELECT sub_block from sub_block_records WHERE header_hash=?",
            (header_hash.hex(),),
        )
        row = await cursor.fetchone()
        await cursor.close()
        if row is not None:
            return SubBlockRecord.from_bytes(row[0])
        return None

    async def get_sub_block_records(
        self,
    ) -> Tuple[Dict[bytes32, SubBlockRecord], Optional[bytes32]]:
        """Return all sub block records keyed by header hash, plus the peak's hash (if any)."""
        cursor = await self.db.execute("SELECT * from sub_block_records")
        rows = await cursor.fetchall()
        await cursor.close()
        ret: Dict[bytes32, SubBlockRecord] = {}
        peak: Optional[bytes32] = None
        for row in rows:
            header_hash = bytes.fromhex(row[0])
            # column 6 is the serialized SubBlockRecord, column 7 the is_peak flag
            ret[header_hash] = SubBlockRecord.from_bytes(row[6])
            if row[7]:
                assert peak is None  # sanity check: at most one peak
                peak = header_hash
        return ret, peak

    async def set_peak(self, header_hash: bytes32) -> None:
        """Mark the given header hash as the unique peak, clearing any previous peak."""
        cursor_1 = await self.db.execute("UPDATE sub_block_records SET is_peak=0 WHERE is_peak=1")
        await cursor_1.close()
        cursor_2 = await self.db.execute(
            "UPDATE sub_block_records SET is_peak=1 WHERE header_hash=?",
            (header_hash.hex(),),
        )
        await cursor_2.close()
        await self.db.commit()
| true | true |
f723db4534fac7bbdbc1fca243f9cc9d68e9c713 | 56,361 | py | Python | plugins/modules/oci_loadbalancer_backend_set.py | LaudateCorpus1/oci-ansible-collection | 2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_loadbalancer_backend_set.py | LaudateCorpus1/oci-ansible-collection | 2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7 | [
"Apache-2.0"
] | null | null | null | plugins/modules/oci_loadbalancer_backend_set.py | LaudateCorpus1/oci-ansible-collection | 2b1cd87b4d652a97c1ca752cfc4fdc4bdb37a7e7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# Copyright (c) 2020, 2022 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
# GENERATED FILE - DO NOT EDIT - MANUAL CHANGES WILL BE OVERWRITTEN
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible module metadata (consumed by tooling such as ansible-doc);
# declares this generated module's maturity and support level.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_loadbalancer_backend_set
short_description: Manage a BackendSet resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a BackendSet resource in Oracle Cloud Infrastructure
- For I(state=present), adds a backend set to a load balancer.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
name:
description:
- A friendly name for the backend set. It must be unique and it cannot be changed.
- Valid backend set names include only alphanumeric characters, dashes, and underscores. Backend set names cannot
contain spaces. Avoid entering confidential information.
- "Example: `example_backend_set`"
type: str
required: true
policy:
description:
- The load balancer policy for the backend set. To get a list of available policies, use the
L(ListPolicies,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/loadbalancer/20170115/LoadBalancerPolicy/ListPolicies) operation.
- "Example: `LEAST_CONNECTIONS`"
- Required for create using I(state=present), update using I(state=present) with name present.
type: str
backends:
description:
- ""
- Required for update using I(state=present) with name present.
type: list
elements: dict
suboptions:
ip_address:
description:
- The IP address of the backend server.
- "Example: `10.0.0.3`"
type: str
required: true
port:
description:
- The communication port for the backend server.
- "Example: `8080`"
type: int
required: true
weight:
description:
- The load balancing policy weight assigned to the server. Backend servers with a higher weight receive a larger
proportion of incoming traffic. For example, a server weighted '3' receives 3 times the number of new connections
as a server weighted '1'.
For more information on load balancing policies, see
L(How Load Balancing Policies Work,https://docs.cloud.oracle.com/Content/Balance/Reference/lbpolicies.htm).
- "Example: `3`"
type: int
backup:
description:
- "Whether the load balancer should treat this server as a backup unit. If `true`, the load balancer forwards no ingress
traffic to this backend server unless all other backend servers not marked as \\"backup\\" fail the health check policy."
- "**Note:** You cannot add a backend server marked as `backup` to a backend set that uses the IP Hash policy."
- "Example: `false`"
type: bool
drain:
description:
- "Whether the load balancer should drain this server. Servers marked \\"drain\\" receive no new
incoming traffic."
- "Example: `false`"
type: bool
offline:
description:
- Whether the load balancer should treat this server as offline. Offline servers receive no incoming
traffic.
- "Example: `false`"
type: bool
health_checker:
description:
- ""
- Required for create using I(state=present), update using I(state=present) with name present.
type: dict
suboptions:
protocol:
description:
- The protocol the health check must use; either HTTP or TCP.
- "Example: `HTTP`"
type: str
required: true
url_path:
description:
- The path against which to run the health check.
- "Example: `/healthcheck`"
type: str
port:
description:
- The backend server port against which to run the health check. If the port is not specified, the load balancer uses the
port information from the `Backend` object.
- "Example: `8080`"
type: int
return_code:
description:
- The status code a healthy backend server should return.
- "Example: `200`"
type: int
retries:
description:
- "The number of retries to attempt before a backend server is considered \\"unhealthy\\". This number also applies
when recovering a server to the \\"healthy\\" state."
- "Example: `3`"
type: int
timeout_in_millis:
description:
- The maximum time, in milliseconds, to wait for a reply to a health check. A health check is successful only if a reply
returns within this timeout period.
- "Example: `3000`"
type: int
interval_in_millis:
description:
- The interval between health checks, in milliseconds.
- "Example: `10000`"
type: int
response_body_regex:
description:
- A regular expression for parsing the response body from the backend server.
- "Example: `^((?!false).|\\\\s)*$`"
type: str
ssl_configuration:
description:
- ""
- This parameter is updatable.
type: dict
suboptions:
verify_depth:
description:
- The maximum depth for peer certificate chain verification.
- "Example: `3`"
type: int
verify_peer_certificate:
description:
- Whether the load balancer listener should verify peer certificates.
- "Example: `true`"
type: bool
trusted_certificate_authority_ids:
description:
- Ids for OCI certificates service CA or CA bundles for the load balancer to trust.
- "Example: `[ocid1.cabundle.oc1.us-ashburn-1.amaaaaaaav3bgsaagl4zzyqdop5i2vuwoqewdvauuw34llqa74otq2jdsfyq]`"
type: list
elements: str
certificate_ids:
description:
- Ids for OCI certificates service certificates. Currently only a single Id may be passed.
- "Example: `[ocid1.certificate.oc1.us-ashburn-1.amaaaaaaav3bgsaa5o2q7rh5nfmkkukfkogasqhk6af2opufhjlqg7m6jqzq]`"
type: list
elements: str
certificate_name:
description:
- A friendly name for the certificate bundle. It must be unique and it cannot be changed.
Valid certificate bundle names include only alphanumeric characters, dashes, and underscores.
Certificate bundle names cannot contain spaces. Avoid entering confidential information.
- "Example: `example_certificate_bundle`"
type: str
protocols:
description:
- A list of SSL protocols the load balancer must support for HTTPS or SSL connections.
- The load balancer uses SSL protocols to establish a secure connection between a client and a server. A secure
connection ensures that all data passed between the client and the server is private.
- "The Load Balancing service supports the following protocols:"
- "* TLSv1
* TLSv1.1
* TLSv1.2"
- If this field is not specified, TLSv1.2 is the default.
- "**Warning:** All SSL listeners created on a given port must use the same set of SSL protocols."
- "**Notes:**"
- "* The handshake to establish an SSL connection fails if the client supports none of the specified protocols.
* You must ensure compatibility between the specified SSL protocols and the ciphers configured in the cipher
suite.
* For all existing load balancer listeners and backend sets that predate this feature, the `GET` operation
displays a list of SSL protocols currently used by those resources."
- "example: `[\\"TLSv1.1\\", \\"TLSv1.2\\"]`"
type: list
elements: str
cipher_suite_name:
description:
- The name of the cipher suite to use for HTTPS or SSL connections.
- If this field is not specified, the default is `oci-default-ssl-cipher-suite-v1`.
- "**Notes:**"
- "* You must ensure compatibility between the specified SSL protocols and the ciphers configured in the cipher
suite. Clients cannot perform an SSL handshake if there is an incompatible configuration.
* You must ensure compatibility between the ciphers configured in the cipher suite and the configured
certificates. For example, RSA-based ciphers require RSA certificates and ECDSA-based ciphers require ECDSA
certificates.
* If the cipher configuration is not modified after load balancer creation, the `GET` operation returns
`oci-default-ssl-cipher-suite-v1` as the value of this field in the SSL configuration for existing listeners
that predate this feature.
* If the cipher configuration was modified using Oracle operations after load balancer creation, the `GET`
operation returns `oci-customized-ssl-cipher-suite` as the value of this field in the SSL configuration for
existing listeners that predate this feature.
* The `GET` operation returns `oci-wider-compatible-ssl-cipher-suite-v1` as the value of this field in the SSL
configuration for existing backend sets that predate this feature.
* If the `GET` operation on a listener returns `oci-customized-ssl-cipher-suite` as the value of this field,
you must specify an appropriate predefined or custom cipher suite name when updating the resource.
* The `oci-customized-ssl-cipher-suite` Oracle reserved cipher suite name is not accepted as valid input for
this field."
- "example: `example_cipher_suite`"
type: str
server_order_preference:
description:
- When this attribute is set to ENABLED, the system gives preference to the server ciphers over the client
ciphers.
- "**Note:** This configuration is applicable only when the load balancer is acting as an SSL/HTTPS server. This
field is ignored when the `SSLConfiguration` object is associated with a backend set."
type: str
choices:
- "ENABLED"
- "DISABLED"
session_persistence_configuration:
description:
- ""
- This parameter is updatable.
type: dict
suboptions:
cookie_name:
description:
- "The name of the cookie used to detect a session initiated by the backend server. Use '*' to specify
that any cookie set by the backend causes the session to persist."
- "Example: `example_cookie`"
type: str
required: true
disable_fallback:
description:
- Whether the load balancer is prevented from directing traffic from a persistent session client to
a different backend server if the original server is unavailable. Defaults to false.
- "Example: `false`"
type: bool
lb_cookie_session_persistence_configuration:
description:
- ""
- This parameter is updatable.
type: dict
suboptions:
cookie_name:
description:
- "The name of the cookie inserted by the load balancer. If this field is not configured, the cookie name defaults
to \\"X-Oracle-BMC-LBS-Route\\"."
- "Example: `example_cookie`"
- "**Notes:**"
- "* Ensure that the cookie name used at the backend application servers is different from the cookie name used
at the load balancer. To minimize the chance of name collision, Oracle recommends that you use a prefix
such as \\"X-Oracle-OCI-\\" for this field."
- "* If a backend server and the load balancer both insert cookies with the same name, the client or browser
behavior can vary depending on the domain and path values associated with the cookie. If the name, domain,
and path values of the `Set-cookie` generated by a backend server and the `Set-cookie` generated by the
load balancer are all the same, the client or browser treats them as one cookie and returns only one of
the cookie values in subsequent requests. If both `Set-cookie` names are the same, but the domain and path
names are different, the client or browser treats them as two different cookies."
type: str
disable_fallback:
description:
- Whether the load balancer is prevented from directing traffic from a persistent session client to
a different backend server if the original server is unavailable. Defaults to false.
- "Example: `false`"
type: bool
domain:
description:
- The domain in which the cookie is valid. The `Set-cookie` header inserted by the load balancer contains a
domain attribute with the specified value.
- This attribute has no default value. If you do not specify a value, the load balancer does not insert the domain
attribute into the `Set-cookie` header.
- "**Notes:**"
- "* L(RFC 6265 - HTTP State Management Mechanism,https://www.ietf.org/rfc/rfc6265.txt) describes client and
browser behavior when the domain attribute is present or not present in the `Set-cookie` header."
- If the value of the `Domain` attribute is `example.com` in the `Set-cookie` header, the client includes
the same cookie in the `Cookie` header when making HTTP requests to `example.com`, `www.example.com`, and
`www.abc.example.com`. If the `Domain` attribute is not present, the client returns the cookie only for
the domain to which the original request was made.
- "* Ensure that this attribute specifies the correct domain value. If the `Domain` attribute in the `Set-cookie`
header does not include the domain to which the original request was made, the client or browser might reject
the cookie. As specified in RFC 6265, the client accepts a cookie with the `Domain` attribute value `example.com`
or `www.example.com` sent from `www.example.com`. It does not accept a cookie with the `Domain` attribute
`abc.example.com` or `www.abc.example.com` sent from `www.example.com`."
- "Example: `example.com`"
type: str
path:
description:
- The path in which the cookie is valid. The `Set-cookie header` inserted by the load balancer contains a `Path`
attribute with the specified value.
- Clients include the cookie in an HTTP request only if the path portion of the request-uri matches, or is a
subdirectory of, the cookie's `Path` attribute.
- The default value is `/`.
- "Example: `/example`"
type: str
max_age_in_seconds:
description:
- The amount of time the cookie remains valid. The `Set-cookie` header inserted by the load balancer contains
a `Max-Age` attribute with the specified value.
- The specified value must be at least one second. There is no default value for this attribute. If you do not
specify a value, the load balancer does not include the `Max-Age` attribute in the `Set-cookie` header. In
most cases, the client or browser retains the cookie until the current session ends, as defined by the client.
- "Example: `3600`"
type: int
is_secure:
description:
- Whether the `Set-cookie` header should contain the `Secure` attribute. If `true`, the `Set-cookie` header
inserted by the load balancer contains the `Secure` attribute, which directs the client or browser to send the
cookie only using a secure protocol.
- "**Note:** If you set this field to `true`, you cannot associate the corresponding backend set with an HTTP
listener."
- "Example: `true`"
type: bool
is_http_only:
description:
- Whether the `Set-cookie` header should contain the `HttpOnly` attribute. If `true`, the `Set-cookie` header
inserted by the load balancer contains the `HttpOnly` attribute, which limits the scope of the cookie to HTTP
requests. This attribute directs the client or browser to omit the cookie when providing access to cookies
through non-HTTP APIs. For example, it restricts the cookie from JavaScript channels.
- "Example: `true`"
type: bool
load_balancer_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the load balancer on which to add a backend set.
type: str
aliases: ["id"]
required: true
state:
description:
- The state of the BackendSet.
- Use I(state=present) to create or update a BackendSet.
- Use I(state=absent) to delete a BackendSet.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create backend_set
oci_loadbalancer_backend_set:
# required
name: name_example
policy: policy_example
health_checker:
# required
protocol: protocol_example
# optional
url_path: url_path_example
port: 56
return_code: 56
retries: 56
timeout_in_millis: 56
interval_in_millis: 56
response_body_regex: response_body_regex_example
load_balancer_id: "ocid1.loadbalancer.oc1..xxxxxxEXAMPLExxxxxx"
# optional
backends:
- # required
ip_address: ip_address_example
port: 56
# optional
weight: 56
backup: true
drain: true
offline: true
ssl_configuration:
# optional
verify_depth: 56
verify_peer_certificate: true
trusted_certificate_authority_ids: [ "trusted_certificate_authority_ids_example" ]
certificate_ids: [ "certificate_ids_example" ]
certificate_name: certificate_name_example
protocols: [ "protocols_example" ]
cipher_suite_name: cipher_suite_name_example
server_order_preference: ENABLED
session_persistence_configuration:
# required
cookie_name: cookie_name_example
# optional
disable_fallback: true
lb_cookie_session_persistence_configuration:
# optional
cookie_name: cookie_name_example
disable_fallback: true
domain: domain_example
path: path_example
max_age_in_seconds: 56
is_secure: true
is_http_only: true
- name: Update backend_set
oci_loadbalancer_backend_set:
# required
name: name_example
policy: policy_example
backends:
- # required
ip_address: ip_address_example
port: 56
# optional
weight: 56
backup: true
drain: true
offline: true
health_checker:
# required
protocol: protocol_example
# optional
url_path: url_path_example
port: 56
return_code: 56
retries: 56
timeout_in_millis: 56
interval_in_millis: 56
response_body_regex: response_body_regex_example
load_balancer_id: "ocid1.loadbalancer.oc1..xxxxxxEXAMPLExxxxxx"
# optional
ssl_configuration:
# optional
verify_depth: 56
verify_peer_certificate: true
trusted_certificate_authority_ids: [ "trusted_certificate_authority_ids_example" ]
certificate_ids: [ "certificate_ids_example" ]
certificate_name: certificate_name_example
protocols: [ "protocols_example" ]
cipher_suite_name: cipher_suite_name_example
server_order_preference: ENABLED
session_persistence_configuration:
# required
cookie_name: cookie_name_example
# optional
disable_fallback: true
lb_cookie_session_persistence_configuration:
# optional
cookie_name: cookie_name_example
disable_fallback: true
domain: domain_example
path: path_example
max_age_in_seconds: 56
is_secure: true
is_http_only: true
- name: Delete backend_set
oci_loadbalancer_backend_set:
# required
name: name_example
load_balancer_id: "ocid1.loadbalancer.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
"""
RETURN = """
backend_set:
description:
- Details of the BackendSet resource acted upon by the current operation
returned: on success
type: complex
contains:
name:
description:
- A friendly name for the backend set. It must be unique and it cannot be changed.
- Valid backend set names include only alphanumeric characters, dashes, and underscores. Backend set names cannot
contain spaces. Avoid entering confidential information.
- "Example: `example_backend_set`"
returned: on success
type: str
sample: name_example
policy:
description:
- The load balancer policy for the backend set. To get a list of available policies, use the
L(ListPolicies,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/loadbalancer/20170115/LoadBalancerPolicy/ListPolicies) operation.
- "Example: `LEAST_CONNECTIONS`"
returned: on success
type: str
sample: policy_example
backends:
description:
- ""
returned: on success
type: complex
contains:
name:
description:
- A read-only field showing the IP address and port that uniquely identify this backend server in the backend set.
- "Example: `10.0.0.3:8080`"
returned: on success
type: str
sample: name_example
ip_address:
description:
- The IP address of the backend server.
- "Example: `10.0.0.3`"
returned: on success
type: str
sample: ip_address_example
port:
description:
- The communication port for the backend server.
- "Example: `8080`"
returned: on success
type: int
sample: 56
weight:
description:
- The load balancing policy weight assigned to the server. Backend servers with a higher weight receive a larger
proportion of incoming traffic. For example, a server weighted '3' receives 3 times the number of new connections
as a server weighted '1'.
For more information on load balancing policies, see
L(How Load Balancing Policies Work,https://docs.cloud.oracle.com/Content/Balance/Reference/lbpolicies.htm).
- "Example: `3`"
returned: on success
type: int
sample: 56
drain:
description:
- "Whether the load balancer should drain this server. Servers marked \\"drain\\" receive no new
incoming traffic."
- "Example: `false`"
returned: on success
type: bool
sample: true
backup:
description:
- "Whether the load balancer should treat this server as a backup unit. If `true`, the load balancer forwards no ingress
traffic to this backend server unless all other backend servers not marked as \\"backup\\" fail the health check policy."
- "**Note:** You cannot add a backend server marked as `backup` to a backend set that uses the IP Hash policy."
- "Example: `false`"
returned: on success
type: bool
sample: true
offline:
description:
- Whether the load balancer should treat this server as offline. Offline servers receive no incoming
traffic.
- "Example: `false`"
returned: on success
type: bool
sample: true
health_checker:
description:
- ""
returned: on success
type: complex
contains:
protocol:
description:
- The protocol the health check must use; either HTTP or TCP.
- "Example: `HTTP`"
returned: on success
type: str
sample: protocol_example
url_path:
description:
- The path against which to run the health check.
- "Example: `/healthcheck`"
returned: on success
type: str
sample: url_path_example
port:
description:
- The backend server port against which to run the health check. If the port is not specified, the load balancer uses the
port information from the `Backend` object.
- "Example: `8080`"
returned: on success
type: int
sample: 56
return_code:
description:
- "The status code a healthy backend server should return. If you configure the health check policy to use the HTTP protocol,
you can use common HTTP status codes such as \\"200\\"."
- "Example: `200`"
returned: on success
type: int
sample: 56
retries:
description:
- "The number of retries to attempt before a backend server is considered \\"unhealthy\\". This number also applies
when recovering a server to the \\"healthy\\" state. Defaults to 3."
- "Example: `3`"
returned: on success
type: int
sample: 56
timeout_in_millis:
description:
- The maximum time, in milliseconds, to wait for a reply to a health check. A health check is successful only if a reply
returns within this timeout period. Defaults to 3000 (3 seconds).
- "Example: `3000`"
returned: on success
type: int
sample: 56
interval_in_millis:
description:
- The interval between health checks, in milliseconds. The default is 10000 (10 seconds).
- "Example: `10000`"
returned: on success
type: int
sample: 56
response_body_regex:
description:
- A regular expression for parsing the response body from the backend server.
- "Example: `^((?!false).|\\\\s)*$`"
returned: on success
type: str
sample: response_body_regex_example
ssl_configuration:
description:
- ""
returned: on success
type: complex
contains:
verify_depth:
description:
- The maximum depth for peer certificate chain verification.
- "Example: `3`"
returned: on success
type: int
sample: 56
verify_peer_certificate:
description:
- Whether the load balancer listener should verify peer certificates.
- "Example: `true`"
returned: on success
type: bool
sample: true
trusted_certificate_authority_ids:
description:
- Ids for OCI certificates service CA or CA bundles for the load balancer to trust.
- "Example: `[ocid1.cabundle.oc1.us-ashburn-1.amaaaaaaav3bgsaagl4zzyqdop5i2vuwoqewdvauuw34llqa74otq2jdsfyq]`"
returned: on success
type: list
sample: []
certificate_ids:
description:
- Ids for OCI certificates service certificates. Currently only a single Id may be passed.
- "Example: `[ocid1.certificate.oc1.us-ashburn-1.amaaaaaaav3bgsaa5o2q7rh5nfmkkukfkogasqhk6af2opufhjlqg7m6jqzq]`"
returned: on success
type: list
sample: []
certificate_name:
description:
- A friendly name for the certificate bundle. It must be unique and it cannot be changed.
Valid certificate bundle names include only alphanumeric characters, dashes, and underscores.
Certificate bundle names cannot contain spaces. Avoid entering confidential information.
- "Example: `example_certificate_bundle`"
returned: on success
type: str
sample: certificate_name_example
server_order_preference:
description:
- When this attribute is set to ENABLED, the system gives preference to the server ciphers over the client
ciphers.
- "**Note:** This configuration is applicable only when the load balancer is acting as an SSL/HTTPS server. This
field is ignored when the `SSLConfiguration` object is associated with a backend set."
returned: on success
type: str
sample: ENABLED
cipher_suite_name:
description:
- The name of the cipher suite to use for HTTPS or SSL connections.
- If this field is not specified, the default is `oci-default-ssl-cipher-suite-v1`.
- "**Notes:**"
- "* You must ensure compatibility between the specified SSL protocols and the ciphers configured in the cipher
suite. Clients cannot perform an SSL handshake if there is an incompatible configuration.
* You must ensure compatibility between the ciphers configured in the cipher suite and the configured
certificates. For example, RSA-based ciphers require RSA certificates and ECDSA-based ciphers require ECDSA
certificates.
* If the cipher configuration is not modified after load balancer creation, the `GET` operation returns
`oci-default-ssl-cipher-suite-v1` as the value of this field in the SSL configuration for existing listeners
that predate this feature.
* If the cipher configuration was modified using Oracle operations after load balancer creation, the `GET`
operation returns `oci-customized-ssl-cipher-suite` as the value of this field in the SSL configuration for
existing listeners that predate this feature.
* The `GET` operation returns `oci-wider-compatible-ssl-cipher-suite-v1` as the value of this field in the SSL
configuration for existing backend sets that predate this feature.
* If the `GET` operation on a listener returns `oci-customized-ssl-cipher-suite` as the value of this field,
you must specify an appropriate predefined or custom cipher suite name when updating the resource.
* The `oci-customized-ssl-cipher-suite` Oracle reserved cipher suite name is not accepted as valid input for
this field."
- "example: `example_cipher_suite`"
returned: on success
type: str
sample: cipher_suite_name_example
protocols:
description:
- A list of SSL protocols the load balancer must support for HTTPS or SSL connections.
- The load balancer uses SSL protocols to establish a secure connection between a client and a server. A secure
connection ensures that all data passed between the client and the server is private.
- "The Load Balancing service supports the following protocols:"
- "* TLSv1
* TLSv1.1
* TLSv1.2"
- If this field is not specified, TLSv1.2 is the default.
- "**Warning:** All SSL listeners created on a given port must use the same set of SSL protocols."
- "**Notes:**"
- "* The handshake to establish an SSL connection fails if the client supports none of the specified protocols.
* You must ensure compatibility between the specified SSL protocols and the ciphers configured in the cipher
suite.
* For all existing load balancer listeners and backend sets that predate this feature, the `GET` operation
displays a list of SSL protocols currently used by those resources."
- "example: `[\\"TLSv1.1\\", \\"TLSv1.2\\"]`"
returned: on success
type: list
sample: []
session_persistence_configuration:
description:
- ""
returned: on success
type: complex
contains:
cookie_name:
description:
- "The name of the cookie used to detect a session initiated by the backend server. Use '*' to specify
that any cookie set by the backend causes the session to persist."
- "Example: `example_cookie`"
returned: on success
type: str
sample: cookie_name_example
disable_fallback:
description:
- Whether the load balancer is prevented from directing traffic from a persistent session client to
a different backend server if the original server is unavailable. Defaults to false.
- "Example: `false`"
returned: on success
type: bool
sample: true
lb_cookie_session_persistence_configuration:
description:
- ""
returned: on success
type: complex
contains:
cookie_name:
description:
- "The name of the cookie inserted by the load balancer. If this field is not configured, the cookie name defaults
to \\"X-Oracle-BMC-LBS-Route\\"."
- "Example: `example_cookie`"
- "**Notes:**"
- "* Ensure that the cookie name used at the backend application servers is different from the cookie name used
at the load balancer. To minimize the chance of name collision, Oracle recommends that you use a prefix
such as \\"X-Oracle-OCI-\\" for this field."
- "* If a backend server and the load balancer both insert cookies with the same name, the client or browser
behavior can vary depending on the domain and path values associated with the cookie. If the name, domain,
and path values of the `Set-cookie` generated by a backend server and the `Set-cookie` generated by the
load balancer are all the same, the client or browser treats them as one cookie and returns only one of
the cookie values in subsequent requests. If both `Set-cookie` names are the same, but the domain and path
names are different, the client or browser treats them as two different cookies."
returned: on success
type: str
sample: cookie_name_example
disable_fallback:
description:
- Whether the load balancer is prevented from directing traffic from a persistent session client to
a different backend server if the original server is unavailable. Defaults to false.
- "Example: `false`"
returned: on success
type: bool
sample: true
domain:
description:
- The domain in which the cookie is valid. The `Set-cookie` header inserted by the load balancer contains a
domain attribute with the specified value.
- This attribute has no default value. If you do not specify a value, the load balancer does not insert the domain
attribute into the `Set-cookie` header.
- "**Notes:**"
- "* L(RFC 6265 - HTTP State Management Mechanism,https://www.ietf.org/rfc/rfc6265.txt) describes client and
browser behavior when the domain attribute is present or not present in the `Set-cookie` header."
- If the value of the `Domain` attribute is `example.com` in the `Set-cookie` header, the client includes
the same cookie in the `Cookie` header when making HTTP requests to `example.com`, `www.example.com`, and
`www.abc.example.com`. If the `Domain` attribute is not present, the client returns the cookie only for
the domain to which the original request was made.
- "* Ensure that this attribute specifies the correct domain value. If the `Domain` attribute in the `Set-cookie`
header does not include the domain to which the original request was made, the client or browser might reject
the cookie. As specified in RFC 6265, the client accepts a cookie with the `Domain` attribute value `example.com`
or `www.example.com` sent from `www.example.com`. It does not accept a cookie with the `Domain` attribute
`abc.example.com` or `www.abc.example.com` sent from `www.example.com`."
- "Example: `example.com`"
returned: on success
type: str
sample: domain_example
path:
description:
- The path in which the cookie is valid. The `Set-cookie header` inserted by the load balancer contains a `Path`
attribute with the specified value.
- Clients include the cookie in an HTTP request only if the path portion of the request-uri matches, or is a
subdirectory of, the cookie's `Path` attribute.
- The default value is `/`.
- "Example: `/example`"
returned: on success
type: str
sample: path_example
max_age_in_seconds:
description:
- The amount of time the cookie remains valid. The `Set-cookie` header inserted by the load balancer contains
a `Max-Age` attribute with the specified value.
- The specified value must be at least one second. There is no default value for this attribute. If you do not
specify a value, the load balancer does not include the `Max-Age` attribute in the `Set-cookie` header. In
most cases, the client or browser retains the cookie until the current session ends, as defined by the client.
- "Example: `3600`"
returned: on success
type: int
sample: 56
is_secure:
description:
- Whether the `Set-cookie` header should contain the `Secure` attribute. If `true`, the `Set-cookie` header
inserted by the load balancer contains the `Secure` attribute, which directs the client or browser to send the
cookie only using a secure protocol.
- "**Note:** If you set this field to `true`, you cannot associate the corresponding backend set with an HTTP
listener."
- "Example: `true`"
returned: on success
type: bool
sample: true
is_http_only:
description:
- Whether the `Set-cookie` header should contain the `HttpOnly` attribute. If `true`, the `Set-cookie` header
inserted by the load balancer contains the `HttpOnly` attribute, which limits the scope of the cookie to HTTP
requests. This attribute directs the client or browser to omit the cookie when providing access to cookies
through non-HTTP APIs. For example, it restricts the cookie from JavaScript channels.
- "Example: `true`"
returned: on success
type: bool
sample: true
sample: {
"name": "name_example",
"policy": "policy_example",
"backends": [{
"name": "name_example",
"ip_address": "ip_address_example",
"port": 56,
"weight": 56,
"drain": true,
"backup": true,
"offline": true
}],
"health_checker": {
"protocol": "protocol_example",
"url_path": "url_path_example",
"port": 56,
"return_code": 56,
"retries": 56,
"timeout_in_millis": 56,
"interval_in_millis": 56,
"response_body_regex": "response_body_regex_example"
},
"ssl_configuration": {
"verify_depth": 56,
"verify_peer_certificate": true,
"trusted_certificate_authority_ids": [],
"certificate_ids": [],
"certificate_name": "certificate_name_example",
"server_order_preference": "ENABLED",
"cipher_suite_name": "cipher_suite_name_example",
"protocols": []
},
"session_persistence_configuration": {
"cookie_name": "cookie_name_example",
"disable_fallback": true
},
"lb_cookie_session_persistence_configuration": {
"cookie_name": "cookie_name_example",
"disable_fallback": true,
"domain": "domain_example",
"path": "path_example",
"max_age_in_seconds": 56,
"is_secure": true,
"is_http_only": true
}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
# The OCI Python SDK is an optional dependency: import it defensively and
# record availability in HAS_OCI_PY_SDK so main() can fail with a clear
# message instead of an ImportError traceback.
try:
    from oci.load_balancer import LoadBalancerClient
    from oci.load_balancer.models import CreateBackendSetDetails
    from oci.load_balancer.models import UpdateBackendSetDetails
    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False
class BackendSetHelperGen(OCIResourceHelperBase):
    """Generated helper for the BackendSet resource.

    Supported operations: create, update, get, list and delete.
    """

    def get_possible_entity_types(self):
        # Extend the generic aliases with BackendSet-specific entity names.
        base_types = super(BackendSetHelperGen, self).get_possible_entity_types()
        return base_types + [
            "backendset",
            "backendsets",
            "loadBalancerbackendset",
            "loadBalancerbackendsets",
            "backendsetresource",
            "backendsetsresource",
            "loadbalancer",
        ]

    def get_module_resource_id_param(self):
        # Backend sets are addressed by their unique name, not an OCID.
        return "name"

    def get_module_resource_id(self):
        return self.module.params.get("name")

    def get_get_fn(self):
        return self.client.get_backend_set

    def get_resource(self):
        # Fetch the backend set identified by the module parameters,
        # retrying transient failures with backoff.
        return oci_common_utils.call_with_backoff(
            self.client.get_backend_set,
            load_balancer_id=self.module.params.get("load_balancer_id"),
            backend_set_name=self.module.params.get("name"),
        )

    def get_required_kwargs_for_list(self):
        # load_balancer_id is the only mandatory argument for the list call.
        return {"load_balancer_id": self.module.params["load_balancer_id"]}

    def get_optional_kwargs_for_list(self):
        # The list operation takes no optional filters for this resource.
        return {}

    def list_resources(self):
        # Combine required and optional arguments and page through all results.
        list_kwargs = oci_common_utils.merge_dicts(
            self.get_required_kwargs_for_list(), self.get_optional_kwargs_for_list()
        )
        return oci_common_utils.list_all_resources(
            self.client.list_backend_sets, **list_kwargs
        )

    def get_create_model_class(self):
        return CreateBackendSetDetails

    def is_update(self):
        # state=present against an existing resource means update.
        return (
            self.module.params.get("state") == "present"
            and self.does_resource_exist()
        )

    def is_create(self):
        # state=present against a missing resource means create.
        return (
            self.module.params.get("state") == "present"
            and not self.does_resource_exist()
        )

    def create_resource(self):
        # Issue the create call and block until its work request completes.
        details = self.get_create_model()
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.create_backend_set,
            call_fn_args=(),
            call_fn_kwargs=dict(
                create_backend_set_details=details,
                load_balancer_id=self.module.params.get("load_balancer_id"),
            ),
            waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
            operation=oci_common_utils.CREATE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=oci_common_utils.get_work_request_completed_states(),
        )

    def get_update_model_class(self):
        return UpdateBackendSetDetails

    def update_resource(self):
        # Issue the update call and block until its work request completes.
        details = self.get_update_model()
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.update_backend_set,
            call_fn_args=(),
            call_fn_kwargs=dict(
                update_backend_set_details=details,
                load_balancer_id=self.module.params.get("load_balancer_id"),
                backend_set_name=self.module.params.get("name"),
            ),
            waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
            operation=oci_common_utils.UPDATE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=oci_common_utils.get_work_request_completed_states(),
        )

    def delete_resource(self):
        # Issue the delete call and block until its work request completes.
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.delete_backend_set,
            call_fn_args=(),
            call_fn_kwargs=dict(
                load_balancer_id=self.module.params.get("load_balancer_id"),
                backend_set_name=self.module.params.get("name"),
            ),
            waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
            operation=oci_common_utils.DELETE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=oci_common_utils.get_work_request_completed_states(),
        )
# get_custom_class returns a hand-written customization class for this helper
# if the collection provides one; otherwise it returns a no-op placeholder.
BackendSetHelperCustom = get_custom_class("BackendSetHelperCustom")


class ResourceHelper(BackendSetHelperCustom, BackendSetHelperGen):
    # MRO lists the custom class first so it can override generated behavior.
    pass
def main():
    """Module entry point: build the argument spec, create the resource
    helper, and dispatch to delete/update/create based on module state."""
    # Suboption specs are named locally to keep the top-level spec readable.
    backend_spec = dict(
        ip_address=dict(type="str", required=True),
        port=dict(type="int", required=True),
        weight=dict(type="int"),
        backup=dict(type="bool"),
        drain=dict(type="bool"),
        offline=dict(type="bool"),
    )
    health_checker_spec = dict(
        protocol=dict(type="str", required=True),
        url_path=dict(type="str"),
        port=dict(type="int"),
        return_code=dict(type="int"),
        retries=dict(type="int"),
        timeout_in_millis=dict(type="int"),
        interval_in_millis=dict(type="int"),
        response_body_regex=dict(type="str"),
    )
    ssl_configuration_spec = dict(
        verify_depth=dict(type="int"),
        verify_peer_certificate=dict(type="bool"),
        trusted_certificate_authority_ids=dict(type="list", elements="str"),
        certificate_ids=dict(type="list", elements="str"),
        certificate_name=dict(type="str"),
        protocols=dict(type="list", elements="str"),
        cipher_suite_name=dict(type="str"),
        server_order_preference=dict(type="str", choices=["ENABLED", "DISABLED"]),
    )
    session_persistence_spec = dict(
        cookie_name=dict(type="str", required=True),
        disable_fallback=dict(type="bool"),
    )
    lb_cookie_persistence_spec = dict(
        cookie_name=dict(type="str"),
        disable_fallback=dict(type="bool"),
        domain=dict(type="str"),
        path=dict(type="str"),
        max_age_in_seconds=dict(type="int"),
        is_secure=dict(type="bool"),
        is_http_only=dict(type="bool"),
    )

    # Start from the common OCI spec (auth options, wait options, ...).
    module_args = oci_common_utils.get_common_arg_spec(
        supports_create=True, supports_wait=True
    )
    module_args.update(
        dict(
            name=dict(type="str", required=True),
            policy=dict(type="str"),
            backends=dict(type="list", elements="dict", options=backend_spec),
            health_checker=dict(type="dict", options=health_checker_spec),
            ssl_configuration=dict(type="dict", options=ssl_configuration_spec),
            session_persistence_configuration=dict(
                type="dict", options=session_persistence_spec
            ),
            lb_cookie_session_persistence_configuration=dict(
                type="dict", options=lb_cookie_persistence_spec
            ),
            load_balancer_id=dict(aliases=["id"], type="str", required=True),
            state=dict(type="str", default="present", choices=["present", "absent"]),
        )
    )

    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)

    # Fail early with a clear message when the optional SDK is missing.
    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    resource_helper = ResourceHelper(
        module=module,
        resource_type="backend_set",
        service_client_class=LoadBalancerClient,
        namespace="load_balancer",
    )

    # Exactly one branch applies; otherwise the module reports no change.
    result = dict(changed=False)
    if resource_helper.is_delete():
        result = resource_helper.delete()
    elif resource_helper.is_update():
        result = resource_helper.update()
    elif resource_helper.is_create():
        result = resource_helper.create()

    module.exit_json(**result)
# Standard Ansible module entry point.
if __name__ == "__main__":
    main()
| 49.657269 | 149 | 0.558134 |
from __future__ import absolute_import, division, print_function
# Force new-style classes under Python 2 (ansible-core convention).
__metaclass__ = type

# Legacy metadata block read by older Ansible tooling.
ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}
DOCUMENTATION = """
---
module: oci_loadbalancer_backend_set
short_description: Manage a BackendSet resource in Oracle Cloud Infrastructure
description:
- This module allows the user to create, update and delete a BackendSet resource in Oracle Cloud Infrastructure
- For I(state=present), adds a backend set to a load balancer.
version_added: "2.9.0"
author: Oracle (@oracle)
options:
name:
description:
- A friendly name for the backend set. It must be unique and it cannot be changed.
- Valid backend set names include only alphanumeric characters, dashes, and underscores. Backend set names cannot
contain spaces. Avoid entering confidential information.
- "Example: `example_backend_set`"
type: str
required: true
policy:
description:
- The load balancer policy for the backend set. To get a list of available policies, use the
L(ListPolicies,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/loadbalancer/20170115/LoadBalancerPolicy/ListPolicies) operation.
- "Example: `LEAST_CONNECTIONS`"
- Required for create using I(state=present), update using I(state=present) with name present.
type: str
backends:
description:
- ""
- Required for update using I(state=present) with name present.
type: list
elements: dict
suboptions:
ip_address:
description:
- The IP address of the backend server.
- "Example: `10.0.0.3`"
type: str
required: true
port:
description:
- The communication port for the backend server.
- "Example: `8080`"
type: int
required: true
weight:
description:
- The load balancing policy weight assigned to the server. Backend servers with a higher weight receive a larger
proportion of incoming traffic. For example, a server weighted '3' receives 3 times the number of new connections
as a server weighted '1'.
For more information on load balancing policies, see
L(How Load Balancing Policies Work,https://docs.cloud.oracle.com/Content/Balance/Reference/lbpolicies.htm).
- "Example: `3`"
type: int
backup:
description:
- "Whether the load balancer should treat this server as a backup unit. If `true`, the load balancer forwards no ingress
traffic to this backend server unless all other backend servers not marked as \\"backup\\" fail the health check policy."
- "**Note:** You cannot add a backend server marked as `backup` to a backend set that uses the IP Hash policy."
- "Example: `false`"
type: bool
drain:
description:
- "Whether the load balancer should drain this server. Servers marked \\"drain\\" receive no new
incoming traffic."
- "Example: `false`"
type: bool
offline:
description:
- Whether the load balancer should treat this server as offline. Offline servers receive no incoming
traffic.
- "Example: `false`"
type: bool
health_checker:
description:
- ""
- Required for create using I(state=present), update using I(state=present) with name present.
type: dict
suboptions:
protocol:
description:
- The protocol the health check must use; either HTTP or TCP.
- "Example: `HTTP`"
type: str
required: true
url_path:
description:
- The path against which to run the health check.
- "Example: `/healthcheck`"
type: str
port:
description:
- The backend server port against which to run the health check. If the port is not specified, the load balancer uses the
port information from the `Backend` object.
- "Example: `8080`"
type: int
return_code:
description:
- The status code a healthy backend server should return.
- "Example: `200`"
type: int
retries:
description:
- "The number of retries to attempt before a backend server is considered \\"unhealthy\\". This number also applies
when recovering a server to the \\"healthy\\" state."
- "Example: `3`"
type: int
timeout_in_millis:
description:
- The maximum time, in milliseconds, to wait for a reply to a health check. A health check is successful only if a reply
returns within this timeout period.
- "Example: `3000`"
type: int
interval_in_millis:
description:
- The interval between health checks, in milliseconds.
- "Example: `10000`"
type: int
response_body_regex:
description:
- A regular expression for parsing the response body from the backend server.
- "Example: `^((?!false).|\\\\s)*$`"
type: str
ssl_configuration:
description:
- ""
- This parameter is updatable.
type: dict
suboptions:
verify_depth:
description:
- The maximum depth for peer certificate chain verification.
- "Example: `3`"
type: int
verify_peer_certificate:
description:
- Whether the load balancer listener should verify peer certificates.
- "Example: `true`"
type: bool
trusted_certificate_authority_ids:
description:
- Ids for OCI certificates service CA or CA bundles for the load balancer to trust.
- "Example: `[ocid1.cabundle.oc1.us-ashburn-1.amaaaaaaav3bgsaagl4zzyqdop5i2vuwoqewdvauuw34llqa74otq2jdsfyq]`"
type: list
elements: str
certificate_ids:
description:
- Ids for OCI certificates service certificates. Currently only a single Id may be passed.
- "Example: `[ocid1.certificate.oc1.us-ashburn-1.amaaaaaaav3bgsaa5o2q7rh5nfmkkukfkogasqhk6af2opufhjlqg7m6jqzq]`"
type: list
elements: str
certificate_name:
description:
- A friendly name for the certificate bundle. It must be unique and it cannot be changed.
Valid certificate bundle names include only alphanumeric characters, dashes, and underscores.
Certificate bundle names cannot contain spaces. Avoid entering confidential information.
- "Example: `example_certificate_bundle`"
type: str
protocols:
description:
- A list of SSL protocols the load balancer must support for HTTPS or SSL connections.
- The load balancer uses SSL protocols to establish a secure connection between a client and a server. A secure
connection ensures that all data passed between the client and the server is private.
- "The Load Balancing service supports the following protocols:"
- "* TLSv1
* TLSv1.1
* TLSv1.2"
- If this field is not specified, TLSv1.2 is the default.
- "**Warning:** All SSL listeners created on a given port must use the same set of SSL protocols."
- "**Notes:**"
- "* The handshake to establish an SSL connection fails if the client supports none of the specified protocols.
* You must ensure compatibility between the specified SSL protocols and the ciphers configured in the cipher
suite.
* For all existing load balancer listeners and backend sets that predate this feature, the `GET` operation
displays a list of SSL protocols currently used by those resources."
- "example: `[\\"TLSv1.1\\", \\"TLSv1.2\\"]`"
type: list
elements: str
cipher_suite_name:
description:
- The name of the cipher suite to use for HTTPS or SSL connections.
- If this field is not specified, the default is `oci-default-ssl-cipher-suite-v1`.
- "**Notes:**"
- "* You must ensure compatibility between the specified SSL protocols and the ciphers configured in the cipher
suite. Clients cannot perform an SSL handshake if there is an incompatible configuration.
* You must ensure compatibility between the ciphers configured in the cipher suite and the configured
certificates. For example, RSA-based ciphers require RSA certificates and ECDSA-based ciphers require ECDSA
certificates.
* If the cipher configuration is not modified after load balancer creation, the `GET` operation returns
`oci-default-ssl-cipher-suite-v1` as the value of this field in the SSL configuration for existing listeners
that predate this feature.
* If the cipher configuration was modified using Oracle operations after load balancer creation, the `GET`
operation returns `oci-customized-ssl-cipher-suite` as the value of this field in the SSL configuration for
existing listeners that predate this feature.
* The `GET` operation returns `oci-wider-compatible-ssl-cipher-suite-v1` as the value of this field in the SSL
configuration for existing backend sets that predate this feature.
* If the `GET` operation on a listener returns `oci-customized-ssl-cipher-suite` as the value of this field,
you must specify an appropriate predefined or custom cipher suite name when updating the resource.
* The `oci-customized-ssl-cipher-suite` Oracle reserved cipher suite name is not accepted as valid input for
this field."
- "example: `example_cipher_suite`"
type: str
server_order_preference:
description:
- When this attribute is set to ENABLED, the system gives preference to the server ciphers over the client
ciphers.
- "**Note:** This configuration is applicable only when the load balancer is acting as an SSL/HTTPS server. This
field is ignored when the `SSLConfiguration` object is associated with a backend set."
type: str
choices:
- "ENABLED"
- "DISABLED"
session_persistence_configuration:
description:
- ""
- This parameter is updatable.
type: dict
suboptions:
cookie_name:
description:
- "The name of the cookie used to detect a session initiated by the backend server. Use '*' to specify
that any cookie set by the backend causes the session to persist."
- "Example: `example_cookie`"
type: str
required: true
disable_fallback:
description:
- Whether the load balancer is prevented from directing traffic from a persistent session client to
a different backend server if the original server is unavailable. Defaults to false.
- "Example: `false`"
type: bool
lb_cookie_session_persistence_configuration:
description:
- ""
- This parameter is updatable.
type: dict
suboptions:
cookie_name:
description:
- "The name of the cookie inserted by the load balancer. If this field is not configured, the cookie name defaults
to \\"X-Oracle-BMC-LBS-Route\\"."
- "Example: `example_cookie`"
- "**Notes:**"
- "* Ensure that the cookie name used at the backend application servers is different from the cookie name used
at the load balancer. To minimize the chance of name collision, Oracle recommends that you use a prefix
such as \\"X-Oracle-OCI-\\" for this field."
- "* If a backend server and the load balancer both insert cookies with the same name, the client or browser
behavior can vary depending on the domain and path values associated with the cookie. If the name, domain,
and path values of the `Set-cookie` generated by a backend server and the `Set-cookie` generated by the
load balancer are all the same, the client or browser treats them as one cookie and returns only one of
the cookie values in subsequent requests. If both `Set-cookie` names are the same, but the domain and path
names are different, the client or browser treats them as two different cookies."
type: str
disable_fallback:
description:
- Whether the load balancer is prevented from directing traffic from a persistent session client to
a different backend server if the original server is unavailable. Defaults to false.
- "Example: `false`"
type: bool
domain:
description:
- The domain in which the cookie is valid. The `Set-cookie` header inserted by the load balancer contains a
domain attribute with the specified value.
- This attribute has no default value. If you do not specify a value, the load balancer does not insert the domain
attribute into the `Set-cookie` header.
- "**Notes:**"
- "* L(RFC 6265 - HTTP State Management Mechanism,https://www.ietf.org/rfc/rfc6265.txt) describes client and
browser behavior when the domain attribute is present or not present in the `Set-cookie` header."
- If the value of the `Domain` attribute is `example.com` in the `Set-cookie` header, the client includes
the same cookie in the `Cookie` header when making HTTP requests to `example.com`, `www.example.com`, and
`www.abc.example.com`. If the `Domain` attribute is not present, the client returns the cookie only for
the domain to which the original request was made.
- "* Ensure that this attribute specifies the correct domain value. If the `Domain` attribute in the `Set-cookie`
header does not include the domain to which the original request was made, the client or browser might reject
the cookie. As specified in RFC 6265, the client accepts a cookie with the `Domain` attribute value `example.com`
or `www.example.com` sent from `www.example.com`. It does not accept a cookie with the `Domain` attribute
`abc.example.com` or `www.abc.example.com` sent from `www.example.com`."
- "Example: `example.com`"
type: str
path:
description:
- The path in which the cookie is valid. The `Set-cookie header` inserted by the load balancer contains a `Path`
attribute with the specified value.
- Clients include the cookie in an HTTP request only if the path portion of the request-uri matches, or is a
subdirectory of, the cookie's `Path` attribute.
- The default value is `/`.
- "Example: `/example`"
type: str
max_age_in_seconds:
description:
- The amount of time the cookie remains valid. The `Set-cookie` header inserted by the load balancer contains
a `Max-Age` attribute with the specified value.
- The specified value must be at least one second. There is no default value for this attribute. If you do not
specify a value, the load balancer does not include the `Max-Age` attribute in the `Set-cookie` header. In
most cases, the client or browser retains the cookie until the current session ends, as defined by the client.
- "Example: `3600`"
type: int
is_secure:
description:
- Whether the `Set-cookie` header should contain the `Secure` attribute. If `true`, the `Set-cookie` header
inserted by the load balancer contains the `Secure` attribute, which directs the client or browser to send the
cookie only using a secure protocol.
- "**Note:** If you set this field to `true`, you cannot associate the corresponding backend set with an HTTP
listener."
- "Example: `true`"
type: bool
is_http_only:
description:
- Whether the `Set-cookie` header should contain the `HttpOnly` attribute. If `true`, the `Set-cookie` header
inserted by the load balancer contains the `HttpOnly` attribute, which limits the scope of the cookie to HTTP
requests. This attribute directs the client or browser to omit the cookie when providing access to cookies
through non-HTTP APIs. For example, it restricts the cookie from JavaScript channels.
- "Example: `true`"
type: bool
load_balancer_id:
description:
- The L(OCID,https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm) of the load balancer on which to add a backend set.
type: str
aliases: ["id"]
required: true
state:
description:
- The state of the BackendSet.
- Use I(state=present) to create or update a BackendSet.
- Use I(state=absent) to delete a BackendSet.
type: str
required: false
default: 'present'
choices: ["present", "absent"]
extends_documentation_fragment: [ oracle.oci.oracle, oracle.oci.oracle_creatable_resource, oracle.oci.oracle_wait_options ]
"""
EXAMPLES = """
- name: Create backend_set
oci_loadbalancer_backend_set:
# required
name: name_example
policy: policy_example
health_checker:
# required
protocol: protocol_example
# optional
url_path: url_path_example
port: 56
return_code: 56
retries: 56
timeout_in_millis: 56
interval_in_millis: 56
response_body_regex: response_body_regex_example
load_balancer_id: "ocid1.loadbalancer.oc1..xxxxxxEXAMPLExxxxxx"
# optional
backends:
- # required
ip_address: ip_address_example
port: 56
# optional
weight: 56
backup: true
drain: true
offline: true
ssl_configuration:
# optional
verify_depth: 56
verify_peer_certificate: true
trusted_certificate_authority_ids: [ "trusted_certificate_authority_ids_example" ]
certificate_ids: [ "certificate_ids_example" ]
certificate_name: certificate_name_example
protocols: [ "protocols_example" ]
cipher_suite_name: cipher_suite_name_example
server_order_preference: ENABLED
session_persistence_configuration:
# required
cookie_name: cookie_name_example
# optional
disable_fallback: true
lb_cookie_session_persistence_configuration:
# optional
cookie_name: cookie_name_example
disable_fallback: true
domain: domain_example
path: path_example
max_age_in_seconds: 56
is_secure: true
is_http_only: true
- name: Update backend_set
oci_loadbalancer_backend_set:
# required
name: name_example
policy: policy_example
backends:
- # required
ip_address: ip_address_example
port: 56
# optional
weight: 56
backup: true
drain: true
offline: true
health_checker:
# required
protocol: protocol_example
# optional
url_path: url_path_example
port: 56
return_code: 56
retries: 56
timeout_in_millis: 56
interval_in_millis: 56
response_body_regex: response_body_regex_example
load_balancer_id: "ocid1.loadbalancer.oc1..xxxxxxEXAMPLExxxxxx"
# optional
ssl_configuration:
# optional
verify_depth: 56
verify_peer_certificate: true
trusted_certificate_authority_ids: [ "trusted_certificate_authority_ids_example" ]
certificate_ids: [ "certificate_ids_example" ]
certificate_name: certificate_name_example
protocols: [ "protocols_example" ]
cipher_suite_name: cipher_suite_name_example
server_order_preference: ENABLED
session_persistence_configuration:
# required
cookie_name: cookie_name_example
# optional
disable_fallback: true
lb_cookie_session_persistence_configuration:
# optional
cookie_name: cookie_name_example
disable_fallback: true
domain: domain_example
path: path_example
max_age_in_seconds: 56
is_secure: true
is_http_only: true
- name: Delete backend_set
oci_loadbalancer_backend_set:
# required
name: name_example
load_balancer_id: "ocid1.loadbalancer.oc1..xxxxxxEXAMPLExxxxxx"
state: absent
"""
RETURN = """
backend_set:
description:
- Details of the BackendSet resource acted upon by the current operation
returned: on success
type: complex
contains:
name:
description:
- A friendly name for the backend set. It must be unique and it cannot be changed.
- Valid backend set names include only alphanumeric characters, dashes, and underscores. Backend set names cannot
contain spaces. Avoid entering confidential information.
- "Example: `example_backend_set`"
returned: on success
type: str
sample: name_example
policy:
description:
- The load balancer policy for the backend set. To get a list of available policies, use the
L(ListPolicies,https://docs.cloud.oracle.com/en-us/iaas/api/#/en/loadbalancer/20170115/LoadBalancerPolicy/ListPolicies) operation.
- "Example: `LEAST_CONNECTIONS`"
returned: on success
type: str
sample: policy_example
backends:
description:
- ""
returned: on success
type: complex
contains:
name:
description:
- A read-only field showing the IP address and port that uniquely identify this backend server in the backend set.
- "Example: `10.0.0.3:8080`"
returned: on success
type: str
sample: name_example
ip_address:
description:
- The IP address of the backend server.
- "Example: `10.0.0.3`"
returned: on success
type: str
sample: ip_address_example
port:
description:
- The communication port for the backend server.
- "Example: `8080`"
returned: on success
type: int
sample: 56
weight:
description:
- The load balancing policy weight assigned to the server. Backend servers with a higher weight receive a larger
proportion of incoming traffic. For example, a server weighted '3' receives 3 times the number of new connections
as a server weighted '1'.
For more information on load balancing policies, see
L(How Load Balancing Policies Work,https://docs.cloud.oracle.com/Content/Balance/Reference/lbpolicies.htm).
- "Example: `3`"
returned: on success
type: int
sample: 56
drain:
description:
- "Whether the load balancer should drain this server. Servers marked \\"drain\\" receive no new
incoming traffic."
- "Example: `false`"
returned: on success
type: bool
sample: true
backup:
description:
- "Whether the load balancer should treat this server as a backup unit. If `true`, the load balancer forwards no ingress
traffic to this backend server unless all other backend servers not marked as \\"backup\\" fail the health check policy."
- "**Note:** You cannot add a backend server marked as `backup` to a backend set that uses the IP Hash policy."
- "Example: `false`"
returned: on success
type: bool
sample: true
offline:
description:
- Whether the load balancer should treat this server as offline. Offline servers receive no incoming
traffic.
- "Example: `false`"
returned: on success
type: bool
sample: true
health_checker:
description:
- ""
returned: on success
type: complex
contains:
protocol:
description:
- The protocol the health check must use; either HTTP or TCP.
- "Example: `HTTP`"
returned: on success
type: str
sample: protocol_example
url_path:
description:
- The path against which to run the health check.
- "Example: `/healthcheck`"
returned: on success
type: str
sample: url_path_example
port:
description:
- The backend server port against which to run the health check. If the port is not specified, the load balancer uses the
port information from the `Backend` object.
- "Example: `8080`"
returned: on success
type: int
sample: 56
return_code:
description:
- "The status code a healthy backend server should return. If you configure the health check policy to use the HTTP protocol,
you can use common HTTP status codes such as \\"200\\"."
- "Example: `200`"
returned: on success
type: int
sample: 56
retries:
description:
- "The number of retries to attempt before a backend server is considered \\"unhealthy\\". This number also applies
when recovering a server to the \\"healthy\\" state. Defaults to 3."
- "Example: `3`"
returned: on success
type: int
sample: 56
timeout_in_millis:
description:
- The maximum time, in milliseconds, to wait for a reply to a health check. A health check is successful only if a reply
returns within this timeout period. Defaults to 3000 (3 seconds).
- "Example: `3000`"
returned: on success
type: int
sample: 56
interval_in_millis:
description:
- The interval between health checks, in milliseconds. The default is 10000 (10 seconds).
- "Example: `10000`"
returned: on success
type: int
sample: 56
response_body_regex:
description:
- A regular expression for parsing the response body from the backend server.
- "Example: `^((?!false).|\\\\s)*$`"
returned: on success
type: str
sample: response_body_regex_example
ssl_configuration:
description:
- ""
returned: on success
type: complex
contains:
verify_depth:
description:
- The maximum depth for peer certificate chain verification.
- "Example: `3`"
returned: on success
type: int
sample: 56
verify_peer_certificate:
description:
- Whether the load balancer listener should verify peer certificates.
- "Example: `true`"
returned: on success
type: bool
sample: true
trusted_certificate_authority_ids:
description:
- Ids for OCI certificates service CA or CA bundles for the load balancer to trust.
- "Example: `[ocid1.cabundle.oc1.us-ashburn-1.amaaaaaaav3bgsaagl4zzyqdop5i2vuwoqewdvauuw34llqa74otq2jdsfyq]`"
returned: on success
type: list
sample: []
certificate_ids:
description:
- Ids for OCI certificates service certificates. Currently only a single Id may be passed.
- "Example: `[ocid1.certificate.oc1.us-ashburn-1.amaaaaaaav3bgsaa5o2q7rh5nfmkkukfkogasqhk6af2opufhjlqg7m6jqzq]`"
returned: on success
type: list
sample: []
certificate_name:
description:
- A friendly name for the certificate bundle. It must be unique and it cannot be changed.
Valid certificate bundle names include only alphanumeric characters, dashes, and underscores.
Certificate bundle names cannot contain spaces. Avoid entering confidential information.
- "Example: `example_certificate_bundle`"
returned: on success
type: str
sample: certificate_name_example
server_order_preference:
description:
- When this attribute is set to ENABLED, the system gives preference to the server ciphers over the client
ciphers.
- "**Note:** This configuration is applicable only when the load balancer is acting as an SSL/HTTPS server. This
field is ignored when the `SSLConfiguration` object is associated with a backend set."
returned: on success
type: str
sample: ENABLED
cipher_suite_name:
description:
- The name of the cipher suite to use for HTTPS or SSL connections.
- If this field is not specified, the default is `oci-default-ssl-cipher-suite-v1`.
- "**Notes:**"
- "* You must ensure compatibility between the specified SSL protocols and the ciphers configured in the cipher
suite. Clients cannot perform an SSL handshake if there is an incompatible configuration.
* You must ensure compatibility between the ciphers configured in the cipher suite and the configured
certificates. For example, RSA-based ciphers require RSA certificates and ECDSA-based ciphers require ECDSA
certificates.
* If the cipher configuration is not modified after load balancer creation, the `GET` operation returns
`oci-default-ssl-cipher-suite-v1` as the value of this field in the SSL configuration for existing listeners
that predate this feature.
* If the cipher configuration was modified using Oracle operations after load balancer creation, the `GET`
operation returns `oci-customized-ssl-cipher-suite` as the value of this field in the SSL configuration for
existing listeners that predate this feature.
* The `GET` operation returns `oci-wider-compatible-ssl-cipher-suite-v1` as the value of this field in the SSL
configuration for existing backend sets that predate this feature.
* If the `GET` operation on a listener returns `oci-customized-ssl-cipher-suite` as the value of this field,
you must specify an appropriate predefined or custom cipher suite name when updating the resource.
* The `oci-customized-ssl-cipher-suite` Oracle reserved cipher suite name is not accepted as valid input for
this field."
- "example: `example_cipher_suite`"
returned: on success
type: str
sample: cipher_suite_name_example
protocols:
description:
- A list of SSL protocols the load balancer must support for HTTPS or SSL connections.
- The load balancer uses SSL protocols to establish a secure connection between a client and a server. A secure
connection ensures that all data passed between the client and the server is private.
- "The Load Balancing service supports the following protocols:"
- "* TLSv1
* TLSv1.1
* TLSv1.2"
- If this field is not specified, TLSv1.2 is the default.
- "**Warning:** All SSL listeners created on a given port must use the same set of SSL protocols."
- "**Notes:**"
- "* The handshake to establish an SSL connection fails if the client supports none of the specified protocols.
* You must ensure compatibility between the specified SSL protocols and the ciphers configured in the cipher
suite.
* For all existing load balancer listeners and backend sets that predate this feature, the `GET` operation
displays a list of SSL protocols currently used by those resources."
- "example: `[\\"TLSv1.1\\", \\"TLSv1.2\\"]`"
returned: on success
type: list
sample: []
session_persistence_configuration:
description:
- ""
returned: on success
type: complex
contains:
cookie_name:
description:
- "The name of the cookie used to detect a session initiated by the backend server. Use '*' to specify
that any cookie set by the backend causes the session to persist."
- "Example: `example_cookie`"
returned: on success
type: str
sample: cookie_name_example
disable_fallback:
description:
- Whether the load balancer is prevented from directing traffic from a persistent session client to
a different backend server if the original server is unavailable. Defaults to false.
- "Example: `false`"
returned: on success
type: bool
sample: true
lb_cookie_session_persistence_configuration:
description:
- ""
returned: on success
type: complex
contains:
cookie_name:
description:
- "The name of the cookie inserted by the load balancer. If this field is not configured, the cookie name defaults
to \\"X-Oracle-BMC-LBS-Route\\"."
- "Example: `example_cookie`"
- "**Notes:**"
- "* Ensure that the cookie name used at the backend application servers is different from the cookie name used
at the load balancer. To minimize the chance of name collision, Oracle recommends that you use a prefix
such as \\"X-Oracle-OCI-\\" for this field."
- "* If a backend server and the load balancer both insert cookies with the same name, the client or browser
behavior can vary depending on the domain and path values associated with the cookie. If the name, domain,
and path values of the `Set-cookie` generated by a backend server and the `Set-cookie` generated by the
load balancer are all the same, the client or browser treats them as one cookie and returns only one of
the cookie values in subsequent requests. If both `Set-cookie` names are the same, but the domain and path
names are different, the client or browser treats them as two different cookies."
returned: on success
type: str
sample: cookie_name_example
disable_fallback:
description:
- Whether the load balancer is prevented from directing traffic from a persistent session client to
a different backend server if the original server is unavailable. Defaults to false.
- "Example: `false`"
returned: on success
type: bool
sample: true
domain:
description:
- The domain in which the cookie is valid. The `Set-cookie` header inserted by the load balancer contains a
domain attribute with the specified value.
- This attribute has no default value. If you do not specify a value, the load balancer does not insert the domain
attribute into the `Set-cookie` header.
- "**Notes:**"
- "* L(RFC 6265 - HTTP State Management Mechanism,https://www.ietf.org/rfc/rfc6265.txt) describes client and
browser behavior when the domain attribute is present or not present in the `Set-cookie` header."
- If the value of the `Domain` attribute is `example.com` in the `Set-cookie` header, the client includes
the same cookie in the `Cookie` header when making HTTP requests to `example.com`, `www.example.com`, and
`www.abc.example.com`. If the `Domain` attribute is not present, the client returns the cookie only for
the domain to which the original request was made.
- "* Ensure that this attribute specifies the correct domain value. If the `Domain` attribute in the `Set-cookie`
header does not include the domain to which the original request was made, the client or browser might reject
the cookie. As specified in RFC 6265, the client accepts a cookie with the `Domain` attribute value `example.com`
or `www.example.com` sent from `www.example.com`. It does not accept a cookie with the `Domain` attribute
`abc.example.com` or `www.abc.example.com` sent from `www.example.com`."
- "Example: `example.com`"
returned: on success
type: str
sample: domain_example
path:
description:
- The path in which the cookie is valid. The `Set-cookie header` inserted by the load balancer contains a `Path`
attribute with the specified value.
- Clients include the cookie in an HTTP request only if the path portion of the request-uri matches, or is a
subdirectory of, the cookie's `Path` attribute.
- The default value is `/`.
- "Example: `/example`"
returned: on success
type: str
sample: path_example
max_age_in_seconds:
description:
- The amount of time the cookie remains valid. The `Set-cookie` header inserted by the load balancer contains
a `Max-Age` attribute with the specified value.
- The specified value must be at least one second. There is no default value for this attribute. If you do not
specify a value, the load balancer does not include the `Max-Age` attribute in the `Set-cookie` header. In
most cases, the client or browser retains the cookie until the current session ends, as defined by the client.
- "Example: `3600`"
returned: on success
type: int
sample: 56
is_secure:
description:
- Whether the `Set-cookie` header should contain the `Secure` attribute. If `true`, the `Set-cookie` header
inserted by the load balancer contains the `Secure` attribute, which directs the client or browser to send the
cookie only using a secure protocol.
- "**Note:** If you set this field to `true`, you cannot associate the corresponding backend set with an HTTP
listener."
- "Example: `true`"
returned: on success
type: bool
sample: true
is_http_only:
description:
- Whether the `Set-cookie` header should contain the `HttpOnly` attribute. If `true`, the `Set-cookie` header
inserted by the load balancer contains the `HttpOnly` attribute, which limits the scope of the cookie to HTTP
requests. This attribute directs the client or browser to omit the cookie when providing access to cookies
through non-HTTP APIs. For example, it restricts the cookie from JavaScript channels.
- "Example: `true`"
returned: on success
type: bool
sample: true
sample: {
"name": "name_example",
"policy": "policy_example",
"backends": [{
"name": "name_example",
"ip_address": "ip_address_example",
"port": 56,
"weight": 56,
"drain": true,
"backup": true,
"offline": true
}],
"health_checker": {
"protocol": "protocol_example",
"url_path": "url_path_example",
"port": 56,
"return_code": 56,
"retries": 56,
"timeout_in_millis": 56,
"interval_in_millis": 56,
"response_body_regex": "response_body_regex_example"
},
"ssl_configuration": {
"verify_depth": 56,
"verify_peer_certificate": true,
"trusted_certificate_authority_ids": [],
"certificate_ids": [],
"certificate_name": "certificate_name_example",
"server_order_preference": "ENABLED",
"cipher_suite_name": "cipher_suite_name_example",
"protocols": []
},
"session_persistence_configuration": {
"cookie_name": "cookie_name_example",
"disable_fallback": true
},
"lb_cookie_session_persistence_configuration": {
"cookie_name": "cookie_name_example",
"disable_fallback": true,
"domain": "domain_example",
"path": "path_example",
"max_age_in_seconds": 56,
"is_secure": true,
"is_http_only": true
}
}
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.oracle.oci.plugins.module_utils import (
oci_common_utils,
oci_wait_utils,
)
from ansible_collections.oracle.oci.plugins.module_utils.oci_resource_utils import (
OCIResourceHelperBase,
get_custom_class,
)
# The OCI Python SDK is an optional dependency: record whether it imported
# so main() can fail with a clear message instead of raising ImportError
# at module import time.
try:
    from oci.load_balancer import LoadBalancerClient
    from oci.load_balancer.models import CreateBackendSetDetails
    from oci.load_balancer.models import UpdateBackendSetDetails
    HAS_OCI_PY_SDK = True
except ImportError:
    HAS_OCI_PY_SDK = False
class BackendSetHelperGen(OCIResourceHelperBase):
    """Generated helper for BackendSet resources.

    Supported operations: create, update and delete. A backend set is
    identified by its name within a load balancer rather than by an OCID.
    """

    def get_possible_entity_types(self):
        """Return entity-type aliases the base class uses to resolve this resource."""
        return super(BackendSetHelperGen, self).get_possible_entity_types() + [
            "backendset",
            "backendsets",
            "loadBalancerbackendset",
            "loadBalancerbackendsets",
            "backendsetresource",
            "backendsetsresource",
            "loadbalancer",
        ]

    def get_module_resource_id_param(self):
        """Backend sets are addressed by name, not by an OCID."""
        return "name"

    def get_module_resource_id(self):
        """Return the backend set name from the module parameters."""
        return self.module.params.get("name")

    def get_get_fn(self):
        """Return the client function that fetches a single backend set."""
        return self.client.get_backend_set

    def get_resource(self):
        """Fetch the backend set identified by load_balancer_id + name."""
        return oci_common_utils.call_with_backoff(
            self.client.get_backend_set,
            load_balancer_id=self.module.params.get("load_balancer_id"),
            backend_set_name=self.module.params.get("name"),
        )

    def get_required_kwargs_for_list(self):
        """Return the kwargs required by the list call (the parent load balancer)."""
        required_list_method_params = [
            "load_balancer_id",
        ]
        # Dict comprehension is the idiomatic form of dict(generator).
        return {
            param: self.module.params[param] for param in required_list_method_params
        }

    def get_optional_kwargs_for_list(self):
        """No optional list filters for this resource."""
        return dict()

    def list_resources(self):
        """List all backend sets of the load balancer."""
        required_kwargs = self.get_required_kwargs_for_list()
        optional_kwargs = self.get_optional_kwargs_for_list()
        kwargs = oci_common_utils.merge_dicts(required_kwargs, optional_kwargs)
        return oci_common_utils.list_all_resources(
            self.client.list_backend_sets, **kwargs
        )

    def get_create_model_class(self):
        return CreateBackendSetDetails

    def is_update(self):
        """state=present updates the resource when it already exists."""
        return (
            self.module.params.get("state") == "present"
            and self.does_resource_exist()
        )

    def is_create(self):
        """state=present creates the resource when it does not exist yet."""
        return (
            self.module.params.get("state") == "present"
            and not self.does_resource_exist()
        )

    def create_resource(self):
        """Create the backend set and wait for the work request to complete."""
        create_details = self.get_create_model()
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.create_backend_set,
            call_fn_args=(),
            call_fn_kwargs=dict(
                create_backend_set_details=create_details,
                load_balancer_id=self.module.params.get("load_balancer_id"),
            ),
            waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
            operation=oci_common_utils.CREATE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=oci_common_utils.get_work_request_completed_states(),
        )

    def get_update_model_class(self):
        return UpdateBackendSetDetails

    def update_resource(self):
        """Update the backend set and wait for the work request to complete."""
        update_details = self.get_update_model()
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.update_backend_set,
            call_fn_args=(),
            call_fn_kwargs=dict(
                update_backend_set_details=update_details,
                load_balancer_id=self.module.params.get("load_balancer_id"),
                backend_set_name=self.module.params.get("name"),
            ),
            waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
            operation=oci_common_utils.UPDATE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=oci_common_utils.get_work_request_completed_states(),
        )

    def delete_resource(self):
        """Delete the backend set and wait for the work request to complete."""
        return oci_wait_utils.call_and_wait(
            call_fn=self.client.delete_backend_set,
            call_fn_args=(),
            call_fn_kwargs=dict(
                load_balancer_id=self.module.params.get("load_balancer_id"),
                backend_set_name=self.module.params.get("name"),
            ),
            waiter_type=oci_wait_utils.WORK_REQUEST_WAITER_KEY,
            operation=oci_common_utils.DELETE_OPERATION_KEY,
            waiter_client=self.get_waiter_client(),
            resource_helper=self,
            wait_for_states=oci_common_utils.get_work_request_completed_states(),
        )
# Load any handwritten customizations layered on top of the generated helper.
BackendSetHelperCustom = get_custom_class("BackendSetHelperCustom")
class ResourceHelper(BackendSetHelperCustom, BackendSetHelperGen):
    # The custom class comes first in the MRO so it can override generated methods.
    pass
def main():
    """Entry point: build the argument spec, then dispatch the requested
    create, update or delete operation for the backend set and exit."""
    # Nested option specs are assembled separately so the top-level
    # argument spec stays readable; the contents are unchanged.
    backend_spec = dict(
        ip_address=dict(type="str", required=True),
        port=dict(type="int", required=True),
        weight=dict(type="int"),
        backup=dict(type="bool"),
        drain=dict(type="bool"),
        offline=dict(type="bool"),
    )
    health_checker_spec = dict(
        protocol=dict(type="str", required=True),
        url_path=dict(type="str"),
        port=dict(type="int"),
        return_code=dict(type="int"),
        retries=dict(type="int"),
        timeout_in_millis=dict(type="int"),
        interval_in_millis=dict(type="int"),
        response_body_regex=dict(type="str"),
    )
    ssl_configuration_spec = dict(
        verify_depth=dict(type="int"),
        verify_peer_certificate=dict(type="bool"),
        trusted_certificate_authority_ids=dict(type="list", elements="str"),
        certificate_ids=dict(type="list", elements="str"),
        certificate_name=dict(type="str"),
        protocols=dict(type="list", elements="str"),
        cipher_suite_name=dict(type="str"),
        server_order_preference=dict(type="str", choices=["ENABLED", "DISABLED"]),
    )
    session_persistence_spec = dict(
        cookie_name=dict(type="str", required=True),
        disable_fallback=dict(type="bool"),
    )
    lb_cookie_persistence_spec = dict(
        cookie_name=dict(type="str"),
        disable_fallback=dict(type="bool"),
        domain=dict(type="str"),
        path=dict(type="str"),
        max_age_in_seconds=dict(type="int"),
        is_secure=dict(type="bool"),
        is_http_only=dict(type="bool"),
    )

    module_args = oci_common_utils.get_common_arg_spec(
        supports_create=True, supports_wait=True
    )
    module_args.update(
        name=dict(type="str", required=True),
        policy=dict(type="str"),
        backends=dict(type="list", elements="dict", options=backend_spec),
        health_checker=dict(type="dict", options=health_checker_spec),
        ssl_configuration=dict(type="dict", options=ssl_configuration_spec),
        session_persistence_configuration=dict(
            type="dict", options=session_persistence_spec
        ),
        lb_cookie_session_persistence_configuration=dict(
            type="dict", options=lb_cookie_persistence_spec
        ),
        load_balancer_id=dict(aliases=["id"], type="str", required=True),
        state=dict(type="str", default="present", choices=["present", "absent"]),
    )

    module = AnsibleModule(argument_spec=module_args, supports_check_mode=True)

    if not HAS_OCI_PY_SDK:
        module.fail_json(msg="oci python sdk required for this module.")

    helper = ResourceHelper(
        module=module,
        resource_type="backend_set",
        service_client_class=LoadBalancerClient,
        namespace="load_balancer",
    )

    # Default to "no change"; each operation reports its own result dict.
    result = dict(changed=False)
    if helper.is_delete():
        result = helper.delete()
    elif helper.is_update():
        result = helper.update()
    elif helper.is_create():
        result = helper.create()

    module.exit_json(**result)
# Standard Ansible module entry point: run only when executed directly.
if __name__ == "__main__":
    main()
| true | true |
f723dcbf78bff84e6aa9186e1ba18550f5791807 | 4,002 | py | Python | CodingEasy/settings.py | Atif0604/CodingEasy | 75d7e88dd7ab514ee4fdaa4b1b80175d78c5a91c | [
"MIT"
] | 40 | 2021-12-22T15:16:03.000Z | 2022-03-26T08:24:04.000Z | CodingEasy/settings.py | Atif0604/CodingEasy | 75d7e88dd7ab514ee4fdaa4b1b80175d78c5a91c | [
"MIT"
] | 222 | 2021-12-14T05:37:10.000Z | 2022-03-31T16:38:59.000Z | CodingEasy/settings.py | Atif0604/CodingEasy | 75d7e88dd7ab514ee4fdaa4b1b80175d78c5a91c | [
"MIT"
] | 89 | 2021-12-14T05:00:23.000Z | 2022-03-29T10:55:25.000Z | """
Django settings for CodingEasy project.
Generated by 'django-admin startproject' using Django 4.0.1.
For more information on this file, see
https://docs.djangoproject.com/en/4.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/4.0/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to the repo; rotate it and load it from
# the environment (e.g. os.environ) before any production deployment.
SECRET_KEY = 'django-insecure-x_1=q*b(j*34f(dg0^2sa)-f$k^!0d(qa=@geze9s@8)-(!hy5'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# "*" accepts any Host header -- fine for development, must be narrowed to the
# real domain(s) for production.
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project apps
    'home.apps.HomeConfig',
    'blog.apps.BlogConfig',
    # Third-party: django-crispy-forms (form rendering, see CRISPY_TEMPLATE_PACK)
    'crispy_forms',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'CodingEasy.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates live in <repo>/templates in addition to
        # per-app template directories (APP_DIRS).
        'DIRS': [BASE_DIR / 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'CodingEasy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
STATICFILES_DIRS = [
    BASE_DIR / "static",
    '/var/www/static/',
]
# Make a directory to save user profile images
MEDIA_ROOT = BASE_DIR / 'media'
MEDIA_URL = '/media/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Crispy Forms Styling Engine definition
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Register the Login Redirect URL
LOGIN_REDIRECT_URL = 'home-index'
# Restricted pages automatically redirect at login form
LOGIN_URL = 'login'
# Outgoing email via Gmail SMTP (account-verification / password-reset mails).
# NOTE(review): fill real credentials from the environment, never commit them.
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = '##########' # Put your gmail address here
EMAIL_HOST_PASSWORD = '##########' # Put your gmail address password here
| 26.156863 | 91 | 0.703648 |
from pathlib import Path
BASE_DIR = Path(__file__).resolve().parent.parent
SECRET_KEY = 'django-insecure-x_1=q*b(j*34f(dg0^2sa)-f$k^!0d(qa=@geze9s@8)-(!hy5'
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'home.apps.HomeConfig',
'blog.apps.BlogConfig',
'crispy_forms',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'CodingEasy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [BASE_DIR / 'templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'CodingEasy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/4.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/4.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/4.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/4.0/howto/static-files/
STATIC_URL = 'static/'
STATICFILES_DIRS = [
BASE_DIR / "static",
'/var/www/static/',
]
# Make a directory to save user profile images
MEDIA_ROOT = BASE_DIR / 'media'
MEDIA_URL = '/media/'
# Default primary key field type
# https://docs.djangoproject.com/en/4.0/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
# Crispy Forms Styling Engine definition
CRISPY_TEMPLATE_PACK = 'bootstrap4'
# Register the Login Redirect URL
LOGIN_REDIRECT_URL = 'home-index'
# Restricted pages automatically redirect at login form
LOGIN_URL = 'login'
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = ' | true | true |
f723def610c1a3cf3e0216947eb0d00eb4392e68 | 9,125 | py | Python | tf_object_detection/to_tfrecords.py | AndresGarciaEscalante/bstld | cc37fb3388b7731be9e76fd1c4e2be13b6716afe | [
"MIT"
] | null | null | null | tf_object_detection/to_tfrecords.py | AndresGarciaEscalante/bstld | cc37fb3388b7731be9e76fd1c4e2be13b6716afe | [
"MIT"
] | null | null | null | tf_object_detection/to_tfrecords.py | AndresGarciaEscalante/bstld | cc37fb3388b7731be9e76fd1c4e2be13b6716afe | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
"""
Creates full-image tfrecords to use the Bosch Small Traffic Lights Dataset
with the Tensorflow Object Detection API.
The training set is split into training and validation. Tfrecords are created
for a training, validation, and test set. Labels are grouped by their respective
colors to simplify training and because the test-set does not contain any arrows.
Depending on the training method, you may want to look into creating random crops
from the images which can increase training performance due to translated inputs.
The tfrecords come without any image augmentation.
The created tfrecords will be about 18GB.
Usage:
In the folder with the extracted traffic lights dataset, run
python /path/to/this/file/to_tfrecords.py
and it will create the tfrecords there.
The path of the annotation files, tfrecords, and dataset folder can be specified.
Note that this is a tutorial file. There are only few checks and no logging.
"""
import argparse
from collections import OrderedDict, defaultdict
import hashlib
import os
from random import shuffle
import cv2
import tensorflow as tf
import tqdm
# https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md
from object_detection.utils import dataset_util
import sys
# getting the name of the directory
# where the this file is present.
current = os.path.dirname(os.path.realpath(__file__))
# Getting the parent directory name
# where the current directory is present.
parent = os.path.dirname(current)
# adding the parent directory to
# the sys.path.
sys.path.append(parent)
from read_label_file import get_all_labels
from tf_object_detection import constants
def label_id(label_string):
    """Map a raw annotation label to its numeric tensorflow class id.

    The raw label is first collapsed to its simplified (color-grouped)
    class, which is then looked up in the tf id map.  For pure object
    proposals a constant 1 could be returned instead.
    """
    simplified_class = constants.SIMPLIFIED_CLASSES[label_string]
    return constants.TF_ID_MAP[simplified_class]
def modified_label_string(label_string):
    """Return the simplified (color-grouped) class name as utf8 bytes.

    Grouping by color simplifies training; the test set contains no arrows.
    """
    simplified_class = constants.SIMPLIFIED_CLASSES[label_string]
    return simplified_class.encode('utf8')
def list_of_dicts_to_dict_of_lists(list_of_dicts):
    """Transpose a list of dicts into a dict of lists.

    Example: [{'a': 0, 'b': 3}, {'a': 3, 'b': 5}] -> {'a': [0, 3], 'b': [3, 5]}

    Values keep their list order.  The result is a defaultdict(list), so
    looking up a key that never occurred yields an empty list (callers rely
    on this for annotations without boxes).
    """
    assert isinstance(list_of_dicts, list)
    transposed = defaultdict(list)
    for record in list_of_dicts:
        for field, value in record.items():
            transposed[field].append(value)
    return transposed
def clip(some_value):
    """Clamp a float into the closed interval [0, 1]. float -> float

    Guards against annotations that were (eagerly) drawn slightly outside
    the image borders.
    """
    bounded_above = min(some_value, 1)
    return max(0, bounded_above)
def create_object_detection_tfrecords(labels, tfrecords_path, dataset_folder, set_name=''):
    """ Creates a tfrecord dataset specific to tensorflow/models/research/objection_detection
    params:
        labels: list of annotations as defined in annotation yamls
        tfrecords_path: output path to create tfrecords
        dataset_folder: path to bstld folder, must include rgb directory
        set_name: name shown in the progress bar, e.g. 'train'
    """
    #shuffle(labels)
    # Use the writer as a context manager so the tfrecord file is closed even
    # when an image is missing or encoding raises midway through.
    with tf.io.TFRecordWriter(tfrecords_path) as writer:
        for label in tqdm.tqdm(labels, desc='Creating {}-set'.format(set_name)):
            image_path = os.path.join(dataset_folder, label['path'])
            image = cv2.imread(image_path)
            if image is None:
                print('Did you extract the training, validation, and additional images?')
                raise IOError('Missing: {}'.format(image_path))
            height, width, _ = image.shape

            boxes = list_of_dicts_to_dict_of_lists(label['boxes'])
            classes = boxes['label']
            # Box corners normalized to [0, 1] relative coordinates; clip
            # guards against annotations slightly outside the image.
            xmin = list(map(lambda x: clip(x / float(width)), boxes['x_min']))
            ymin = list(map(lambda y: clip(y / float(height)), boxes['y_min']))
            xmax = list(map(lambda x: clip(x / float(width)), boxes['x_max']))
            ymax = list(map(lambda y: clip(y / float(height)), boxes['y_max']))
            assert len(xmin) == len(xmax) == len(ymin)
            assert len(ymax) == len(classes) == len(label['boxes'])
            if not classes:
                continue  # We don't need empty images, there are enough negatives

            _, image = cv2.imencode('.png', image)  # Assuming that works
            # ndarray.tostring() was deprecated and removed in numpy >= 1.24;
            # tobytes() returns the identical byte string.
            image = image.tobytes()
            sha256 = hashlib.sha256(image).hexdigest()
            image_format = 'png'
            complete_example = tf.train.Example(features=tf.train.Features(feature={
                'image/height': dataset_util.int64_feature(height),
                'image/width': dataset_util.int64_feature(width),
                'image/filename': dataset_util.bytes_feature(image_path.encode('utf8')),
                'image/encoded': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),
                'image/format': dataset_util.bytes_feature(image_format.encode('utf8')),
                'image/source_id': dataset_util.bytes_feature(image_path.encode('utf8')),
                'image/key/sha256': dataset_util.bytes_feature(sha256.encode('utf8')),
                'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
                'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
                'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
                'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
                'image/object/class/text': dataset_util.bytes_list_feature(
                    list(map(modified_label_string, classes))),
                'image/object/class/label': dataset_util.int64_list_feature(
                    list(map(label_id, classes))),
            }))
            writer.write(complete_example.SerializeToString())
def split_train_labels(train_labels):
    """Split the ordered training annotations into (train, valid) lists.

    Labels are assumed to be grouped by their source video/folder.  All
    images of the first three videos form the validation set, the rest
    stay in the training set.

    Note that this may not be a completely clean validation set as the
    sequences were captured independently but may be on the same day and
    within the same general area.  This split is for object detection
    demonstration purposes only; clean separation would require sequences
    recorded on separate days and preferably in different areas.

    validation samples: 933, training samples: 4160 (+215 additional)
    """
    # Folder (video) name for every single image, in annotation order.
    videos_per_image = [os.path.basename(os.path.dirname(entry['path']))
                        for entry in train_labels]
    # Unique video names in first-appearance order.  OrderedDict.fromkeys is
    # used because set iteration order is not guaranteed across Python
    # versions.
    unique_videos = list(OrderedDict.fromkeys(videos_per_image))
    # Total image count of the first three videos = validation set size.
    num_valid_samples = sum(videos_per_image.count(video)
                            for video in unique_videos[:3])
    return train_labels[num_valid_samples:], train_labels[:num_valid_samples]
def create_datasets(config):
    """Split the labels and write the train/valid/test tfrecords.

    params:
        config: dict with yaml label paths, tfrecord output paths and the
            dataset folder, see parse_args()
    """
    train_labels = get_all_labels(config['train_yaml'])
    test_labels = get_all_labels(config['test_yaml'])

    # Split training labels into training and validation for "more correct"
    # validation.  This must happen regardless of additional labels: the
    # original code only split inside the if-branch below, which raised a
    # NameError for valid_labels when no additional_yaml was given.
    train_labels, valid_labels = split_train_labels(train_labels)
    if config['additional_yaml']:
        additional_labels = get_all_labels(config['additional_yaml'])
        train_labels.extend(additional_labels)  # add unappealing images to training set

    if not os.path.isdir(config['dataset_folder']) or\
            not os.path.isdir(os.path.join(config['dataset_folder'], 'rgb')):
        # Only warn here; the missing images raise a descriptive IOError
        # during tfrecord creation.
        print('Dataset_folder needs to contain extracted dataset, including the rgb folder')
        print('{} does not fulfill those requirements'.format(config['dataset_folder']))

    create_object_detection_tfrecords(
        train_labels, config['train_tfrecord'], config['dataset_folder'], 'train')
    create_object_detection_tfrecords(
        valid_labels, config['valid_tfrecord'], config['dataset_folder'], 'valid')
    create_object_detection_tfrecords(
        test_labels, config['test_tfrecord'], config['dataset_folder'], 'test')
    print('Done creating tfrecords')
def parse_args():
    """Parse command line args into a tfrecords-creation config dict."""
    parser = argparse.ArgumentParser(description=__doc__)
    # (option name, default value, help text) -- all options are plain
    # string arguments, so they can be registered uniformly.
    option_table = [
        ('train_yaml', 'train.yaml', 'Path to train.yaml'),
        ('test_yaml', 'test.yaml', 'Path to test.yaml'),
        ('additional_yaml', 'additional_train.yaml', 'Path to train_additional.yaml'),
        ('dataset_folder', '.', 'Path to dataset folder'),
        ('train_tfrecord', 'train.tfrecords', 'Path to train.tfrecord'),
        ('valid_tfrecord', 'valid.tfrecords', 'Path to valid.tfrecord'),
        ('test_tfrecord', 'test.tfrecords', 'Path to test.tfrecord'),
    ]
    for option_name, default_value, help_text in option_table:
        parser.add_argument('--' + option_name, default=default_value, help=help_text)
    return vars(parser.parse_args())
# Script entry point: parse CLI options, then build all three tfrecords.
if __name__ == '__main__':
    config = parse_args()
    create_datasets(config)
| 43.452381 | 98 | 0.699288 |
import argparse
from collections import OrderedDict, defaultdict
import hashlib
import os
from random import shuffle
import cv2
import tensorflow as tf
import tqdm
from object_detection.utils import dataset_util
import sys
current = os.path.dirname(os.path.realpath(__file__))
parent = os.path.dirname(current)
sys.path.append(parent)
from read_label_file import get_all_labels
from tf_object_detection import constants
def label_id(label_string):
return constants.TF_ID_MAP[constants.SIMPLIFIED_CLASSES[label_string]]
def modified_label_string(label_string):
return constants.SIMPLIFIED_CLASSES[label_string].encode('utf8')
def list_of_dicts_to_dict_of_lists(list_of_dicts):
assert isinstance(list_of_dicts, list)
dict_lists = defaultdict(list)
for some_dict in list_of_dicts:
for key, value in some_dict.items():
dict_lists[key].append(value)
return dict_lists
def clip(some_value):
return max(0, min(some_value, 1))
def create_object_detection_tfrecords(labels, tfrecords_path, dataset_folder, set_name=''):
writer = tf.io.TFRecordWriter(tfrecords_path)
for label in tqdm.tqdm(labels, desc='Creating {}-set'.format(set_name)):
image_path = os.path.join(dataset_folder, label['path'])
image = cv2.imread(image_path)
if image is None:
print('Did you extract the training, validation, and additional images?')
raise IOError('Missing: {}'.format(image_path))
height, width, _ = image.shape
boxes = list_of_dicts_to_dict_of_lists(label['boxes'])
classes = boxes['label']
xmin = list(map(lambda x: clip(x / float(width)), boxes['x_min']))
ymin = list(map(lambda y: clip(y / float(height)), boxes['y_min']))
xmax = list(map(lambda x: clip(x / float(width)), boxes['x_max']))
ymax = list(map(lambda y: clip(y / float(height)), boxes['y_max']))
assert len(xmin) == len(xmax) == len(ymin)
assert len(ymax) == len(classes) == len(label['boxes'])
if not classes:
continue
_, image = cv2.imencode('.png', image) # Assuming that works
image = image.tostring()
sha256 = hashlib.sha256(image).hexdigest()
image_format = 'png'
complete_example = tf.train.Example(features=tf.train.Features(feature={
'image/height': dataset_util.int64_feature(height),
'image/width': dataset_util.int64_feature(width),
'image/filename': dataset_util.bytes_feature(image_path.encode('utf8')),
'image/encoded': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image])),
'image/format': dataset_util.bytes_feature(image_format.encode('utf8')),
'image/source_id': dataset_util.bytes_feature(image_path.encode('utf8')),
'image/key/sha256': dataset_util.bytes_feature(sha256.encode('utf8')),
'image/object/bbox/xmin': dataset_util.float_list_feature(xmin),
'image/object/bbox/xmax': dataset_util.float_list_feature(xmax),
'image/object/bbox/ymin': dataset_util.float_list_feature(ymin),
'image/object/bbox/ymax': dataset_util.float_list_feature(ymax),
'image/object/class/text': dataset_util.bytes_list_feature(
list(map(modified_label_string, classes))),
'image/object/class/label': dataset_util.int64_list_feature(
list(map(label_id, classes))),
}))
writer.write(complete_example.SerializeToString())
writer.close()
def split_train_labels(train_labels):
# one entry for each image in a folder/video to check their sizes later
train_videos = [os.path.split(os.path.split(train_label['path'])[0])[1]
for train_label in train_labels]
# NOTE Because set order is not guaranteed (and we want to support different Python versions)
video_dict = OrderedDict().fromkeys(train_videos)
video_lengths = [train_videos.count(video) for video in video_dict.keys()]
# The first three videos are used for the validation set.
# Note that this may not be a completely clean validation set as the sequences
# were captured independently but may be on the same day and are taken within
# the same general area. This split is for object detection demonstation
# purposes only. For clean dataset separation, the sequences would need to be
# recorded on separate days and preferably in different areas.
#
# validation samples: 933, training samples: 4160 (+215 additional)
num_valid_samples = sum(video_lengths[:3])
return train_labels[num_valid_samples:], train_labels[:num_valid_samples]
def create_datasets(config):
train_labels = get_all_labels(config['train_yaml'])
test_labels = get_all_labels(config['test_yaml'])
if config['additional_yaml']:
additional_labels = get_all_labels(config['additional_yaml'])
# Split training labels into training and validation for "more correct" validation
train_labels, valid_labels = split_train_labels(train_labels)
train_labels.extend(additional_labels) # add unappealing images to training set
if not os.path.isdir(config['dataset_folder']) or\
not os.path.isdir(os.path.join(config['dataset_folder'], 'rgb')):
print('Dataset_folder needs to contain extracted dataset, including the rgb folder')
print('{} does not fulfill those requirements'.format(config['dataset_folder']))
create_object_detection_tfrecords(
train_labels, config['train_tfrecord'], config['dataset_folder'], 'train')
create_object_detection_tfrecords(
valid_labels, config['valid_tfrecord'], config['dataset_folder'], 'valid')
create_object_detection_tfrecords(
test_labels, config['test_tfrecord'], config['dataset_folder'], 'test')
print('Done creating tfrecords')
def parse_args():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('--train_yaml', default='train.yaml',
help='Path to train.yaml')
parser.add_argument('--test_yaml', default='test.yaml',
help='Path to test.yaml')
parser.add_argument('--additional_yaml', default='additional_train.yaml',
help='Path to train_additional.yaml')
parser.add_argument('--dataset_folder', default='.',
help='Path to dataset folder')
parser.add_argument('--train_tfrecord', default='train.tfrecords',
help='Path to train.tfrecord')
parser.add_argument('--valid_tfrecord', default='valid.tfrecords',
help='Path to valid.tfrecord')
parser.add_argument('--test_tfrecord', default='test.tfrecords',
help='Path to test.tfrecord')
args = vars(parser.parse_args())
return args
if __name__ == '__main__':
config = parse_args()
create_datasets(config)
| true | true |
f723e0a0bf9e03267963a7c69b5889fe7fcdda12 | 2,069 | py | Python | scripts/generate_delta_sysroot_unittest.py | hustwei/chromite | 10eb79abeb64e859362546214b7e039096ac9830 | [
"BSD-3-Clause"
] | null | null | null | scripts/generate_delta_sysroot_unittest.py | hustwei/chromite | 10eb79abeb64e859362546214b7e039096ac9830 | [
"BSD-3-Clause"
] | null | null | null | scripts/generate_delta_sysroot_unittest.py | hustwei/chromite | 10eb79abeb64e859362546214b7e039096ac9830 | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2013 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for generate_delta_sysroot."""
from __future__ import print_function
import os
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.scripts import generate_delta_sysroot as gds
# pylint: disable=W0212
def _Parse(argv):
  """Run the script's private command line parser on |argv|."""
  return gds._ParseCommandLine(argv)
class InterfaceTest(cros_test_lib.OutputTestCase,
                    cros_test_lib.TempDirTestCase):
  """Test the commandline interface of the script"""

  def testNoBoard(self):
    """Parsing must fail when --board is missing."""
    argv = ['--out-dir', '/path/to/nowhere']
    self.assertParseError(argv)

  def testNoOutDir(self):
    """Parsing must fail when --out-dir is missing."""
    argv = ['--board', 'link']
    self.assertParseError(argv)

  def testCorrectArgv(self):
    """Valid --board/--out-dir args parse and pass FinishParsing."""
    argv = ['--board', 'link', '--out-dir', self.tempdir]
    options = _Parse(argv)
    gds.FinishParsing(options)

  def testTestsSet(self):
    """build_tests defaults to True when --skip-tests is absent."""
    argv = ['--board', 'link', '--out-dir', self.tempdir]
    options = _Parse(argv)
    self.assertTrue(options.build_tests)

  def testNoTestsSet(self):
    """--skip-tests disables build_tests."""
    argv = ['--board', 'link', '--out-dir', self.tempdir, '--skip-tests']
    options = _Parse(argv)
    self.assertFalse(options.build_tests)

  def assertParseError(self, argv):
    """Helper to assert parsing error, given argv."""
    # OutputCapturer swallows the usage text argparse prints before it
    # exits, keeping the test output clean.
    with self.OutputCapturer():
      self.assertRaises2(SystemExit, _Parse, argv)
class TestCreateBatchFile(cros_test_lib.TempDirTestCase):
  """Test the batch file creation."""

  def testSourceDirDoesNotExist(self):
    """Test error is raised if there is no source directory."""
    # A path that certainly does not exist under the fresh temp dir.
    no_source = os.path.join(self.tempdir, 'foo/bar/cow')
    # NOTE(review): CreateBatchFile apparently shells out, since a missing
    # source surfaces as RunCommandError -- confirm against the script.
    self.assertRaises2(
        cros_build_lib.RunCommandError, gds.CreateBatchFile,
        no_source, self.tempdir, os.path.join(self.tempdir, 'batch'))
| 29.985507 | 73 | 0.697438 |
from __future__ import print_function
import os
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.scripts import generate_delta_sysroot as gds
def _Parse(argv):
return gds._ParseCommandLine(argv)
class InterfaceTest(cros_test_lib.OutputTestCase,
cros_test_lib.TempDirTestCase):
def testNoBoard(self):
argv = ['--out-dir', '/path/to/nowhere']
self.assertParseError(argv)
def testNoOutDir(self):
argv = ['--board', 'link']
self.assertParseError(argv)
def testCorrectArgv(self):
argv = ['--board', 'link', '--out-dir', self.tempdir]
options = _Parse(argv)
gds.FinishParsing(options)
def testTestsSet(self):
argv = ['--board', 'link', '--out-dir', self.tempdir]
options = _Parse(argv)
self.assertTrue(options.build_tests)
def testNoTestsSet(self):
argv = ['--board', 'link', '--out-dir', self.tempdir, '--skip-tests']
options = _Parse(argv)
self.assertFalse(options.build_tests)
def assertParseError(self, argv):
with self.OutputCapturer():
self.assertRaises2(SystemExit, _Parse, argv)
class TestCreateBatchFile(cros_test_lib.TempDirTestCase):
def testSourceDirDoesNotExist(self):
no_source = os.path.join(self.tempdir, 'foo/bar/cow')
self.assertRaises2(
cros_build_lib.RunCommandError, gds.CreateBatchFile,
no_source, self.tempdir, os.path.join(self.tempdir, 'batch'))
| true | true |
f723e168770bfc02d7b2018b83bc4abe150a4e30 | 678 | py | Python | ex9_1_applications_agumentation.py | soyoung9306/-3-keras | e65f40171aadef3fe0b59c649b55b3f0bd09ca41 | [
"MIT"
] | 200 | 2017-10-23T05:05:34.000Z | 2022-01-25T00:58:45.000Z | ex9_1_applications_agumentation.py | MyeongHaHwang/keraspp | 4090fcc86072cda816d1d6056b5113ace49534ae | [
"MIT"
] | 8 | 2018-02-07T08:33:49.000Z | 2020-09-11T20:59:30.000Z | ex9_1_applications_agumentation.py | MyeongHaHwang/keraspp | 4090fcc86072cda816d1d6056b5113ace49534ae | [
"MIT"
] | 135 | 2017-12-15T05:41:47.000Z | 2021-12-15T12:21:09.000Z | """
CH 9.1 Applications/Image Augmentation
"""
from sklearn import model_selection
from keras import datasets
import keras
assert keras.backend.image_data_format() == 'channels_last'
from keraspp import aigen
class Machine(aigen.Machine_Generator):
    """Image-augmentation demo machine on a small CIFAR-10 subsample."""

    def __init__(self):
        # Load the full CIFAR-10 dataset; the test split is unused here.
        (x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data()
        # Keep only 2% of the training images so the demo runs quickly.
        _, X, _, y = model_selection.train_test_split(x_train, y_train, test_size=0.02)
        X = X.astype(float)
        # Augmentation parameters forwarded to the generator machinery:
        # random rotations of up to +/-10 degrees.
        gen_param_dict = {'rotation_range': 10}
        super().__init__(X, y, nb_classes=10, gen_param_dict=gen_param_dict)
def main():
    """Build the demo machine and run its training/evaluation pipeline."""
    m = Machine()
    m.run()

if __name__ == '__main__':
    main()
from keras import datasets
import keras
assert keras.backend.image_data_format() == 'channels_last'
from keraspp import aigen
class Machine(aigen.Machine_Generator):
def __init__(self):
(x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data()
_, X, _, y = model_selection.train_test_split(x_train, y_train, test_size=0.02)
X = X.astype(float)
gen_param_dict = {'rotation_range': 10}
super().__init__(X, y, nb_classes=10, gen_param_dict=gen_param_dict)
def main():
m = Machine()
m.run()
if __name__ == '__main__':
main() | true | true |
f723e1dd1990acf6f94ec2386a12f7b48ddc2589 | 274 | py | Python | core/index_db/index_object.py | AliRezaBeigy/Gitools | b7defd332bb144cb45962a351b1f56e941c8ca4b | [
"MIT"
] | 6 | 2021-01-06T05:18:06.000Z | 2022-03-17T06:44:29.000Z | core/index_db/index_object.py | AliRezaBeigy/Gitools | b7defd332bb144cb45962a351b1f56e941c8ca4b | [
"MIT"
] | null | null | null | core/index_db/index_object.py | AliRezaBeigy/Gitools | b7defd332bb144cb45962a351b1f56e941c8ca4b | [
"MIT"
] | 1 | 2021-04-15T20:51:26.000Z | 2021-04-15T20:51:26.000Z | class IndexObject:
hash: str
crc32: int
pack_end_offset: int
pack_start_offset: int
def __init__(self, hash: str, crc32: int, pack_start_offset: int):
self.hash = hash
self.crc32 = crc32
self.pack_start_offset = pack_start_offset | 27.4 | 70 | 0.667883 | class IndexObject:
hash: str
crc32: int
pack_end_offset: int
pack_start_offset: int
def __init__(self, hash: str, crc32: int, pack_start_offset: int):
self.hash = hash
self.crc32 = crc32
self.pack_start_offset = pack_start_offset | true | true |
f723e22ccbf007fd40e2124e7515473ede314c02 | 1,440 | py | Python | setup.py | dharif23/xtermcolors | fb35b9b7a04fbf7a0ea236bb94275240c6322b1a | [
"MIT"
] | null | null | null | setup.py | dharif23/xtermcolors | fb35b9b7a04fbf7a0ea236bb94275240c6322b1a | [
"MIT"
] | null | null | null | setup.py | dharif23/xtermcolors | fb35b9b7a04fbf7a0ea236bb94275240c6322b1a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Dalwar Hossain'
__email__ = 'dalwar.hossain@protonmail.com'
import io

from setuptools import setup
def readme():
    """Return the contents of README.md for use as the long description.

    The file is read as UTF-8 explicitly: the platform default encoding can
    make the build fail on systems where it is not UTF-8 (e.g. cp1252 on
    Windows).  io.open is used because the classifiers still advertise
    Python 2.7 support and the builtin open() lacks the encoding argument
    there.
    """
    with io.open('README.md', encoding='utf-8') as f:
        return f.read()
# Package metadata for pyrainbowterm.
setup(name='pyrainbowterm',
      version='1.0',
      description='pyrainbowterm - Smart custom print function with color and log information support',
      long_description=readme(),
      # Without this, PyPI renders the markdown README as plain text.
      long_description_content_type='text/markdown',
      classifiers=[
          'Development Status :: 4 - Beta',
          'Intended Audience :: Developers',
          'Intended Audience :: Science/Research',
          'License :: OSI Approved :: MIT License',
          'Operating System :: OS Independent',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3',
          'Programming Language :: Python :: 3.4',
          'Programming Language :: Python :: 3.5',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Topic :: Software Development :: Libraries :: Python Modules'],
      keywords='terminal colors xterm python colored output',
      url='https://github.com/dharif23/pyrainbowterm',
      author='Dalwar Hossain',
      author_email='dalwar.hossain@protonmail.com',
      license='MIT',
      packages=['pyrainbowterm'],
      include_package_data=True,
      zip_safe=False,
      )
| 32.727273 | 103 | 0.603472 |
__author__ = 'Dalwar Hossain'
__email__ = 'dalwar.hossain@protonmail.com'
from setuptools import setup
def readme():
with open('README.md') as f:
return f.read()
setup(name='pyrainbowterm',
version='1.0',
description='pyrainbowterm - Smart custom print function with color and log information support',
long_description=readme(),
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries :: Python Modules'],
keywords='terminal colors xterm python colored output',
url='https://github.com/dharif23/pyrainbowterm',
author='Dalwar Hossain',
author_email='dalwar.hossain@protonmail.com',
license='MIT',
packages=['pyrainbowterm'],
include_package_data=True,
zip_safe=False,
)
| true | true |
f723e3633555478d9d24ed98d7498972bfe2deda | 13,424 | py | Python | pyabsa/core/tc/prediction/text_classifier.py | yangheng95/PyABSA | f5b46047a58fa8054a0469486be3f1cada933814 | [
"MIT"
] | 199 | 2021-06-07T15:07:28.000Z | 2022-03-31T11:53:28.000Z | pyabsa/core/tc/prediction/text_classifier.py | yangheng95/PyABSA | f5b46047a58fa8054a0469486be3f1cada933814 | [
"MIT"
] | 98 | 2021-06-06T06:01:02.000Z | 2022-03-31T15:48:28.000Z | pyabsa/core/tc/prediction/text_classifier.py | yangheng95/PyABSA | f5b46047a58fa8054a0469486be3f1cada933814 | [
"MIT"
] | 55 | 2021-06-10T08:52:17.000Z | 2022-03-31T11:08:58.000Z | # -*- coding: utf-8 -*-
# file: text_classifier.py
# author: yangheng <yangheng@m.scnu.edu.cn>
# Copyright (C) 2020. All Rights Reserved.
import json
import os
import pickle
import random
import numpy
import torch
from findfile import find_file
from termcolor import colored
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, AutoModel
from pyabsa.functional.dataset import detect_infer_dataset
from ..models import GloVeClassificationModelList, BERTClassificationModelList
from ..classic.__glove__.dataset_utils.data_utils_for_inferring import GloVeClassificationDataset
from ..classic.__bert__.dataset_utils.data_utils_for_inferring import BERTClassificationDataset
from ..classic.__glove__.dataset_utils.data_utils_for_training import LABEL_PADDING, build_embedding_matrix, build_tokenizer
from pyabsa.utils.pyabsa_utils import print_args, TransformerConnectionError
class TextClassifier:
    """Inference front-end for a trained text classification model.

    Wraps either the objects produced by an in-process training run or a
    checkpoint loaded from disk (pickled config + state_dict/model + tokenizer),
    and exposes single-sample (`infer`) and batch (`batch_infer`) prediction.
    """

    def __init__(self, model_arg=None, label_map=None, eval_batch_size=128):
        '''
        Args:
            model_arg: a (model, opt, tokenizer) tuple coming straight from a
                training session, or a str path to a saved checkpoint directory.
            label_map: deprecated; mapping from label index to label text.
            eval_batch_size: batch size used by the inference DataLoader.
        '''
        # Candidate weight initializers, keyed by name (kept for parity with training).
        self.initializers = {
            'xavier_uniform_': torch.nn.init.xavier_uniform_,
            # NOTE(review): this key maps to the deprecated, un-suffixed
            # `xavier_normal` (not the in-place `xavier_normal_`) -- confirm intended.
            'xavier_normal_': torch.nn.init.xavier_normal,
            'orthogonal_': torch.nn.init.orthogonal_
        }
        # Case 1: reuse the model/opt/tokenizer objects from a training session
        if not isinstance(model_arg, str):
            print('Load text classifier from training')
            self.model = model_arg[0]
            self.opt = model_arg[1]
            self.tokenizer = model_arg[2]
        # Case 2: load a checkpoint from the path given in `model_arg`
        else:
            try:
                if 'fine-tuned' in model_arg:
                    raise ValueError('Do not support to directly load a fine-tuned model, please load a .state_dict or .model instead!')
                print('Load text classifier from', model_arg)
                # Locate the checkpoint artifacts inside the given directory
                state_dict_path = find_file(model_arg, '.state_dict', exclude_key=['__MACOSX'])
                model_path = find_file(model_arg, '.model', exclude_key=['__MACOSX'])
                tokenizer_path = find_file(model_arg, '.tokenizer', exclude_key=['__MACOSX'])
                config_path = find_file(model_arg, '.config', exclude_key=['__MACOSX'])

                print('config: {}'.format(config_path))
                print('state_dict: {}'.format(state_dict_path))
                print('model: {}'.format(model_path))
                print('tokenizer: {}'.format(tokenizer_path))

                # The pickled config carries the training hyper-parameters
                self.opt = pickle.load(open(config_path, mode='rb'))

                if state_dict_path or model_path:
                    # BERT-family checkpoint: model class is not in the GloVe list
                    if not hasattr(GloVeClassificationModelList, self.opt.model.__name__.upper()):
                        if 'pretrained_bert_name' in self.opt.args or 'pretrained_bert' in self.opt.args:
                            # Backward compatibility with the older option name
                            if 'pretrained_bert_name' in self.opt.args:
                                self.opt.pretrained_bert = self.opt.pretrained_bert_name
                        if state_dict_path:
                            try:
                                # Rebuild the backbone, then restore trained weights below
                                self.bert = AutoModel.from_pretrained(self.opt.pretrained_bert)
                                self.model = self.opt.model(self.bert, self.opt)
                            except ValueError:
                                raise TransformerConnectionError()
                        elif model_path:
                            if model_path:
                                self.model = torch.load(model_path, map_location='cpu')

                        if tokenizer_path:
                            self.tokenizer = pickle.load(open(tokenizer_path, mode='rb'))
                        else:
                            raise ValueError('No .tokenizer found!')
                    # GloVe-family checkpoint: rebuild tokenizer/embedding from the dataset
                    else:
                        self.tokenizer = build_tokenizer(
                            dataset_list=self.opt.dataset_file,
                            max_seq_len=self.opt.max_seq_len,
                            dat_fname='{0}_tokenizer.dat'.format(os.path.basename(self.opt.dataset_name)),
                            opt=self.opt
                        )
                        if model_path:
                            self.model = torch.load(model_path, map_location='cpu')
                        else:
                            self.embedding_matrix = build_embedding_matrix(
                                word2idx=self.tokenizer.word2idx,
                                embed_dim=self.opt.embed_dim,
                                dat_fname='{0}_{1}_embedding_matrix.dat'.format(str(self.opt.embed_dim), os.path.basename(self.opt.dataset_name)),
                                opt=self.opt
                            )
                            self.model = self.opt.model(self.embedding_matrix, self.opt).to(self.opt.device)
                            self.model.load_state_dict(torch.load(state_dict_path, map_location='cpu'))

                print('Config used in Training:')
                print_args(self.opt, mode=1)

            except Exception as e:
                raise RuntimeError('Exception: {} Fail to load the model from {}! '.format(e, model_arg))

            # Sanity check: the checkpoint must come from a classification model
            if not hasattr(GloVeClassificationModelList, self.model.__class__.__name__) \
                    and not hasattr(BERTClassificationModelList, self.model.__class__.__name__):
                raise KeyError('The checkpoint you are loading is not from classifier model.')

        # Choose the dataset wrapper matching the model family
        if hasattr(BERTClassificationModelList, self.opt.model.__name__):
            self.dataset = BERTClassificationDataset(tokenizer=self.tokenizer, opt=self.opt)
        elif hasattr(GloVeClassificationModelList, self.opt.model.__name__):
            self.dataset = GloVeClassificationDataset(tokenizer=self.tokenizer, opt=self.opt)

        self.opt.inputs_cols = self.model.inputs

        self.infer_dataloader = None
        self.opt.eval_batch_size = eval_batch_size

        # Make inference reproducible when a seed was configured during training
        if self.opt.seed is not None:
            random.seed(self.opt.seed)
            numpy.random.seed(self.opt.seed)
            torch.manual_seed(self.opt.seed)
            torch.cuda.manual_seed(self.opt.seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False

        self.opt.initializer = self.opt.initializer

        self.label_map = None
        self.set_label_map(label_map)

    def set_label_map(self, label_map):
        """Installs a (deprecated) index->label mapping; padding renders as ''."""
        if label_map:
            print(colored('Warning: label map is deprecated, please directly set labels within dataset.', 'red'))
            # The padding label is displayed as an empty string
            label_map[LABEL_PADDING] = ''
            self.label_map = label_map

    def to(self, device=None):
        # Move both the config and the model to the given device
        self.opt.device = device
        self.model.to(device)

    def cpu(self):
        # Convenience shortcut for moving the model to CPU
        self.opt.device = 'cpu'
        self.model.to('cpu')

    def cuda(self, device='cuda:0'):
        # Convenience shortcut for moving the model to a CUDA device
        self.opt.device = device
        self.model.to(device)

    def _log_write_args(self):
        """Prints trainable/non-trainable parameter counts and every set option."""
        n_trainable_params, n_nontrainable_params = 0, 0
        for p in self.model.parameters():
            n_params = torch.prod(torch.tensor(p.shape))
            if p.requires_grad:
                n_trainable_params += n_params
            else:
                n_nontrainable_params += n_params
        print(
            'n_trainable_params: {0}, n_nontrainable_params: {1}'.format(n_trainable_params, n_nontrainable_params))
        for arg in vars(self.opt):
            if getattr(self.opt, arg) is not None:
                print('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))

    def batch_infer(self,
                    target_file=None,
                    print_result=True,
                    save_result=False,
                    clear_input_samples=True,
                    ignore_error=True):
        """Runs inference over a whole inference dataset file.

        Args:
            target_file: dataset name/path resolved via `detect_infer_dataset`.
            print_result: echo per-sample predictions to stdout.
            save_result: dump results to `text_classification.result.json` in CWD.
            clear_input_samples: drop previously prepared samples first.
            ignore_error: skip malformed samples instead of raising.

        Returns:
            List of per-sample result dicts (see `_infer`).
        """
        if clear_input_samples:
            self.clear_input_samples()

        save_path = os.path.join(os.getcwd(), 'text_classification.result.json')

        target_file = detect_infer_dataset(target_file, task='text_classification')
        if not target_file:
            raise FileNotFoundError('Can not find inference datasets!')

        self.dataset.prepare_infer_dataset(target_file, ignore_error=ignore_error)
        self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, pin_memory=True, shuffle=False)
        return self._infer(save_path=save_path if save_result else None, print_result=print_result)

    def infer(self, text: str = None,
              print_result=True,
              clear_input_samples=True):
        """Runs inference on a single raw text sample and returns the results."""
        if clear_input_samples:
            self.clear_input_samples()
        if text:
            self.dataset.prepare_infer_sample(text)
        else:
            raise RuntimeError('Please specify your datasets path!')
        self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, shuffle=False)
        return self._infer(print_result=print_result)

    def merge_results(self, results):
        """Merges consecutive result entries that share the same input text,
        collecting their labels/ref-labels/ref-checks into lists.
        """
        final_res = []
        for result in results:
            # Same (whitespace-normalized) text as the previous entry: merge labels
            if final_res and "".join(final_res[-1]['text'].split()) == "".join(result['text'].split()):
                final_res[-1]['label'].append(result['label'])
                final_res[-1]['ref_label'].append(result['ref_label'])
                final_res[-1]['ref_check'].append(result['ref_check'])
            else:
                final_res.append(
                    {
                        'text': result['text'].replace(' ', ' '),
                        'label': [result['label']],
                        'ref_label': [result['ref_label']],
                        'ref_check': [result['ref_check']]
                    }
                )
        return final_res

    def _infer(self, save_path=None, print_result=True):
        """Shared inference loop used by `infer` and `batch_infer`.

        Returns:
            A list of dicts with keys 'text', 'label', 'ref_label' and
            'ref_check'; optionally prints them and/or saves them as JSON.
        """
        _params = filter(lambda p: p.requires_grad, self.model.parameters())

        correct = {True: 'Correct', False: 'Wrong'}
        results = []

        with torch.no_grad():
            self.model.eval()
            n_correct = 0
            n_labeled = 0
            n_total = 0
            for _, sample in enumerate(self.infer_dataloader):
                # Gather model inputs; 'label' is ground truth, not a model input
                inputs = [sample[col].to(self.opt.device) for col in self.opt.inputs_cols if col != 'label']
                self.model.eval()
                outputs = self.model(inputs)
                sen_logits = outputs
                t_probs = torch.softmax(sen_logits, dim=-1).cpu().numpy()
                for i, i_probs in enumerate(t_probs):
                    if 'index_to_label' in self.opt.args and int(i_probs.argmax(axis=-1)):
                        sent = self.opt.index_to_label[int(i_probs.argmax(axis=-1))]
                        # NOTE(review): `sample['label']` is a whole batch here; `!= -999`
                        # is element-wise on tensors -- confirm behavior for batch size > 1.
                        if sample['label'] != -999:
                            real_sent = sample['label'][i] if isinstance(sample['label'][i], str) else self.opt.index_to_label.get(int(sample['label'][i]), 'N.A.')
                        else:
                            real_sent = 'N.A.'
                        if real_sent != -999 and real_sent != '-999':
                            n_labeled += 1
                            if sent == real_sent:
                                n_correct += 1
                    else:  # for the former versions until 1.2.0
                        sent = int(i_probs.argmax(axis=-1))
                        real_sent = int(sample['label'][i])
                    text_raw = sample['text_raw'][i]
                    results.append({
                        'text': text_raw,
                        'label': sent,
                        'ref_label': real_sent,
                        'ref_check': correct[sent == real_sent] if real_sent != '-999' else '',
                    })
                    n_total += 1

        # Only print aggregate stats for multi-batch (i.e. batch) inference
        if len(self.infer_dataloader) > 1:
            print('Total samples:{}'.format(n_total))
            print('Labeled samples:{}'.format(n_labeled))
            print('Prediction Accuracy:{}%'.format(100 * n_correct / n_labeled if n_labeled else 'N.A.'))

        try:
            if print_result:
                for result in results:
                    text_printing = result['text']
                    if result['ref_label'] != -999:
                        # Green when the prediction matches the reference, red otherwise
                        if result['label'] == result['ref_label']:
                            text_info = colored(' -> {}(ref:{})'.format(result['label'], result['ref_label']), 'green')
                        else:
                            text_info = colored(' -> {}(ref:{})'.format(result['label'], result['ref_label']), 'red')
                    else:
                        text_info = ' -> {}'.format(result['label'])
                    text_printing += text_info
                    print(text_printing)
            if save_path:
                # NOTE(review): `fout` is never closed, and
                # `json.dump(json.JSONEncoder().encode(...))` serializes the payload
                # twice (writes a JSON-encoded *string*) -- confirm readers expect this.
                fout = open(save_path, 'w', encoding='utf8')
                json.dump(json.JSONEncoder().encode({'results': results}), fout, ensure_ascii=False)
                print('inference result saved in: {}'.format(save_path))
        except Exception as e:
            print('Can not save result: {}, Exception: {}'.format(text_raw, e))

        return results

    def clear_input_samples(self):
        # Drop any samples prepared by a previous infer/batch_infer call
        self.dataset.all_data = []
| 45.505085 | 163 | 0.560489 |
import json
import os
import pickle
import random
import numpy
import torch
from findfile import find_file
from termcolor import colored
from torch.utils.data import DataLoader
from transformers import AutoTokenizer, AutoModel
from pyabsa.functional.dataset import detect_infer_dataset
from ..models import GloVeClassificationModelList, BERTClassificationModelList
from ..classic.__glove__.dataset_utils.data_utils_for_inferring import GloVeClassificationDataset
from ..classic.__bert__.dataset_utils.data_utils_for_inferring import BERTClassificationDataset
from ..classic.__glove__.dataset_utils.data_utils_for_training import LABEL_PADDING, build_embedding_matrix, build_tokenizer
from pyabsa.utils.pyabsa_utils import print_args, TransformerConnectionError
class TextClassifier:
    """Inference front-end for a trained text classification model.

    Wraps either the objects produced by an in-process training run or a
    checkpoint loaded from disk (pickled config + state_dict/model + tokenizer),
    and exposes single-sample (`infer`) and batch (`batch_infer`) prediction.
    """

    def __init__(self, model_arg=None, label_map=None, eval_batch_size=128):
        '''
        Args:
            model_arg: a (model, opt, tokenizer) tuple coming straight from a
                training session, or a str path to a saved checkpoint directory.
            label_map: deprecated; mapping from label index to label text.
            eval_batch_size: batch size used by the inference DataLoader.
        '''
        # Candidate weight initializers, keyed by name (kept for parity with training).
        self.initializers = {
            'xavier_uniform_': torch.nn.init.xavier_uniform_,
            # NOTE(review): this key maps to the deprecated, un-suffixed
            # `xavier_normal` (not the in-place `xavier_normal_`) -- confirm intended.
            'xavier_normal_': torch.nn.init.xavier_normal,
            'orthogonal_': torch.nn.init.orthogonal_
        }
        # Case 1: reuse the model/opt/tokenizer objects from a training session
        if not isinstance(model_arg, str):
            print('Load text classifier from training')
            self.model = model_arg[0]
            self.opt = model_arg[1]
            self.tokenizer = model_arg[2]
        # Case 2: load a checkpoint from the path given in `model_arg`
        else:
            try:
                if 'fine-tuned' in model_arg:
                    raise ValueError('Do not support to directly load a fine-tuned model, please load a .state_dict or .model instead!')
                print('Load text classifier from', model_arg)
                # Locate the checkpoint artifacts inside the given directory
                state_dict_path = find_file(model_arg, '.state_dict', exclude_key=['__MACOSX'])
                model_path = find_file(model_arg, '.model', exclude_key=['__MACOSX'])
                tokenizer_path = find_file(model_arg, '.tokenizer', exclude_key=['__MACOSX'])
                config_path = find_file(model_arg, '.config', exclude_key=['__MACOSX'])

                print('config: {}'.format(config_path))
                print('state_dict: {}'.format(state_dict_path))
                print('model: {}'.format(model_path))
                print('tokenizer: {}'.format(tokenizer_path))

                # The pickled config carries the training hyper-parameters
                self.opt = pickle.load(open(config_path, mode='rb'))

                if state_dict_path or model_path:
                    # BERT-family checkpoint: model class is not in the GloVe list
                    if not hasattr(GloVeClassificationModelList, self.opt.model.__name__.upper()):
                        if 'pretrained_bert_name' in self.opt.args or 'pretrained_bert' in self.opt.args:
                            # Backward compatibility with the older option name
                            if 'pretrained_bert_name' in self.opt.args:
                                self.opt.pretrained_bert = self.opt.pretrained_bert_name
                        if state_dict_path:
                            try:
                                # Rebuild the backbone, then restore trained weights below
                                self.bert = AutoModel.from_pretrained(self.opt.pretrained_bert)
                                self.model = self.opt.model(self.bert, self.opt)
                            except ValueError:
                                raise TransformerConnectionError()
                        elif model_path:
                            if model_path:
                                self.model = torch.load(model_path, map_location='cpu')

                        if tokenizer_path:
                            self.tokenizer = pickle.load(open(tokenizer_path, mode='rb'))
                        else:
                            raise ValueError('No .tokenizer found!')
                    # GloVe-family checkpoint: rebuild tokenizer/embedding from the dataset
                    else:
                        self.tokenizer = build_tokenizer(
                            dataset_list=self.opt.dataset_file,
                            max_seq_len=self.opt.max_seq_len,
                            dat_fname='{0}_tokenizer.dat'.format(os.path.basename(self.opt.dataset_name)),
                            opt=self.opt
                        )
                        if model_path:
                            self.model = torch.load(model_path, map_location='cpu')
                        else:
                            self.embedding_matrix = build_embedding_matrix(
                                word2idx=self.tokenizer.word2idx,
                                embed_dim=self.opt.embed_dim,
                                dat_fname='{0}_{1}_embedding_matrix.dat'.format(str(self.opt.embed_dim), os.path.basename(self.opt.dataset_name)),
                                opt=self.opt
                            )
                            self.model = self.opt.model(self.embedding_matrix, self.opt).to(self.opt.device)
                            self.model.load_state_dict(torch.load(state_dict_path, map_location='cpu'))

                print('Config used in Training:')
                print_args(self.opt, mode=1)

            except Exception as e:
                raise RuntimeError('Exception: {} Fail to load the model from {}! '.format(e, model_arg))

            # Sanity check: the checkpoint must come from a classification model
            if not hasattr(GloVeClassificationModelList, self.model.__class__.__name__) \
                    and not hasattr(BERTClassificationModelList, self.model.__class__.__name__):
                raise KeyError('The checkpoint you are loading is not from classifier model.')

        # Choose the dataset wrapper matching the model family
        if hasattr(BERTClassificationModelList, self.opt.model.__name__):
            self.dataset = BERTClassificationDataset(tokenizer=self.tokenizer, opt=self.opt)
        elif hasattr(GloVeClassificationModelList, self.opt.model.__name__):
            self.dataset = GloVeClassificationDataset(tokenizer=self.tokenizer, opt=self.opt)

        self.opt.inputs_cols = self.model.inputs

        self.infer_dataloader = None
        self.opt.eval_batch_size = eval_batch_size

        # Make inference reproducible when a seed was configured during training
        if self.opt.seed is not None:
            random.seed(self.opt.seed)
            numpy.random.seed(self.opt.seed)
            torch.manual_seed(self.opt.seed)
            torch.cuda.manual_seed(self.opt.seed)
            torch.backends.cudnn.deterministic = True
            torch.backends.cudnn.benchmark = False

        self.opt.initializer = self.opt.initializer

        self.label_map = None
        self.set_label_map(label_map)

    def set_label_map(self, label_map):
        """Installs a (deprecated) index->label mapping; padding renders as ''."""
        if label_map:
            print(colored('Warning: label map is deprecated, please directly set labels within dataset.', 'red'))
            # The padding label is displayed as an empty string
            label_map[LABEL_PADDING] = ''
            self.label_map = label_map

    def to(self, device=None):
        # Move both the config and the model to the given device
        self.opt.device = device
        self.model.to(device)

    def cpu(self):
        # Convenience shortcut for moving the model to CPU
        self.opt.device = 'cpu'
        self.model.to('cpu')

    def cuda(self, device='cuda:0'):
        # Convenience shortcut for moving the model to a CUDA device
        self.opt.device = device
        self.model.to(device)

    def _log_write_args(self):
        """Prints trainable/non-trainable parameter counts and every set option."""
        n_trainable_params, n_nontrainable_params = 0, 0
        for p in self.model.parameters():
            n_params = torch.prod(torch.tensor(p.shape))
            if p.requires_grad:
                n_trainable_params += n_params
            else:
                n_nontrainable_params += n_params
        print(
            'n_trainable_params: {0}, n_nontrainable_params: {1}'.format(n_trainable_params, n_nontrainable_params))
        for arg in vars(self.opt):
            if getattr(self.opt, arg) is not None:
                print('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))

    def batch_infer(self,
                    target_file=None,
                    print_result=True,
                    save_result=False,
                    clear_input_samples=True,
                    ignore_error=True):
        """Runs inference over a whole inference dataset file.

        Args:
            target_file: dataset name/path resolved via `detect_infer_dataset`.
            print_result: echo per-sample predictions to stdout.
            save_result: dump results to `text_classification.result.json` in CWD.
            clear_input_samples: drop previously prepared samples first.
            ignore_error: skip malformed samples instead of raising.

        Returns:
            List of per-sample result dicts (see `_infer`).
        """
        if clear_input_samples:
            self.clear_input_samples()

        save_path = os.path.join(os.getcwd(), 'text_classification.result.json')

        target_file = detect_infer_dataset(target_file, task='text_classification')
        if not target_file:
            raise FileNotFoundError('Can not find inference datasets!')

        self.dataset.prepare_infer_dataset(target_file, ignore_error=ignore_error)
        self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, pin_memory=True, shuffle=False)
        return self._infer(save_path=save_path if save_result else None, print_result=print_result)

    def infer(self, text: str = None,
              print_result=True,
              clear_input_samples=True):
        """Runs inference on a single raw text sample and returns the results."""
        if clear_input_samples:
            self.clear_input_samples()
        if text:
            self.dataset.prepare_infer_sample(text)
        else:
            raise RuntimeError('Please specify your datasets path!')
        self.infer_dataloader = DataLoader(dataset=self.dataset, batch_size=self.opt.eval_batch_size, shuffle=False)
        return self._infer(print_result=print_result)

    def merge_results(self, results):
        """Merges consecutive result entries that share the same input text,
        collecting their labels/ref-labels/ref-checks into lists.
        """
        final_res = []
        for result in results:
            # Same (whitespace-normalized) text as the previous entry: merge labels
            if final_res and "".join(final_res[-1]['text'].split()) == "".join(result['text'].split()):
                final_res[-1]['label'].append(result['label'])
                final_res[-1]['ref_label'].append(result['ref_label'])
                final_res[-1]['ref_check'].append(result['ref_check'])
            else:
                final_res.append(
                    {
                        'text': result['text'].replace(' ', ' '),
                        'label': [result['label']],
                        'ref_label': [result['ref_label']],
                        'ref_check': [result['ref_check']]
                    }
                )
        return final_res

    def _infer(self, save_path=None, print_result=True):
        """Shared inference loop used by `infer` and `batch_infer`.

        Returns:
            A list of dicts with keys 'text', 'label', 'ref_label' and
            'ref_check'; optionally prints them and/or saves them as JSON.
        """
        _params = filter(lambda p: p.requires_grad, self.model.parameters())

        correct = {True: 'Correct', False: 'Wrong'}
        results = []

        with torch.no_grad():
            self.model.eval()
            n_correct = 0
            n_labeled = 0
            n_total = 0
            for _, sample in enumerate(self.infer_dataloader):
                # Gather model inputs; 'label' is ground truth, not a model input
                inputs = [sample[col].to(self.opt.device) for col in self.opt.inputs_cols if col != 'label']
                self.model.eval()
                outputs = self.model(inputs)
                sen_logits = outputs
                t_probs = torch.softmax(sen_logits, dim=-1).cpu().numpy()
                for i, i_probs in enumerate(t_probs):
                    if 'index_to_label' in self.opt.args and int(i_probs.argmax(axis=-1)):
                        sent = self.opt.index_to_label[int(i_probs.argmax(axis=-1))]
                        # NOTE(review): `sample['label']` is a whole batch here; `!= -999`
                        # is element-wise on tensors -- confirm behavior for batch size > 1.
                        if sample['label'] != -999:
                            real_sent = sample['label'][i] if isinstance(sample['label'][i], str) else self.opt.index_to_label.get(int(sample['label'][i]), 'N.A.')
                        else:
                            real_sent = 'N.A.'
                        if real_sent != -999 and real_sent != '-999':
                            n_labeled += 1
                            if sent == real_sent:
                                n_correct += 1
                    else:
                        # Legacy path: report raw indices instead of label strings
                        sent = int(i_probs.argmax(axis=-1))
                        real_sent = int(sample['label'][i])
                    text_raw = sample['text_raw'][i]
                    results.append({
                        'text': text_raw,
                        'label': sent,
                        'ref_label': real_sent,
                        'ref_check': correct[sent == real_sent] if real_sent != '-999' else '',
                    })
                    n_total += 1

        # Only print aggregate stats for multi-batch (i.e. batch) inference
        if len(self.infer_dataloader) > 1:
            print('Total samples:{}'.format(n_total))
            print('Labeled samples:{}'.format(n_labeled))
            print('Prediction Accuracy:{}%'.format(100 * n_correct / n_labeled if n_labeled else 'N.A.'))

        try:
            if print_result:
                for result in results:
                    text_printing = result['text']
                    if result['ref_label'] != -999:
                        # Green when the prediction matches the reference, red otherwise
                        if result['label'] == result['ref_label']:
                            text_info = colored(' -> {}(ref:{})'.format(result['label'], result['ref_label']), 'green')
                        else:
                            text_info = colored(' -> {}(ref:{})'.format(result['label'], result['ref_label']), 'red')
                    else:
                        text_info = ' -> {}'.format(result['label'])
                    text_printing += text_info
                    print(text_printing)
            if save_path:
                # NOTE(review): `fout` is never closed, and
                # `json.dump(json.JSONEncoder().encode(...))` serializes the payload
                # twice (writes a JSON-encoded *string*) -- confirm readers expect this.
                fout = open(save_path, 'w', encoding='utf8')
                json.dump(json.JSONEncoder().encode({'results': results}), fout, ensure_ascii=False)
                print('inference result saved in: {}'.format(save_path))
        except Exception as e:
            print('Can not save result: {}, Exception: {}'.format(text_raw, e))

        return results

    def clear_input_samples(self):
        # Drop any samples prepared by a previous infer/batch_infer call
        self.dataset.all_data = []
| true | true |
f723e39e9c8312aae14d30f2933e41983a1269e0 | 16,210 | py | Python | syfertext/language.py | AlexKer/SyferText | 021eea2255d9d8e1fc49c98c7b5f98b9e516ba21 | [
"Apache-2.0"
] | null | null | null | syfertext/language.py | AlexKer/SyferText | 021eea2255d9d8e1fc49c98c7b5f98b9e516ba21 | [
"Apache-2.0"
] | null | null | null | syfertext/language.py | AlexKer/SyferText | 021eea2255d9d8e1fc49c98c7b5f98b9e516ba21 | [
"Apache-2.0"
] | null | null | null | from .tokenizer import Tokenizer
from .vocab import Vocab
from .doc import Doc
from .pointers.doc_pointer import DocPointer
from .pipeline import SubPipeline
from syft.generic.object import AbstractObject
from syft.workers.base import BaseWorker
from syft.generic.string import String
from syft.generic.pointers.string_pointer import StringPointer
from syft.generic.pointers.object_pointer import ObjectPointer
from typing import List, Union, Tuple
class BaseDefaults(object):
    """Holds the factory methods that build the default components
    (vocabulary and tokenizer) of a `Language` object.
    """

    @classmethod
    def create_vocab(cls, model_name) -> Vocab:
        """Builds and returns the `Vocab` container for `model_name`.

        Currently a thin wrapper: the `Vocab` object simply holds the
        word-vector table and related metadata for the named model.
        """
        return Vocab(model_name)

    @classmethod
    def create_tokenizer(cls, vocab,) -> Tokenizer:
        """Builds and returns the default `Tokenizer` over `vocab`.

        The tokenizer is what produces the `Doc` container that all
        downstream pipeline components annotate.
        """
        return Tokenizer(vocab,)
class Language(AbstractObject):
    """Inspired by spaCy Language class.

    Orchestrates the interactions between different components of the pipeline
    to accomplish core text-processing tasks. It creates the Doc object which
    is the container into which all text-processing pipeline components feed
    their results.
    """

    def __init__(
        self,
        model_name,
        id: int = None,
        owner: BaseWorker = None,
        tags: List[str] = None,
        description: str = None,
    ):
        """Builds the vocabulary, registers the default tokenizer factory and
        initializes the pipeline template with the tokenizer only.
        """

        # Define the default settings
        self.Defaults = BaseDefaults

        # Create the vocabulary
        self.vocab = self.Defaults.create_vocab(model_name)

        # Associates to the name of each text-processing component of the
        # pipeline an object that is charged to accomplish the job.
        self.factories = {"tokenizer": self.Defaults.create_tokenizer(self.vocab)}

        # Initialize the pipeline template.
        # It only contains the tokenizer at initialization, and the
        # tokenizer always has 'remote' set to True.
        self.pipeline_template = [{"remote": True, "name": "tokenizer"}]

        # Initialize the main pipeline (creates `self.subpipeline_templates`
        # and `self.pipeline`)
        self._reset_pipeline()

        super(Language, self).__init__(id=id, owner=owner, tags=tags, description=description)

    @property
    def pipe_names(self) -> List[str]:
        """Returns a list of component names in the pipeline in order of execution.

        Returns:
            (list): List of all pipeline component names in order of execution.
        """
        return [pipe_template["name"] for pipe_template in self.pipeline_template]

    def _parse_pipeline_template(self):
        """Parses the `pipeline_template` property to create the
        `subpipeline_templates` property: adjacent pipe templates that share
        the same `remote` value are aggregated into one subpipeline template.
        """

        # Start a subpipeline template with the tokenizer.
        # The tokenizer always has 'remote' set to True.
        subpipeline_template = dict(
            remote=self.pipeline_template[0]["remote"], names=[self.pipeline_template[0]["name"]],
        )

        # Initialize the subpipeline templates list as a class property
        self.subpipeline_templates = [subpipeline_template]

        # Loop through the remaining pipe templates
        for pipe_template in self.pipeline_template[1:]:

            # A pipe with the same `remote` value joins the current
            # subpipeline template; otherwise a new one is started.
            if pipe_template["remote"] == subpipeline_template["remote"]:
                subpipeline_template["names"].append(pipe_template["name"])
            else:
                subpipeline_template = dict(
                    remote=pipe_template["remote"], names=[pipe_template["name"]]
                )
                self.subpipeline_templates.append(subpipeline_template)

    def _reset_pipeline(self):
        """Resets the `pipeline` class property from the current template."""

        # Aggregate the pipeline components into subpipeline templates.
        # This method creates the instance variable
        # self.subpipeline_templates
        self._parse_pipeline_template()

        # One empty dict per subpipeline: each dict will later map a worker
        # id to the SubPipeline object deployed on that worker.
        subpipeline_count = len(self.subpipeline_templates)
        self.pipeline = [dict() for i in range(subpipeline_count)]

    def add_pipe(
        self,
        component: callable,
        remote: bool = False,
        name: str = None,
        before: str = None,
        after: str = None,
        first: bool = False,
        last: bool = True,
    ):
        """Adds a pipe template `{'remote': remote, 'name': name}` to the
        pipeline template, then rebuilds the subpipeline templates and the
        pipeline.

        Args:
            component (callable): A callable that takes a Doc object and
                modifies it inplace. Must expose a `factory()` method.
            remote (bool): If True, the pipe component will be sent to the
                remote worker where the Doc object resides. If False, the pipe
                operates locally on a Doc or DocPointer. Defaults to False.
            name (str): The name of the pipeline component to be added.
            before (str): Name of the component before which to insert.
            after (str): Name of the component after which to insert.
            first (bool): If True, insert right after the tokenizer.
            last (bool): If True, append at the end (default).

        Only one of `before`, `after`, `first` or `last` may be set.
        """

        # The component argument must be callable
        # [TODO] An exception with a custom error message should be thrown
        assert hasattr(component, "__call__"), "Argument `component` is not a callable."

        # Make sure the `component` argument is an object that has a `factory()` method
        assert hasattr(
            component, "factory"
        ), "Argument `component` should be an object that has a `factory()` method"

        # [TODO] The following requirement should be relaxed and a name should be
        # automatically assigned in case `name` is None, as done by spaCy
        assert (
            isinstance(name, str) and len(name) >= 1
        ), "Argument `name` should be of type `str` with at least one character."

        # [TODO] Add custom error message
        assert (
            name not in self.pipe_names
        ), "Pipeline component name '{}' that you have chosen is already used by another pipeline component.".format(
            name
        )

        # Make sure at most one of 'before', 'after', 'first' or 'last' is set
        # [TODO] Add custom error message
        assert (
            sum([bool(before), bool(after), bool(first), bool(last)]) < 2
        ), "Only one among arguments 'before', 'after', 'first' or 'last' should be set."

        # Add the new pipe component to the list of factories
        self.factories[name] = component

        # Create the pipe template that will be added to the pipeline template
        pipe_template = dict(remote=remote, name=name)

        # Add the pipe template at the right position.
        # NOTE: list.insert() accepts positional arguments only; the previous
        # keyword form `insert(index=..., element=...)` raised a TypeError.
        if last or not any([before, after, first]):
            self.pipeline_template.append(pipe_template)
        elif first:
            # The index 0 is reserved for the tokenizer
            self.pipeline_template.insert(1, pipe_template)
        elif before in self.pipe_names:
            self.pipeline_template.insert(self.pipe_names.index(before), pipe_template)
        elif after in self.pipe_names:
            self.pipeline_template.insert(self.pipe_names.index(after) + 1, pipe_template)
        else:
            # [TODO] Raise exception with custom error message
            assert (
                False
            ), "component cannot be added to the pipeline, \
            please double check argument values of the `add_pipe` method call."

        # Reset the pipeline.
        # The instance variable that will be affected is:
        # self.pipeline
        self._reset_pipeline()

    def remove_pipe(self, name: str) -> Tuple[str, callable]:
        """Removes the pipe template whose name is `name`.

        Args:
            name (str): The name of the pipeline component to remove.

        Returns:
            The removed pipe template dict.
        """

        # [TODO] Add custom error message
        assert (
            name in self.pipe_names
        ), "No pipeline component with the specified name '{}' was found".format(name)

        # Get the index of the pipe template to be removed
        pipe_index = self.pipe_names.index(name)

        # Delete the pipe using its index
        pipe = self.pipeline_template.pop(pipe_index)

        # Parse the pipeline template again to recreate the subpipeline
        # templates, then reset the pipeline.
        self._parse_pipeline_template()
        self._reset_pipeline()

        return pipe

    def _run_subpipeline_from_template(
        self, template_index: int, input: Union[str, String, StringPointer, Doc, DocPointer] = None,
    ) -> Union[Doc, DocPointer]:
        """Runs the subpipeline at position `template_index` of `self.pipeline`
        on the appropriate worker.

        The subpipeline runs on the worker holding `input` when the template's
        `remote` property is True; otherwise it runs on the local worker. If no
        subpipeline exists yet for that worker, one is created from the
        template and registered in the pipeline.

        NOTE: `input` was previously declared as `input=Union[...]`, which set
        the Union object itself as the *default value* instead of annotating
        the parameter; it is now a proper annotation with a `None` default.

        Args:
            template_index (int): Index of the subpipeline template in
                `self.subpipeline_templates`.
            input (str, String, StringPointer, Doc, DocPointer): The text to
                tokenize (or a pointer to it) for the subpipeline at index 0,
                or the Doc (or its pointer) for subsequent subpipelines.

        Returns:
            (Doc or DocPointer): The new or updated Doc object or a pointer
            to a Doc object.
        """

        # Get the location ID of the worker where the text to be tokenized,
        # or the Doc to be processed, is located
        if isinstance(input, ObjectPointer):
            location_id = input.location.id
        else:
            location_id = self.owner.id

        # Create a new SubPipeline object if one doesn't already exist on the
        # worker where the input is located
        if location_id not in self.pipeline[template_index]:

            # Get the subpipeline template
            subpipeline_template = self.subpipeline_templates[template_index]

            # Is the pipeline a remote one?
            remote = subpipeline_template["remote"]

            # Instantiate a subpipeline and load the subpipeline template
            subpipeline = SubPipeline()
            subpipeline.load_template(template=subpipeline_template, factories=self.factories)

            # Add the subpipeline to the pipeline
            self.pipeline[template_index][location_id] = subpipeline

            # Send the subpipeline to the worker where the input is located
            if (
                isinstance(input, ObjectPointer)
                and input.location != self.owner  # Is the input remote?
                and remote  # Is the subpipeline sendable?
            ):
                self.pipeline[template_index][location_id] = self.pipeline[template_index][
                    location_id
                ].send(input.location)

        # Apply the subpipeline and get the Doc or the Doc id.
        # A Doc ID signifies the ID of the Doc object on the remote worker.
        doc_or_id = self.pipeline[template_index][location_id](input)

        # An int/str result is a remote Doc id: wrap it in a DocPointer
        if isinstance(doc_or_id, int) or isinstance(doc_or_id, str):
            doc = DocPointer(location=input.location, id_at_location=doc_or_id, owner=self.owner)
        # Otherwise it is a local Doc object
        else:
            doc = doc_or_id

        # return the doc
        return doc

    def __call__(self, text: Union[str, String, StringPointer]) -> Union[Doc, DocPointer]:
        """Tokenizes `text` and runs every pipeline component over the result.

        Args:
            text (str, String or StringPointer): the text to be tokenized and
                processed by the pipeline components.

        Returns:
            (Doc or DocPointer): The Doc object or a pointer to a Doc object,
            providing access to all token data.
        """

        # Run the first subpipeline (the one that holds the tokenizer)
        doc = self._run_subpipeline_from_template(template_index=0, input=text)

        # Apply the rest of the subpipelines sequentially;
        # each one modifies the document `doc` inplace
        for i, subpipeline in enumerate(self.pipeline[1:], start=1):
            doc = self._run_subpipeline_from_template(template_index=i, input=doc)

        # return the Doc object
        return doc
| 39.440389 | 117 | 0.613017 | from .tokenizer import Tokenizer
from .vocab import Vocab
from .doc import Doc
from .pointers.doc_pointer import DocPointer
from .pipeline import SubPipeline
from syft.generic.object import AbstractObject
from syft.workers.base import BaseWorker
from syft.generic.string import String
from syft.generic.pointers.string_pointer import StringPointer
from syft.generic.pointers.object_pointer import ObjectPointer
from typing import List, Union, Tuple
class BaseDefaults(object):
    """Factory defaults for the components of a `Language` object."""
    @classmethod
    def create_vocab(cls, model_name) -> Vocab:
        # Build the vocabulary container for the given model name
        vocab = Vocab(model_name)
        return vocab
    @classmethod
    def create_tokenizer(cls, vocab,) -> Tokenizer:
        # Build the default tokenizer over the given vocabulary;
        # the tokenizer produces the Doc container used by the pipeline
        tokenizer = Tokenizer(vocab,)
        return tokenizer
class Language(AbstractObject):
    """Orchestrates the text-processing pipeline.

    A `Language` object holds an ordered template of pipe components (the
    tokenizer is always first). Consecutive pipes sharing the same `remote`
    flag are grouped into subpipelines; one `SubPipeline` instance is
    lazily created per worker that holds an input, and is sent to that
    worker when the template marks it as remote-capable.
    """

    def __init__(
        self,
        model_name,
        id: int = None,
        owner: BaseWorker = None,
        tags: List[str] = None,
        description: str = None,
    ):
        # Factory defaults used to create the vocab and the tokenizer.
        self.Defaults = BaseDefaults
        self.vocab = self.Defaults.create_vocab(model_name)
        # Maps a pipe name to the component (or component factory) for it.
        self.factories = {"tokenizer": self.Defaults.create_tokenizer(self.vocab)}
        # Ordered description of the pipeline; the tokenizer is always first.
        self.pipeline_template = [{"remote": True, "name": "tokenizer"}]
        self._reset_pipeline()
        super(Language, self).__init__(id=id, owner=owner, tags=tags, description=description)

    @property
    def pipe_names(self) -> List[str]:
        """Names of the pipe components, in pipeline order."""
        return [pipe_template["name"] for pipe_template in self.pipeline_template]

    def _parse_pipeline_template(self):
        """Group consecutive pipes that share the same `remote` flag into
        subpipeline templates, preserving pipeline order. The result is
        stored in `self.subpipeline_templates`.
        """
        subpipeline_template = dict(
            remote=self.pipeline_template[0]["remote"], names=[self.pipeline_template[0]["name"]],
        )
        self.subpipeline_templates = [subpipeline_template]
        for pipe_template in self.pipeline_template[1:]:
            if pipe_template["remote"] == subpipeline_template["remote"]:
                # Same remote-ness as the current group: extend it.
                subpipeline_template["names"].append(pipe_template["name"])
            else:
                # Remote-ness flips: start a new subpipeline group.
                subpipeline_template = dict(
                    remote=pipe_template["remote"], names=[pipe_template["name"]]
                )
                self.subpipeline_templates.append(subpipeline_template)

    def _reset_pipeline(self):
        """Rebuild an empty pipeline from the current template.

        Each entry of `self.pipeline` maps a worker id to the SubPipeline
        instantiated for that worker; instantiation happens lazily in
        `_run_subpipeline_from_template`.
        """
        self._parse_pipeline_template()
        self.pipeline = [dict() for _ in range(len(self.subpipeline_templates))]

    def add_pipe(
        self,
        component: callable,
        remote: bool = False,
        name: str = None,
        before: str = None,
        after: str = None,
        first: bool = False,
        last: bool = True,
    ):
        """Register `component` in the pipeline template under `name`.

        At most one of `before`, `after`, `first` or `last` may be set.
        Note that `last` defaults to True, so callers must pass
        `last=False` when using any of the other position arguments.

        Args:
            component: a callable object exposing a `factory()` method.
            remote: whether the component may be sent to a remote worker.
            name: unique name of the component in this pipeline.
            before/after: name of an existing pipe to anchor the position.
            first: insert just after the tokenizer.
            last: append at the end of the pipeline (default).
        """
        assert hasattr(component, "__call__"), "Argument `component` is not a callable."
        assert hasattr(
            component, "factory"
        ), "Argument `component` should be an object that has a `factory()` method"
        assert (
            isinstance(name, str) and len(name) >= 1
        ), "Argument `name` should be of type `str` with at least one character."
        assert (
            name not in self.pipe_names
        ), "Pipeline component name '{}' that you have chosen is already used by another pipeline component.".format(
            name
        )
        assert (
            sum([bool(before), bool(after), bool(first), bool(last)]) < 2
        ), "Only one among arguments 'before', 'after', 'first' or 'last' should be set."

        self.factories[name] = component
        pipe_template = dict(remote=remote, name=name)

        # BUG FIX: `list.insert` takes positional-only arguments in CPython;
        # the previous keyword form `insert(index=..., element=...)` raised
        # TypeError on every non-append placement.
        if last or not any([before, after, first]):
            self.pipeline_template.append(pipe_template)
        elif first:
            # Position 0 is reserved for the tokenizer.
            self.pipeline_template.insert(1, pipe_template)
        elif before in self.pipe_names:
            self.pipeline_template.insert(self.pipe_names.index(before), pipe_template)
        elif after in self.pipe_names:
            self.pipeline_template.insert(self.pipe_names.index(after) + 1, pipe_template)
        else:
            assert (
                False
            ), "component cannot be added to the pipeline, \
                please double check argument values of the `add_pipe` method call."

        self._reset_pipeline()

    def remove_pipe(self, name: str) -> Tuple[str, callable]:
        """Remove the pipe called `name` from the template and return its
        template entry. The instantiated pipeline is rebuilt afterwards.
        """
        assert (
            name in self.pipe_names
        ), "No pipeline component with the specified name '{}' was found".format(name)
        pipe_index = self.pipe_names.index(name)
        pipe = self.pipeline_template.pop(pipe_index)
        # `_reset_pipeline` re-parses the template itself, so no separate
        # `_parse_pipeline_template()` call is needed here.
        self._reset_pipeline()
        return pipe

    def _run_subpipeline_from_template(
        self, template_index: int, input: Union[str, String, StringPointer, Doc, DocPointer] = None,
    ) -> Union[Doc, DocPointer]:
        """Run the subpipeline at `template_index` on `input`.

        NOTE: previously `input=Union[...]` assigned the typing object as a
        default value instead of annotating the parameter; fixed to a real
        annotation with a `None` default.

        Args:
            template_index: index of the subpipeline in `self.pipeline`.
            input: the text or document to process (possibly a pointer to
                an object on a remote worker).

        Returns:
            (Doc or DocPointer): the processed document, or a pointer to it
                when it lives on a remote worker.
        """
        # Determine the id of the worker where the input lives.
        if isinstance(input, ObjectPointer):
            location_id = input.location.id
        else:
            location_id = self.owner.id

        # Lazily instantiate the subpipeline for that worker the first time
        # an input located there is processed.
        if location_id not in self.pipeline[template_index]:
            subpipeline_template = self.subpipeline_templates[template_index]
            remote = subpipeline_template["remote"]
            subpipeline = SubPipeline()
            subpipeline.load_template(template=subpipeline_template, factories=self.factories)
            self.pipeline[template_index][location_id] = subpipeline
            # Send the subpipeline to the worker holding the input, but only
            # when the template allows remote execution.
            if (
                isinstance(input, ObjectPointer)
                and input.location != self.owner  # Is the input remote?
                and remote  # Is the subpipeline sendable?
            ):
                self.pipeline[template_index][location_id] = self.pipeline[template_index][
                    location_id
                ].send(input.location)

        # Apply the subpipeline. A remote run returns the id of the Doc that
        # stays on the remote worker; a local run returns the Doc itself.
        doc_or_id = self.pipeline[template_index][location_id](input)
        if isinstance(doc_or_id, int) or isinstance(doc_or_id, str):
            # An id came back: wrap it in a pointer to the remote Doc.
            doc = DocPointer(location=input.location, id_at_location=doc_or_id, owner=self.owner)
        else:
            doc = doc_or_id
        return doc

    def __call__(self, text: Union[str, String, StringPointer]) -> Union[Doc, DocPointer]:
        """Tokenize `text` and apply each pipeline component in order.

        Args:
            text (str, String or StringPointer): the text to be tokenized and
                processed by the pipeline components.

        Returns:
            (Doc or DocPointer): the processed Doc object, or a pointer to it
                when processing happened on a remote worker.
        """
        # Subpipeline 0 contains the tokenizer.
        doc = self._run_subpipeline_from_template(template_index=0, input=text)
        # Each remaining subpipeline modifies `doc` in place.
        for i in range(1, len(self.pipeline)):
            doc = self._run_subpipeline_from_template(template_index=i, input=doc)
        return doc
| true | true |
f723e3b85ffbd830c43dccf93a9dd2bd55bc2e30 | 31,635 | py | Python | sdk/python/pulumi_google_native/apigee/v1/rate_plan.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 44 | 2021-04-18T23:00:48.000Z | 2022-02-14T17:43:15.000Z | sdk/python/pulumi_google_native/apigee/v1/rate_plan.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 354 | 2021-04-16T16:48:39.000Z | 2022-03-31T17:16:39.000Z | sdk/python/pulumi_google_native/apigee/v1/rate_plan.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 8 | 2021-04-24T17:46:51.000Z | 2022-01-05T10:40:21.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RatePlanArgs', 'RatePlan']
@pulumi.input_type
class RatePlanArgs:
    # Generated input bag for the RatePlan resource. Each field is stored
    # via pulumi.set/pulumi.get so the SDK can track inputs; do not restyle
    # by hand — the @pulumi.input_type decorator introspects this shape.
    def __init__(__self__, *,
                 apiproduct_id: pulumi.Input[str],
                 organization_id: pulumi.Input[str],
                 apiproduct: Optional[pulumi.Input[str]] = None,
                 billing_period: Optional[pulumi.Input['RatePlanBillingPeriod']] = None,
                 consumption_pricing_rates: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RateRangeArgs']]]] = None,
                 consumption_pricing_type: Optional[pulumi.Input['RatePlanConsumptionPricingType']] = None,
                 currency_code: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 end_time: Optional[pulumi.Input[str]] = None,
                 fixed_fee_frequency: Optional[pulumi.Input[int]] = None,
                 fixed_recurring_fee: Optional[pulumi.Input['GoogleTypeMoneyArgs']] = None,
                 revenue_share_rates: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RevenueShareRangeArgs']]]] = None,
                 revenue_share_type: Optional[pulumi.Input['RatePlanRevenueShareType']] = None,
                 setup_fee: Optional[pulumi.Input['GoogleTypeMoneyArgs']] = None,
                 start_time: Optional[pulumi.Input[str]] = None,
                 state: Optional[pulumi.Input['RatePlanState']] = None):
        """
        The set of arguments for constructing a RatePlan resource.
        :param pulumi.Input[str] apiproduct: Name of the API product that the rate plan is associated with.
        :param pulumi.Input['RatePlanBillingPeriod'] billing_period: Frequency at which the customer will be billed.
        :param pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RateRangeArgs']]] consumption_pricing_rates: API call volume ranges and the fees charged when the total number of API calls is within a given range. The method used to calculate the final fee depends on the selected pricing model. For example, if the pricing model is `STAIRSTEP` and the ranges are defined as follows: ``` { "start": 1, "end": 100, "fee": 75 }, { "start": 101, "end": 200, "fee": 100 }, } ``` Then the following fees would be charged based on the total number of API calls (assuming the currency selected is `USD`): * 1 call costs $75 * 50 calls cost $75 * 150 calls cost $100 The number of API calls cannot exceed 200.
        :param pulumi.Input['RatePlanConsumptionPricingType'] consumption_pricing_type: Pricing model used for consumption-based charges.
        :param pulumi.Input[str] currency_code: Currency to be used for billing. Consists of a three-letter code as defined by the [ISO 4217](https://en.wikipedia.org/wiki/ISO_4217) standard.
        :param pulumi.Input[str] description: Description of the rate plan.
        :param pulumi.Input[str] display_name: Display name of the rate plan.
        :param pulumi.Input[str] end_time: Time when the rate plan will expire in milliseconds since epoch. Set to 0 or `null` to indicate that the rate plan should never expire.
        :param pulumi.Input[int] fixed_fee_frequency: Frequency at which the fixed fee is charged.
        :param pulumi.Input['GoogleTypeMoneyArgs'] fixed_recurring_fee: Fixed amount that is charged at a defined interval and billed in advance of use of the API product. The fee will be prorated for the first billing period.
        :param pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RevenueShareRangeArgs']]] revenue_share_rates: Details of the revenue sharing model.
        :param pulumi.Input['RatePlanRevenueShareType'] revenue_share_type: Method used to calculate the revenue that is shared with developers.
        :param pulumi.Input['GoogleTypeMoneyArgs'] setup_fee: Initial, one-time fee paid when purchasing the API product.
        :param pulumi.Input[str] start_time: Time when the rate plan becomes active in milliseconds since epoch.
        :param pulumi.Input['RatePlanState'] state: Current state of the rate plan (draft or published).
        """
        # Required resource identifiers are always recorded.
        pulumi.set(__self__, "apiproduct_id", apiproduct_id)
        pulumi.set(__self__, "organization_id", organization_id)
        # Optional fields are recorded only when explicitly supplied.
        if apiproduct is not None:
            pulumi.set(__self__, "apiproduct", apiproduct)
        if billing_period is not None:
            pulumi.set(__self__, "billing_period", billing_period)
        if consumption_pricing_rates is not None:
            pulumi.set(__self__, "consumption_pricing_rates", consumption_pricing_rates)
        if consumption_pricing_type is not None:
            pulumi.set(__self__, "consumption_pricing_type", consumption_pricing_type)
        if currency_code is not None:
            pulumi.set(__self__, "currency_code", currency_code)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if end_time is not None:
            pulumi.set(__self__, "end_time", end_time)
        if fixed_fee_frequency is not None:
            pulumi.set(__self__, "fixed_fee_frequency", fixed_fee_frequency)
        if fixed_recurring_fee is not None:
            pulumi.set(__self__, "fixed_recurring_fee", fixed_recurring_fee)
        if revenue_share_rates is not None:
            pulumi.set(__self__, "revenue_share_rates", revenue_share_rates)
        if revenue_share_type is not None:
            pulumi.set(__self__, "revenue_share_type", revenue_share_type)
        if setup_fee is not None:
            pulumi.set(__self__, "setup_fee", setup_fee)
        if start_time is not None:
            pulumi.set(__self__, "start_time", start_time)
        if state is not None:
            pulumi.set(__self__, "state", state)

    @property
    @pulumi.getter(name="apiproductId")
    def apiproduct_id(self) -> pulumi.Input[str]:
        """
        Required. Identifier of the API product under which the rate plan is
        created. NOTE(review): presumably the parent resource path segment of
        the Apigee API URL — confirm against the Apigee REST spec.
        """
        return pulumi.get(self, "apiproduct_id")

    @apiproduct_id.setter
    def apiproduct_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "apiproduct_id", value)

    @property
    @pulumi.getter(name="organizationId")
    def organization_id(self) -> pulumi.Input[str]:
        """
        Required. Identifier of the Apigee organization. NOTE(review):
        presumably the organization path segment of the Apigee API URL —
        confirm against the Apigee REST spec.
        """
        return pulumi.get(self, "organization_id")

    @organization_id.setter
    def organization_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "organization_id", value)

    @property
    @pulumi.getter
    def apiproduct(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the API product that the rate plan is associated with.
        """
        return pulumi.get(self, "apiproduct")

    @apiproduct.setter
    def apiproduct(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "apiproduct", value)

    @property
    @pulumi.getter(name="billingPeriod")
    def billing_period(self) -> Optional[pulumi.Input['RatePlanBillingPeriod']]:
        """
        Frequency at which the customer will be billed.
        """
        return pulumi.get(self, "billing_period")

    @billing_period.setter
    def billing_period(self, value: Optional[pulumi.Input['RatePlanBillingPeriod']]):
        pulumi.set(self, "billing_period", value)

    @property
    @pulumi.getter(name="consumptionPricingRates")
    def consumption_pricing_rates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RateRangeArgs']]]]:
        """
        API call volume ranges and the fees charged when the total number of API calls is within a given range. The method used to calculate the final fee depends on the selected pricing model. For example, if the pricing model is `STAIRSTEP` and the ranges are defined as follows: ``` { "start": 1, "end": 100, "fee": 75 }, { "start": 101, "end": 200, "fee": 100 }, } ``` Then the following fees would be charged based on the total number of API calls (assuming the currency selected is `USD`): * 1 call costs $75 * 50 calls cost $75 * 150 calls cost $100 The number of API calls cannot exceed 200.
        """
        return pulumi.get(self, "consumption_pricing_rates")

    @consumption_pricing_rates.setter
    def consumption_pricing_rates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RateRangeArgs']]]]):
        pulumi.set(self, "consumption_pricing_rates", value)

    @property
    @pulumi.getter(name="consumptionPricingType")
    def consumption_pricing_type(self) -> Optional[pulumi.Input['RatePlanConsumptionPricingType']]:
        """
        Pricing model used for consumption-based charges.
        """
        return pulumi.get(self, "consumption_pricing_type")

    @consumption_pricing_type.setter
    def consumption_pricing_type(self, value: Optional[pulumi.Input['RatePlanConsumptionPricingType']]):
        pulumi.set(self, "consumption_pricing_type", value)

    @property
    @pulumi.getter(name="currencyCode")
    def currency_code(self) -> Optional[pulumi.Input[str]]:
        """
        Currency to be used for billing. Consists of a three-letter code as defined by the [ISO 4217](https://en.wikipedia.org/wiki/ISO_4217) standard.
        """
        return pulumi.get(self, "currency_code")

    @currency_code.setter
    def currency_code(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "currency_code", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description of the rate plan.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        Display name of the rate plan.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)

    @property
    @pulumi.getter(name="endTime")
    def end_time(self) -> Optional[pulumi.Input[str]]:
        """
        Time when the rate plan will expire in milliseconds since epoch. Set to 0 or `null` to indicate that the rate plan should never expire.
        """
        return pulumi.get(self, "end_time")

    @end_time.setter
    def end_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "end_time", value)

    @property
    @pulumi.getter(name="fixedFeeFrequency")
    def fixed_fee_frequency(self) -> Optional[pulumi.Input[int]]:
        """
        Frequency at which the fixed fee is charged.
        """
        return pulumi.get(self, "fixed_fee_frequency")

    @fixed_fee_frequency.setter
    def fixed_fee_frequency(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "fixed_fee_frequency", value)

    @property
    @pulumi.getter(name="fixedRecurringFee")
    def fixed_recurring_fee(self) -> Optional[pulumi.Input['GoogleTypeMoneyArgs']]:
        """
        Fixed amount that is charged at a defined interval and billed in advance of use of the API product. The fee will be prorated for the first billing period.
        """
        return pulumi.get(self, "fixed_recurring_fee")

    @fixed_recurring_fee.setter
    def fixed_recurring_fee(self, value: Optional[pulumi.Input['GoogleTypeMoneyArgs']]):
        pulumi.set(self, "fixed_recurring_fee", value)

    @property
    @pulumi.getter(name="revenueShareRates")
    def revenue_share_rates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RevenueShareRangeArgs']]]]:
        """
        Details of the revenue sharing model.
        """
        return pulumi.get(self, "revenue_share_rates")

    @revenue_share_rates.setter
    def revenue_share_rates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RevenueShareRangeArgs']]]]):
        pulumi.set(self, "revenue_share_rates", value)

    @property
    @pulumi.getter(name="revenueShareType")
    def revenue_share_type(self) -> Optional[pulumi.Input['RatePlanRevenueShareType']]:
        """
        Method used to calculate the revenue that is shared with developers.
        """
        return pulumi.get(self, "revenue_share_type")

    @revenue_share_type.setter
    def revenue_share_type(self, value: Optional[pulumi.Input['RatePlanRevenueShareType']]):
        pulumi.set(self, "revenue_share_type", value)

    @property
    @pulumi.getter(name="setupFee")
    def setup_fee(self) -> Optional[pulumi.Input['GoogleTypeMoneyArgs']]:
        """
        Initial, one-time fee paid when purchasing the API product.
        """
        return pulumi.get(self, "setup_fee")

    @setup_fee.setter
    def setup_fee(self, value: Optional[pulumi.Input['GoogleTypeMoneyArgs']]):
        pulumi.set(self, "setup_fee", value)

    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> Optional[pulumi.Input[str]]:
        """
        Time when the rate plan becomes active in milliseconds since epoch.
        """
        return pulumi.get(self, "start_time")

    @start_time.setter
    def start_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "start_time", value)

    @property
    @pulumi.getter
    def state(self) -> Optional[pulumi.Input['RatePlanState']]:
        """
        Current state of the rate plan (draft or published).
        """
        return pulumi.get(self, "state")

    @state.setter
    def state(self, value: Optional[pulumi.Input['RatePlanState']]):
        pulumi.set(self, "state", value)
class RatePlan(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 apiproduct: Optional[pulumi.Input[str]] = None,
                 apiproduct_id: Optional[pulumi.Input[str]] = None,
                 billing_period: Optional[pulumi.Input['RatePlanBillingPeriod']] = None,
                 consumption_pricing_rates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RateRangeArgs']]]]] = None,
                 consumption_pricing_type: Optional[pulumi.Input['RatePlanConsumptionPricingType']] = None,
                 currency_code: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 end_time: Optional[pulumi.Input[str]] = None,
                 fixed_fee_frequency: Optional[pulumi.Input[int]] = None,
                 fixed_recurring_fee: Optional[pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']]] = None,
                 organization_id: Optional[pulumi.Input[str]] = None,
                 revenue_share_rates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RevenueShareRangeArgs']]]]] = None,
                 revenue_share_type: Optional[pulumi.Input['RatePlanRevenueShareType']] = None,
                 setup_fee: Optional[pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']]] = None,
                 start_time: Optional[pulumi.Input[str]] = None,
                 state: Optional[pulumi.Input['RatePlanState']] = None,
                 __props__=None):
        """
        Create a rate plan that is associated with an API product in an organization. Using rate plans, API product owners can monetize their API products by configuring one or more of the following: - Billing frequency - Initial setup fees for using an API product - Payment funding model (postpaid only) - Fixed recurring or consumption-based charges for using an API product - Revenue sharing with developer partners An API product can have multiple rate plans associated with it but *only one* rate plan can be active at any point of time. **Note: From the developer's perspective, they purchase API products not rate plans.

        Auto-naming is currently not supported for this resource.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] apiproduct: Name of the API product that the rate plan is associated with.
        :param pulumi.Input['RatePlanBillingPeriod'] billing_period: Frequency at which the customer will be billed.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RateRangeArgs']]]] consumption_pricing_rates: API call volume ranges and the fees charged when the total number of API calls is within a given range. The method used to calculate the final fee depends on the selected pricing model. For example, if the pricing model is `STAIRSTEP` and the ranges are defined as follows: ``` { "start": 1, "end": 100, "fee": 75 }, { "start": 101, "end": 200, "fee": 100 }, } ``` Then the following fees would be charged based on the total number of API calls (assuming the currency selected is `USD`): * 1 call costs $75 * 50 calls cost $75 * 150 calls cost $100 The number of API calls cannot exceed 200.
        :param pulumi.Input['RatePlanConsumptionPricingType'] consumption_pricing_type: Pricing model used for consumption-based charges.
        :param pulumi.Input[str] currency_code: Currency to be used for billing. Consists of a three-letter code as defined by the [ISO 4217](https://en.wikipedia.org/wiki/ISO_4217) standard.
        :param pulumi.Input[str] description: Description of the rate plan.
        :param pulumi.Input[str] display_name: Display name of the rate plan.
        :param pulumi.Input[str] end_time: Time when the rate plan will expire in milliseconds since epoch. Set to 0 or `null` to indicate that the rate plan should never expire.
        :param pulumi.Input[int] fixed_fee_frequency: Frequency at which the fixed fee is charged.
        :param pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']] fixed_recurring_fee: Fixed amount that is charged at a defined interval and billed in advance of use of the API product. The fee will be prorated for the first billing period.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RevenueShareRangeArgs']]]] revenue_share_rates: Details of the revenue sharing model.
        :param pulumi.Input['RatePlanRevenueShareType'] revenue_share_type: Method used to calculate the revenue that is shared with developers.
        :param pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']] setup_fee: Initial, one-time fee paid when purchasing the API product.
        :param pulumi.Input[str] start_time: Time when the rate plan becomes active in milliseconds since epoch.
        :param pulumi.Input['RatePlanState'] state: Current state of the rate plan (draft or published).
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: RatePlanArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Create a rate plan that is associated with an API product in an organization. Using rate plans, API product owners can monetize their API products by configuring one or more of the following: - Billing frequency - Initial setup fees for using an API product - Payment funding model (postpaid only) - Fixed recurring or consumption-based charges for using an API product - Revenue sharing with developer partners An API product can have multiple rate plans associated with it but *only one* rate plan can be active at any point of time. **Note: From the developer's perspective, they purchase API products not rate plans.

        Auto-naming is currently not supported for this resource.

        :param str resource_name: The name of the resource.
        :param RatePlanArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatcher for the two overloads above: detects whether the caller
        # passed a RatePlanArgs bag or individual keyword arguments, then
        # forwards to _internal_init either way.
        resource_args, opts = _utilities.get_resource_args_opts(RatePlanArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 apiproduct: Optional[pulumi.Input[str]] = None,
                 apiproduct_id: Optional[pulumi.Input[str]] = None,
                 billing_period: Optional[pulumi.Input['RatePlanBillingPeriod']] = None,
                 consumption_pricing_rates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RateRangeArgs']]]]] = None,
                 consumption_pricing_type: Optional[pulumi.Input['RatePlanConsumptionPricingType']] = None,
                 currency_code: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 end_time: Optional[pulumi.Input[str]] = None,
                 fixed_fee_frequency: Optional[pulumi.Input[int]] = None,
                 fixed_recurring_fee: Optional[pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']]] = None,
                 organization_id: Optional[pulumi.Input[str]] = None,
                 revenue_share_rates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RevenueShareRangeArgs']]]]] = None,
                 revenue_share_type: Optional[pulumi.Input['RatePlanRevenueShareType']] = None,
                 setup_fee: Optional[pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']]] = None,
                 start_time: Optional[pulumi.Input[str]] = None,
                 state: Optional[pulumi.Input['RatePlanState']] = None,
                 __props__=None):
        # Normalize resource options (generator's legacy pattern).
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: build the property bag from arguments.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = RatePlanArgs.__new__(RatePlanArgs)

            __props__.__dict__["apiproduct"] = apiproduct
            if apiproduct_id is None and not opts.urn:
                raise TypeError("Missing required property 'apiproduct_id'")
            __props__.__dict__["apiproduct_id"] = apiproduct_id
            __props__.__dict__["billing_period"] = billing_period
            __props__.__dict__["consumption_pricing_rates"] = consumption_pricing_rates
            __props__.__dict__["consumption_pricing_type"] = consumption_pricing_type
            __props__.__dict__["currency_code"] = currency_code
            __props__.__dict__["description"] = description
            __props__.__dict__["display_name"] = display_name
            __props__.__dict__["end_time"] = end_time
            __props__.__dict__["fixed_fee_frequency"] = fixed_fee_frequency
            __props__.__dict__["fixed_recurring_fee"] = fixed_recurring_fee
            if organization_id is None and not opts.urn:
                raise TypeError("Missing required property 'organization_id'")
            __props__.__dict__["organization_id"] = organization_id
            __props__.__dict__["revenue_share_rates"] = revenue_share_rates
            __props__.__dict__["revenue_share_type"] = revenue_share_type
            __props__.__dict__["setup_fee"] = setup_fee
            __props__.__dict__["start_time"] = start_time
            __props__.__dict__["state"] = state
            # Output-only properties start as None; the engine resolves them.
            __props__.__dict__["created_at"] = None
            __props__.__dict__["last_modified_at"] = None
            __props__.__dict__["name"] = None
        super(RatePlan, __self__).__init__(
            'google-native:apigee/v1:RatePlan',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'RatePlan':
        """
        Get an existing RatePlan resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = RatePlanArgs.__new__(RatePlanArgs)

        # All properties start as None; the engine populates them from the
        # provider's saved state for the given id.
        __props__.__dict__["apiproduct"] = None
        __props__.__dict__["billing_period"] = None
        __props__.__dict__["consumption_pricing_rates"] = None
        __props__.__dict__["consumption_pricing_type"] = None
        __props__.__dict__["created_at"] = None
        __props__.__dict__["currency_code"] = None
        __props__.__dict__["description"] = None
        __props__.__dict__["display_name"] = None
        __props__.__dict__["end_time"] = None
        __props__.__dict__["fixed_fee_frequency"] = None
        __props__.__dict__["fixed_recurring_fee"] = None
        __props__.__dict__["last_modified_at"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["revenue_share_rates"] = None
        __props__.__dict__["revenue_share_type"] = None
        __props__.__dict__["setup_fee"] = None
        __props__.__dict__["start_time"] = None
        __props__.__dict__["state"] = None
        return RatePlan(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def apiproduct(self) -> pulumi.Output[str]:
        """
        Name of the API product that the rate plan is associated with.
        """
        return pulumi.get(self, "apiproduct")

    @property
    @pulumi.getter(name="billingPeriod")
    def billing_period(self) -> pulumi.Output[str]:
        """
        Frequency at which the customer will be billed.
        """
        return pulumi.get(self, "billing_period")

    @property
    @pulumi.getter(name="consumptionPricingRates")
    def consumption_pricing_rates(self) -> pulumi.Output[Sequence['outputs.GoogleCloudApigeeV1RateRangeResponse']]:
        """
        API call volume ranges and the fees charged when the total number of API calls is within a given range. The method used to calculate the final fee depends on the selected pricing model. For example, if the pricing model is `STAIRSTEP` and the ranges are defined as follows: ``` { "start": 1, "end": 100, "fee": 75 }, { "start": 101, "end": 200, "fee": 100 }, } ``` Then the following fees would be charged based on the total number of API calls (assuming the currency selected is `USD`): * 1 call costs $75 * 50 calls cost $75 * 150 calls cost $100 The number of API calls cannot exceed 200.
        """
        return pulumi.get(self, "consumption_pricing_rates")

    @property
    @pulumi.getter(name="consumptionPricingType")
    def consumption_pricing_type(self) -> pulumi.Output[str]:
        """
        Pricing model used for consumption-based charges.
        """
        return pulumi.get(self, "consumption_pricing_type")

    @property
    @pulumi.getter(name="createdAt")
    def created_at(self) -> pulumi.Output[str]:
        """
        Time that the rate plan was created in milliseconds since epoch.
        """
        return pulumi.get(self, "created_at")

    @property
    @pulumi.getter(name="currencyCode")
    def currency_code(self) -> pulumi.Output[str]:
        """
        Currency to be used for billing. Consists of a three-letter code as defined by the [ISO 4217](https://en.wikipedia.org/wiki/ISO_4217) standard.
        """
        return pulumi.get(self, "currency_code")

    @property
    @pulumi.getter
    def description(self) -> pulumi.Output[str]:
        """
        Description of the rate plan.
        """
        return pulumi.get(self, "description")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Output[str]:
        """
        Display name of the rate plan.
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter(name="endTime")
    def end_time(self) -> pulumi.Output[str]:
        """
        Time when the rate plan will expire in milliseconds since epoch. Set to 0 or `null` to indicate that the rate plan should never expire.
        """
        return pulumi.get(self, "end_time")

    @property
    @pulumi.getter(name="fixedFeeFrequency")
    def fixed_fee_frequency(self) -> pulumi.Output[int]:
        """
        Frequency at which the fixed fee is charged.
        """
        return pulumi.get(self, "fixed_fee_frequency")

    @property
    @pulumi.getter(name="fixedRecurringFee")
    def fixed_recurring_fee(self) -> pulumi.Output['outputs.GoogleTypeMoneyResponse']:
        """
        Fixed amount that is charged at a defined interval and billed in advance of use of the API product. The fee will be prorated for the first billing period.
        """
        return pulumi.get(self, "fixed_recurring_fee")

    @property
    @pulumi.getter(name="lastModifiedAt")
    def last_modified_at(self) -> pulumi.Output[str]:
        """
        Time the rate plan was last modified in milliseconds since epoch.
        """
        return pulumi.get(self, "last_modified_at")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Name of the rate plan.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="revenueShareRates")
    def revenue_share_rates(self) -> pulumi.Output[Sequence['outputs.GoogleCloudApigeeV1RevenueShareRangeResponse']]:
        """
        Details of the revenue sharing model.
        """
        return pulumi.get(self, "revenue_share_rates")

    @property
    @pulumi.getter(name="revenueShareType")
    def revenue_share_type(self) -> pulumi.Output[str]:
        """
        Method used to calculate the revenue that is shared with developers.
        """
        return pulumi.get(self, "revenue_share_type")

    @property
    @pulumi.getter(name="setupFee")
    def setup_fee(self) -> pulumi.Output['outputs.GoogleTypeMoneyResponse']:
        """
        Initial, one-time fee paid when purchasing the API product.
        """
        return pulumi.get(self, "setup_fee")

    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> pulumi.Output[str]:
        """
        Time when the rate plan becomes active in milliseconds since epoch.
        """
        return pulumi.get(self, "start_time")

    @property
    @pulumi.getter
    def state(self) -> pulumi.Output[str]:
        """
        Current state of the rate plan (draft or published).
        """
        return pulumi.get(self, "state")
| 53.168067 | 724 | 0.676656 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['RatePlanArgs', 'RatePlan']
@pulumi.input_type
class RatePlanArgs:
def __init__(__self__, *,
apiproduct_id: pulumi.Input[str],
organization_id: pulumi.Input[str],
apiproduct: Optional[pulumi.Input[str]] = None,
billing_period: Optional[pulumi.Input['RatePlanBillingPeriod']] = None,
consumption_pricing_rates: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RateRangeArgs']]]] = None,
consumption_pricing_type: Optional[pulumi.Input['RatePlanConsumptionPricingType']] = None,
currency_code: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
end_time: Optional[pulumi.Input[str]] = None,
fixed_fee_frequency: Optional[pulumi.Input[int]] = None,
fixed_recurring_fee: Optional[pulumi.Input['GoogleTypeMoneyArgs']] = None,
revenue_share_rates: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RevenueShareRangeArgs']]]] = None,
revenue_share_type: Optional[pulumi.Input['RatePlanRevenueShareType']] = None,
setup_fee: Optional[pulumi.Input['GoogleTypeMoneyArgs']] = None,
start_time: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input['RatePlanState']] = None):
pulumi.set(__self__, "apiproduct_id", apiproduct_id)
pulumi.set(__self__, "organization_id", organization_id)
if apiproduct is not None:
pulumi.set(__self__, "apiproduct", apiproduct)
if billing_period is not None:
pulumi.set(__self__, "billing_period", billing_period)
if consumption_pricing_rates is not None:
pulumi.set(__self__, "consumption_pricing_rates", consumption_pricing_rates)
if consumption_pricing_type is not None:
pulumi.set(__self__, "consumption_pricing_type", consumption_pricing_type)
if currency_code is not None:
pulumi.set(__self__, "currency_code", currency_code)
if description is not None:
pulumi.set(__self__, "description", description)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if end_time is not None:
pulumi.set(__self__, "end_time", end_time)
if fixed_fee_frequency is not None:
pulumi.set(__self__, "fixed_fee_frequency", fixed_fee_frequency)
if fixed_recurring_fee is not None:
pulumi.set(__self__, "fixed_recurring_fee", fixed_recurring_fee)
if revenue_share_rates is not None:
pulumi.set(__self__, "revenue_share_rates", revenue_share_rates)
if revenue_share_type is not None:
pulumi.set(__self__, "revenue_share_type", revenue_share_type)
if setup_fee is not None:
pulumi.set(__self__, "setup_fee", setup_fee)
if start_time is not None:
pulumi.set(__self__, "start_time", start_time)
if state is not None:
pulumi.set(__self__, "state", state)
@property
@pulumi.getter(name="apiproductId")
def apiproduct_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "apiproduct_id")
@apiproduct_id.setter
def apiproduct_id(self, value: pulumi.Input[str]):
pulumi.set(self, "apiproduct_id", value)
@property
@pulumi.getter(name="organizationId")
def organization_id(self) -> pulumi.Input[str]:
return pulumi.get(self, "organization_id")
@organization_id.setter
def organization_id(self, value: pulumi.Input[str]):
pulumi.set(self, "organization_id", value)
@property
@pulumi.getter
def apiproduct(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "apiproduct")
@apiproduct.setter
def apiproduct(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "apiproduct", value)
@property
@pulumi.getter(name="billingPeriod")
def billing_period(self) -> Optional[pulumi.Input['RatePlanBillingPeriod']]:
return pulumi.get(self, "billing_period")
@billing_period.setter
def billing_period(self, value: Optional[pulumi.Input['RatePlanBillingPeriod']]):
pulumi.set(self, "billing_period", value)
@property
@pulumi.getter(name="consumptionPricingRates")
def consumption_pricing_rates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RateRangeArgs']]]]:
return pulumi.get(self, "consumption_pricing_rates")
@consumption_pricing_rates.setter
def consumption_pricing_rates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RateRangeArgs']]]]):
pulumi.set(self, "consumption_pricing_rates", value)
@property
@pulumi.getter(name="consumptionPricingType")
def consumption_pricing_type(self) -> Optional[pulumi.Input['RatePlanConsumptionPricingType']]:
return pulumi.get(self, "consumption_pricing_type")
@consumption_pricing_type.setter
def consumption_pricing_type(self, value: Optional[pulumi.Input['RatePlanConsumptionPricingType']]):
pulumi.set(self, "consumption_pricing_type", value)
@property
@pulumi.getter(name="currencyCode")
def currency_code(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "currency_code")
@currency_code.setter
def currency_code(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "currency_code", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "display_name")
@display_name.setter
def display_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "display_name", value)
@property
@pulumi.getter(name="endTime")
def end_time(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "end_time")
@end_time.setter
def end_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "end_time", value)
@property
@pulumi.getter(name="fixedFeeFrequency")
def fixed_fee_frequency(self) -> Optional[pulumi.Input[int]]:
return pulumi.get(self, "fixed_fee_frequency")
@fixed_fee_frequency.setter
def fixed_fee_frequency(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "fixed_fee_frequency", value)
@property
@pulumi.getter(name="fixedRecurringFee")
def fixed_recurring_fee(self) -> Optional[pulumi.Input['GoogleTypeMoneyArgs']]:
return pulumi.get(self, "fixed_recurring_fee")
@fixed_recurring_fee.setter
def fixed_recurring_fee(self, value: Optional[pulumi.Input['GoogleTypeMoneyArgs']]):
pulumi.set(self, "fixed_recurring_fee", value)
@property
@pulumi.getter(name="revenueShareRates")
def revenue_share_rates(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RevenueShareRangeArgs']]]]:
return pulumi.get(self, "revenue_share_rates")
@revenue_share_rates.setter
def revenue_share_rates(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudApigeeV1RevenueShareRangeArgs']]]]):
pulumi.set(self, "revenue_share_rates", value)
@property
@pulumi.getter(name="revenueShareType")
def revenue_share_type(self) -> Optional[pulumi.Input['RatePlanRevenueShareType']]:
return pulumi.get(self, "revenue_share_type")
@revenue_share_type.setter
def revenue_share_type(self, value: Optional[pulumi.Input['RatePlanRevenueShareType']]):
pulumi.set(self, "revenue_share_type", value)
@property
@pulumi.getter(name="setupFee")
def setup_fee(self) -> Optional[pulumi.Input['GoogleTypeMoneyArgs']]:
return pulumi.get(self, "setup_fee")
@setup_fee.setter
def setup_fee(self, value: Optional[pulumi.Input['GoogleTypeMoneyArgs']]):
pulumi.set(self, "setup_fee", value)
@property
@pulumi.getter(name="startTime")
def start_time(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "start_time")
@start_time.setter
def start_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "start_time", value)
@property
@pulumi.getter
def state(self) -> Optional[pulumi.Input['RatePlanState']]:
return pulumi.get(self, "state")
@state.setter
def state(self, value: Optional[pulumi.Input['RatePlanState']]):
pulumi.set(self, "state", value)
class RatePlan(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
apiproduct: Optional[pulumi.Input[str]] = None,
apiproduct_id: Optional[pulumi.Input[str]] = None,
billing_period: Optional[pulumi.Input['RatePlanBillingPeriod']] = None,
consumption_pricing_rates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RateRangeArgs']]]]] = None,
consumption_pricing_type: Optional[pulumi.Input['RatePlanConsumptionPricingType']] = None,
currency_code: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
end_time: Optional[pulumi.Input[str]] = None,
fixed_fee_frequency: Optional[pulumi.Input[int]] = None,
fixed_recurring_fee: Optional[pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']]] = None,
organization_id: Optional[pulumi.Input[str]] = None,
revenue_share_rates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RevenueShareRangeArgs']]]]] = None,
revenue_share_type: Optional[pulumi.Input['RatePlanRevenueShareType']] = None,
setup_fee: Optional[pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']]] = None,
start_time: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input['RatePlanState']] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: RatePlanArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        """Dispatch between the two ctor overloads (positional ``RatePlanArgs``
        vs. keyword properties); both paths funnel into ``_internal_init``."""
        resource_args, opts = _utilities.get_resource_args_opts(RatePlanArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
apiproduct: Optional[pulumi.Input[str]] = None,
apiproduct_id: Optional[pulumi.Input[str]] = None,
billing_period: Optional[pulumi.Input['RatePlanBillingPeriod']] = None,
consumption_pricing_rates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RateRangeArgs']]]]] = None,
consumption_pricing_type: Optional[pulumi.Input['RatePlanConsumptionPricingType']] = None,
currency_code: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
end_time: Optional[pulumi.Input[str]] = None,
fixed_fee_frequency: Optional[pulumi.Input[int]] = None,
fixed_recurring_fee: Optional[pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']]] = None,
organization_id: Optional[pulumi.Input[str]] = None,
revenue_share_rates: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['GoogleCloudApigeeV1RevenueShareRangeArgs']]]]] = None,
revenue_share_type: Optional[pulumi.Input['RatePlanRevenueShareType']] = None,
setup_fee: Optional[pulumi.Input[pulumi.InputType['GoogleTypeMoneyArgs']]] = None,
start_time: Optional[pulumi.Input[str]] = None,
state: Optional[pulumi.Input['RatePlanState']] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = RatePlanArgs.__new__(RatePlanArgs)
__props__.__dict__["apiproduct"] = apiproduct
if apiproduct_id is None and not opts.urn:
raise TypeError("Missing required property 'apiproduct_id'")
__props__.__dict__["apiproduct_id"] = apiproduct_id
__props__.__dict__["billing_period"] = billing_period
__props__.__dict__["consumption_pricing_rates"] = consumption_pricing_rates
__props__.__dict__["consumption_pricing_type"] = consumption_pricing_type
__props__.__dict__["currency_code"] = currency_code
__props__.__dict__["description"] = description
__props__.__dict__["display_name"] = display_name
__props__.__dict__["end_time"] = end_time
__props__.__dict__["fixed_fee_frequency"] = fixed_fee_frequency
__props__.__dict__["fixed_recurring_fee"] = fixed_recurring_fee
if organization_id is None and not opts.urn:
raise TypeError("Missing required property 'organization_id'")
__props__.__dict__["organization_id"] = organization_id
__props__.__dict__["revenue_share_rates"] = revenue_share_rates
__props__.__dict__["revenue_share_type"] = revenue_share_type
__props__.__dict__["setup_fee"] = setup_fee
__props__.__dict__["start_time"] = start_time
__props__.__dict__["state"] = state
__props__.__dict__["created_at"] = None
__props__.__dict__["last_modified_at"] = None
__props__.__dict__["name"] = None
super(RatePlan, __self__).__init__(
'google-native:apigee/v1:RatePlan',
resource_name,
__props__,
opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'RatePlan':
        """Rehydrate an existing RatePlan resource by its engine ``id``.

        All output properties are initialized to None; the Pulumi engine fills
        them in from the provider's recorded state when the resource is read.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = RatePlanArgs.__new__(RatePlanArgs)
        __props__.__dict__["apiproduct"] = None
        __props__.__dict__["billing_period"] = None
        __props__.__dict__["consumption_pricing_rates"] = None
        __props__.__dict__["consumption_pricing_type"] = None
        __props__.__dict__["created_at"] = None
        __props__.__dict__["currency_code"] = None
        __props__.__dict__["description"] = None
        __props__.__dict__["display_name"] = None
        __props__.__dict__["end_time"] = None
        __props__.__dict__["fixed_fee_frequency"] = None
        __props__.__dict__["fixed_recurring_fee"] = None
        __props__.__dict__["last_modified_at"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["revenue_share_rates"] = None
        __props__.__dict__["revenue_share_type"] = None
        __props__.__dict__["setup_fee"] = None
        __props__.__dict__["start_time"] = None
        __props__.__dict__["state"] = None
        return RatePlan(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def apiproduct(self) -> pulumi.Output[str]:
return pulumi.get(self, "apiproduct")
@property
@pulumi.getter(name="billingPeriod")
def billing_period(self) -> pulumi.Output[str]:
return pulumi.get(self, "billing_period")
@property
@pulumi.getter(name="consumptionPricingRates")
def consumption_pricing_rates(self) -> pulumi.Output[Sequence['outputs.GoogleCloudApigeeV1RateRangeResponse']]:
return pulumi.get(self, "consumption_pricing_rates")
@property
@pulumi.getter(name="consumptionPricingType")
def consumption_pricing_type(self) -> pulumi.Output[str]:
return pulumi.get(self, "consumption_pricing_type")
@property
@pulumi.getter(name="createdAt")
def created_at(self) -> pulumi.Output[str]:
return pulumi.get(self, "created_at")
@property
@pulumi.getter(name="currencyCode")
def currency_code(self) -> pulumi.Output[str]:
return pulumi.get(self, "currency_code")
@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="endTime")
def end_time(self) -> pulumi.Output[str]:
return pulumi.get(self, "end_time")
@property
@pulumi.getter(name="fixedFeeFrequency")
def fixed_fee_frequency(self) -> pulumi.Output[int]:
return pulumi.get(self, "fixed_fee_frequency")
@property
@pulumi.getter(name="fixedRecurringFee")
def fixed_recurring_fee(self) -> pulumi.Output['outputs.GoogleTypeMoneyResponse']:
return pulumi.get(self, "fixed_recurring_fee")
@property
@pulumi.getter(name="lastModifiedAt")
def last_modified_at(self) -> pulumi.Output[str]:
return pulumi.get(self, "last_modified_at")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="revenueShareRates")
def revenue_share_rates(self) -> pulumi.Output[Sequence['outputs.GoogleCloudApigeeV1RevenueShareRangeResponse']]:
return pulumi.get(self, "revenue_share_rates")
@property
@pulumi.getter(name="revenueShareType")
def revenue_share_type(self) -> pulumi.Output[str]:
return pulumi.get(self, "revenue_share_type")
@property
@pulumi.getter(name="setupFee")
def setup_fee(self) -> pulumi.Output['outputs.GoogleTypeMoneyResponse']:
return pulumi.get(self, "setup_fee")
@property
@pulumi.getter(name="startTime")
def start_time(self) -> pulumi.Output[str]:
return pulumi.get(self, "start_time")
@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
return pulumi.get(self, "state")
| true | true |
f723e43173495e3c39cb07e034e93d2ac42f8401 | 2,275 | py | Python | frappe/cache_manager.py | karthikeyan5/frappe | d2c652ef3a3cc6997eedcc3925e359e216b8a569 | [
"MIT"
] | null | null | null | frappe/cache_manager.py | karthikeyan5/frappe | d2c652ef3a3cc6997eedcc3925e359e216b8a569 | [
"MIT"
] | null | null | null | frappe/cache_manager.py | karthikeyan5/frappe | d2c652ef3a3cc6997eedcc3925e359e216b8a569 | [
"MIT"
] | null | null | null | # Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.defaults
from frappe.desk.notifications import (delete_notification_count_for,
clear_notifications)
common_default_keys = ["__default", "__global"]
def clear_user_cache(user=None):
	"""Clear per-user cache entries.

	With ``user`` given, only that user's entries in the user-scoped hash
	groups and its ``user:<name>`` key namespace are removed; with no user,
	every user-scoped group is dropped and the global cache is cleared too.
	"""
	cache = frappe.cache()
	groups = ("bootinfo", "user_recent", "roles", "user_doc", "lang",
		"defaults", "user_permissions", "home_page", "linked_with",
		"desktop_icons", 'portal_menu_items')
	# this will automatically reload the global cache
	# so it is important to clear this first
	clear_notifications(user)
	if user:
		for name in groups:
			cache.hdel(name, user)
		cache.delete_keys("user:" + user)
		clear_defaults_cache(user)
	else:
		for name in groups:
			cache.delete_key(name)
		clear_defaults_cache()
		clear_global_cache()
def clear_global_cache():
	"""Drop site-wide cached values (doctype metadata, rendered website pages,
	hook/app registries, settings) and rebuild the module map."""
	# local import avoids a circular dependency with frappe.website
	from frappe.website.render import clear_cache as clear_website_cache
	clear_doctype_cache()
	clear_website_cache()
	frappe.cache().delete_value(["app_hooks", "installed_apps",
		"app_modules", "module_app", "notification_config", 'system_settings',
		'scheduler_events', 'time_zone', 'webhooks', 'active_domains', 'active_modules'])
	frappe.setup_module_map()
def clear_defaults_cache(user=None):
	"""Remove cached defaults for one user (plus the shared default keys), or
	wipe the whole "defaults" key when no user is given.  The full wipe is
	skipped while the frappe app itself is being installed."""
	if user:
		keys_to_drop = [user] + common_default_keys
		for key in keys_to_drop:
			frappe.cache().hdel("defaults", key)
	elif frappe.flags.in_install != "frappe":
		frappe.cache().delete_key("defaults")
def clear_doctype_cache(doctype=None):
	"""Clear cached metadata for ``doctype`` (and its parent doctypes and
	notification counts), or for every doctype when ``doctype`` is None.

	Fix: ``getattr(frappe.local, 'meta_cache')`` had no default, so it raised
	AttributeError whenever the request-local had not set ``meta_cache`` yet;
	a default of None is now supplied.
	"""
	cache = frappe.cache()
	if getattr(frappe.local, 'meta_cache', None) and (doctype in frappe.local.meta_cache):
		del frappe.local.meta_cache[doctype]
	for key in ('is_table', 'doctype_modules'):
		cache.delete_value(key)
	groups = ["meta", "form_meta", "table_columns", "last_modified",
		"linked_doctypes", 'notifications', 'workflow']
	def clear_single(dt):
		# drop this doctype's entry from every per-doctype hash group
		for name in groups:
			cache.hdel(name, dt)
	if doctype:
		clear_single(doctype)
		# clear all parent doctypes
		for dt in frappe.db.get_all('DocField', 'parent', dict(fieldtype='Table', options=doctype)):
			clear_single(dt.parent)
		# clear all notifications
		delete_notification_count_for(doctype)
	else:
		# clear all
		for name in groups:
			cache.delete_value(name)
| 27.083333 | 94 | 0.745495 |
from __future__ import unicode_literals
import frappe
import frappe.defaults
from frappe.desk.notifications import (delete_notification_count_for,
clear_notifications)
common_default_keys = ["__default", "__global"]
def clear_user_cache(user=None):
	"""Clear per-user cache entries.

	With ``user`` given, only that user's entries in the user-scoped hash
	groups and its ``user:<name>`` key namespace are removed; with no user,
	every user-scoped group is dropped and the global cache is cleared too.
	"""
	cache = frappe.cache()
	groups = ("bootinfo", "user_recent", "roles", "user_doc", "lang",
		"defaults", "user_permissions", "home_page", "linked_with",
		"desktop_icons", 'portal_menu_items')
	# must run first: it reloads parts of the cache being cleared below
	clear_notifications(user)
	if user:
		for name in groups:
			cache.hdel(name, user)
		cache.delete_keys("user:" + user)
		clear_defaults_cache(user)
	else:
		for name in groups:
			cache.delete_key(name)
		clear_defaults_cache()
		clear_global_cache()
def clear_global_cache():
	"""Drop site-wide cached values (doctype metadata, rendered website pages,
	hook/app registries, settings) and rebuild the module map."""
	# local import avoids a circular dependency with frappe.website
	from frappe.website.render import clear_cache as clear_website_cache
	clear_doctype_cache()
	clear_website_cache()
	frappe.cache().delete_value(["app_hooks", "installed_apps",
		"app_modules", "module_app", "notification_config", 'system_settings',
		'scheduler_events', 'time_zone', 'webhooks', 'active_domains', 'active_modules'])
	frappe.setup_module_map()
def clear_defaults_cache(user=None):
	"""Remove cached defaults for one user (plus the shared default keys), or
	wipe the whole "defaults" key when no user is given.  The full wipe is
	skipped while the frappe app itself is being installed."""
	if user:
		for key in [user] + common_default_keys:
			frappe.cache().hdel("defaults", key)
	elif frappe.flags.in_install != "frappe":
		frappe.cache().delete_key("defaults")
def clear_doctype_cache(doctype=None):
	"""Clear cached metadata for ``doctype`` (and its parent doctypes and
	notification counts), or for every doctype when ``doctype`` is None.

	Fix: ``getattr(frappe.local, 'meta_cache')`` had no default, so it raised
	AttributeError whenever the request-local had not set ``meta_cache`` yet;
	a default of None is now supplied.
	"""
	cache = frappe.cache()
	if getattr(frappe.local, 'meta_cache', None) and (doctype in frappe.local.meta_cache):
		del frappe.local.meta_cache[doctype]
	for key in ('is_table', 'doctype_modules'):
		cache.delete_value(key)
	groups = ["meta", "form_meta", "table_columns", "last_modified",
		"linked_doctypes", 'notifications', 'workflow']
	def clear_single(dt):
		# drop this doctype's entry from every per-doctype hash group
		for name in groups:
			cache.hdel(name, dt)
	if doctype:
		clear_single(doctype)
		# also clear every parent doctype that embeds this one as a child table
		for dt in frappe.db.get_all('DocField', 'parent', dict(fieldtype='Table', options=doctype)):
			clear_single(dt.parent)
		delete_notification_count_for(doctype)
	else:
		for name in groups:
			cache.delete_value(name)
| true | true |
f723e7c0843a98cbd38780ee04a037d43c171c68 | 15,765 | py | Python | dataset_loader.py | fqnchina/NeuralRouting | 333dc95cb2d9a779de88e2349883a0002111d1b3 | [
"MIT"
] | 58 | 2021-03-25T19:18:56.000Z | 2022-03-30T04:59:32.000Z | dataset_loader.py | fqnchina/NeuralRouting | 333dc95cb2d9a779de88e2349883a0002111d1b3 | [
"MIT"
] | null | null | null | dataset_loader.py | fqnchina/NeuralRouting | 333dc95cb2d9a779de88e2349883a0002111d1b3 | [
"MIT"
] | 6 | 2021-06-19T03:48:50.000Z | 2021-07-02T13:05:04.000Z | import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms as tfs
from PIL import Image
import os, cv2, copy, time
from config import *
# args.
# Camera / sampling hyper-parameters pulled from the global config object.
image_height, image_width = opt.image_height, opt.image_width
intrinsics = opt.intrinsics  # 3x3 pinhole intrinsics: [0,0]=fx, [1,1]=fy, [0,2]=cx, [1,2]=cy
close_radius, far_radiuses = 0, opt.far_radiuses  # presumably one far radius per tree level — confirm in config
n_neighb_pts = opt.n_neighb_pts  # neighbors sampled around each scene point
def isSon(son, fa):
    """Return True if route ``fa`` is a prefix of route ``son``.

    Routes are sequences of child indices from the tree root, so a node lies
    under (or equals) another node iff the ancestor's route is a prefix of
    its own.  Like the original index loop, raises IndexError if ``son`` is
    shorter than ``fa``.
    """
    return all(son[i] == v for i, v in enumerate(fa))
# todo: to be migrated...
def depth2local(depth):  # depth: float32, meter.
    """Back-project a metric depth map into per-pixel camera-space XYZ using
    the module-level pinhole intrinsics; returns an (H, W, 3) array."""
    cx, cy = intrinsics[0, 2], intrinsics[1, 2]
    fx, fy = intrinsics[0, 0], intrinsics[1, 1]
    # integer pixel grids: v = row index, u = column index
    v_base, u_base = np.mgrid[0:image_height, 0:image_width]
    X = (u_base - cx) * depth / fx
    Y = (v_base - cy) * depth / fy
    return np.stack((X, Y, depth), axis=2)
def partial_pts(pts_all_in, p, r_min, r_max):  # pts_all_in.shape (#points, #channel)
    """Select rows of ``pts_all_in`` whose xyz (columns 0:3) lie within
    Euclidean distance [r_min, r_max] of center ``p`` (first 3 entries used).

    Returns a fresh array; the input is never modified.  The original
    deep-copied the whole input first, which is redundant — boolean-mask
    indexing already returns a copy — so that copy (and the explicit
    row-repeat of ``p``, replaced by broadcasting) is removed.
    """
    dists = np.linalg.norm(pts_all_in[:, 0:3] - p[np.newaxis, 0:3], axis=1)
    return pts_all_in[np.logical_and(dists >= r_min, dists <= r_max)]
def sample_pts(pts_in, num):  # pts_in.shape (#points, #channel)
    """Randomly sample ``num`` rows from ``pts_in``, repeating the point set
    when it has fewer than ``num`` rows.  Returns a new (num, C) array.

    The original grew the array by repeated self-concatenation (doubling),
    which performs O(log) full copies and can overshoot; a single np.tile
    with the exact ceil(num / N) repetition count keeps the same balanced
    with-repetition sampling in one copy.
    """
    n = pts_in.shape[0]
    reps = -(-num // n)  # ceil division without importing math
    pts = np.tile(pts_in, (reps, 1)) if reps > 1 else pts_in
    rand_ids = np.random.permutation(pts.shape[0])
    return pts[rand_ids[:num], :]
def sample_pts_rc(pts_in, rcs_in, num):  # pts_in.shape (#points, #channel)
    """Jointly sample ``num`` aligned rows from points ``pts_in`` and their
    pixel coordinates ``rcs_in``.

    Bug fix: the original doubled only ``pts`` when fewer than ``num`` rows
    were available, then indexed ``rcs_in`` with indices drawn from the
    doubled length — an IndexError whenever repetition was needed (and its
    ``rcs`` copy was never used).  Both arrays are now grown together so each
    returned point stays paired with its own (row, col).
    """
    pts, rcs = pts_in, rcs_in
    while pts.shape[0] < num:
        pts = np.concatenate((pts, pts), axis=0)
        rcs = np.concatenate((rcs, rcs), axis=0)
    rand_ids = np.random.permutation(pts.shape[0])[:num]
    return pts[rand_ids, :], rcs[rand_ids, :]
def sample_pts9d_r3d(pts_in, num, radius):  # pts_in.shape (#points, #channel)
    """Sample ``num`` rows from the points of ``pts_in`` whose xyz norm
    (columns 0:3) is at most ``radius``.

    Returns None when fewer than 500 points survive the radius filter, so
    callers can reject near-empty (mostly background) patches instead of
    looping forever on them.  Cleanup vs. the original: dead commented-out
    debug code removed, the magic threshold named, the input deepcopy dropped
    (mask indexing copies), and the doubling loop replaced by one np.tile.
    """
    min_inlier_pts = 500  # below this the patch is considered unusable
    pts = pts_in[np.linalg.norm(pts_in[:, 0:3], axis=1) <= radius]
    if pts.shape[0] < min_inlier_pts:  # avoid infinite loop on empty sets
        return None
    reps = -(-num // pts.shape[0])  # ceil division
    if reps > 1:
        pts = np.tile(pts, (reps, 1))
    rand_ids = np.random.permutation(pts.shape[0])
    return pts[rand_ids[:num], :]
def shift_pts(pts_in, cen):  # pts_in.shape (#points, #channel)
    """Return a copy of ``pts_in`` with ``cen`` subtracted from the xyz
    columns (0:3); all other channels are left untouched."""
    shifted = copy.deepcopy(pts_in)
    # broadcasting replaces the explicit row-repeat of the center
    shifted[:, 0:3] = shifted[:, 0:3] - cen
    return shifted
def shift_pts6d(pts_in, cen):  # pts_in.shape (#points, #channel)
    """Return a copy of ``pts_in`` with ``cen`` subtracted from every channel
    of every row."""
    shifted = copy.deepcopy(pts_in)
    shifted[:, :] = shifted[:, :] - cen  # broadcast over rows
    return shifted
def shift_pts9d(pts_in, cen):  # pts_in.shape (#points, #channel)
    """Return a copy of ``pts_in`` with ``cen`` subtracted from every row,
    except the normal channels (columns 3:6), which are not shifted —
    normals are directions, so only the position (0:3) and color (6:9)
    offsets apply.  Neither input is modified."""
    offset = copy.deepcopy(cen)
    offset[3:6] = 0.0  # never translate normals
    shifted = copy.deepcopy(pts_in)
    shifted[:, :] = shifted[:, :] - offset
    return shifted
def make_ppf(pts9d, cen9d):  # (N,9), (9,)
    """Point-pair features between a center point and its neighbors.

    pts9d: (N, 9) neighbor rows [dx dy dz, nx ny nz, r g b], already
    expressed relative to the center (see shift_pts9d).
    cen9d: (9,) center point; only its normal cen9d[3:6] is used.
    Returns (N, 7) rows [||d||, n1·d̂, n2·d̂, n1·n2, r, g, b], where n1 is the
    center normal, n2 the neighbor normal and d̂ the unit offset direction.
    """
    n_pts = pts9d.shape[0]
    d = pts9d[:, 0:3]
    n2 = pts9d[:, 3:6]
    n1 = np.repeat(cen9d[3:6].reshape(1, 3), n_pts, axis=0)
    f1 = np.linalg.norm(d, axis=1).reshape(n_pts, 1)
    d_hat = d / f1
    f2 = np.sum(n1 * d_hat, axis=1).reshape(n_pts, 1)
    f3 = np.sum(n2 * d_hat, axis=1).reshape(n_pts, 1)
    f4 = np.sum(n1 * n2, axis=1).reshape(n_pts, 1)
    return np.concatenate((f1, f2, f3, f4, pts9d[:, 6:9]), axis=1)
def compute_points_normal(pts):
    """Estimate a per-pixel surface normal map by local plane fitting.

    pts: (H, W, 3) camera-space points (output of depth2local).
    Returns an (H, W, 3) array of unit normals oriented toward the camera
    (dot with +z is non-positive); border pixels and degenerate patches are
    left as zeros.  Pure-Python double loop, hence the timing printout —
    callers cache the result to disk as ``*.scaled.normal.npy``.
    """
    raw_shape = pts.shape
    normal = np.zeros((raw_shape)) # (r,c,3)
    t0 = time.time()
    for r in range(2, raw_shape[0] - 2):
        for c in range(2, raw_shape[1] - 2):
            pts_local = pts[r - 2:r + 3, c - 2:c + 3, :] # (5,5,3)
            pts_local = pts_local.reshape(-1, 3) # (N,3)
            # drop neighbors farther than 10 cm from the center (depth discontinuities)
            pts_local = pts_local[np.linalg.norm(pts_local - pts[r, c, :], axis=1) < 0.1] # remove outliers.
            if pts_local.shape[0] < 4:
                continue
            pts_local = pts_local - np.mean(pts_local, axis=0)
            # covariance of the centered patch; its smallest-eigenvalue eigenvector is the plane normal
            C = pts_local.T @ pts_local / pts_local.shape[0]
            e, v = np.linalg.eig(C)
            d = v[:, np.where(e == np.min(e))[0][0]]
            n = d / np.linalg.norm(d)
            # orient consistently: flip so the normal faces the camera (-z half-space)
            if np.dot(n, np.array([0, 0, 1])) > 0:
                n = -n
            normal[r, c, :] = n
    t1 = time.time()
    print('preprocess data: compute normal cost {:.2f}s'.format(t1 - t0))
    return normal
# for depth adaptive 2d
def partial_pts_2d(pts_rc, cen_rc, list_drdc):
    """Gather the depth-adaptive 2-D neighborhood of pixel ``cen_rc``.

    ``list_drdc`` holds (dr, dc) offsets in full-resolution pixels; they are
    divided by 4 because the feature maps are 4x downsampled.  Target indices
    are clamped to the image bounds, so off-image neighbors repeat the nearest
    border pixel.  Returns a fresh (len(list_drdc), C) array.

    Cleanup vs. the original: the unused ``mat_cen_rc``/``result`` locals are
    gone, clamping uses np.clip, and the redundant deepcopy of the (already
    fresh) fancy-indexing result is dropped.  Behavior is unchanged.
    """
    r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)
    mat_drdc = (np.array(list_drdc) / 4).astype(int)
    targ_rc = cen_rc + mat_drdc
    targ_r = np.clip(targ_rc[:, 0], 0, r_max)
    targ_c = np.clip(targ_rc[:, 1], 0, c_max)
    return pts_rc[targ_r, targ_c]
# for depth adaptive 2d
def partial_pts_2d_rc(pts_rc, cen_rc, list_drdc):
    """Like partial_pts_2d, but additionally returns the clamped (row, col)
    indices that were actually sampled, as an (N, 2) int array, so callers
    can map each neighbor back to its pixel.

    Cleanup vs. the original: unused locals removed, clamping via np.clip,
    np.stack instead of concatenating two reshaped columns, and the
    deepcopies of already-fresh arrays dropped.  Behavior is unchanged.
    """
    r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)
    mat_drdc = (np.array(list_drdc) / 4).astype(int)
    targ_rc = cen_rc + mat_drdc
    targ_r = np.clip(targ_rc[:, 0], 0, r_max)
    targ_c = np.clip(targ_rc[:, 1], 0, c_max)
    pts = pts_rc[targ_r, targ_c]
    rcs = np.stack((targ_r, targ_c), axis=1)
    return pts, rcs
# for depth adaptive 2d with dynamics label
def partial_pts_2d_with_label(pts_rc, cen_rc, list_drdc, mask): # mask: 0 for static pixel, 255 for dynamic pixel.
    """Depth-adaptive 2-D neighborhood of ``cen_rc`` restricted to static pixels.

    Offsets in ``list_drdc`` are divided by 4 (feature maps are 4x
    downsampled) and clamped to the image bounds; pixels whose ``mask`` value
    is nonzero (dynamic) are excluded.  NOTE(review): unlike partial_pts_2d,
    the gather goes through a boolean image mask, so results come back in
    row-major image order (not offset order) and duplicate targets collapse
    to a single entry — the output can be shorter than len(list_drdc).
    """
    result = None
    r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)
    mat_drdc = (np.array(list_drdc) / 4).astype(int)
    mat_cen_rc = np.array(cen_rc)
    mat_targ_rc = cen_rc + mat_drdc
    # clamp targets into the (downsampled) image bounds
    mat_targ_rc[mat_targ_rc < 0] = 0
    targ_r = mat_targ_rc[:, 0]
    targ_r[targ_r > r_max] = r_max
    targ_c = mat_targ_rc[:, 1]
    targ_c[targ_c > c_max] = c_max
    # m1: static pixels, m2: requested targets; keep their intersection
    m1 = np.zeros((mask.shape[0], mask.shape[1]))
    m1[mask == 0] = 1
    m2 = np.zeros((mask.shape[0], mask.shape[1]))
    m2[targ_r, targ_c] = 1
    m3 = np.logical_and(m1, m2)
    result = pts_rc[m3]
    return copy.deepcopy(result)
class LevelDataset_PPF(Dataset):
    """Training dataset for one level of the routing tree.

    Each item loads one RGB-D frame, back-projects and 4x-downsamples it,
    attaches (cached) surface normals, and draws up to ``n_pts_per_frame``
    scene points together with their depth-adaptive PPF neighborhoods and
    per-level routing labels.
    """

    def __init__(self, data_dir, the_list, n_pts_per_frame=opt.n_pts_per_frame, neighbor_da2d=None, far_radius=None,
                 enable_color_aug=True, specified_node=None):
        # the_list: per-frame entries of (frame id, list of (r, c, route)) —
        # presumably precomputed by the training pipeline; confirm against caller.
        super().__init__()
        self.data_dir, self.the_list = data_dir, the_list
        self.n_pts_per_frame = n_pts_per_frame
        self.neighbor_da2d = neighbor_da2d  # (n_pts, dim_pt).
        self.far_radius = far_radius  # scalar.
        self.enable_color_aug = enable_color_aug
        self.specified_node = specified_node  # optional route prefix; keep only points under this node

    def __len__(self):
        return len(self.the_list)

    def __getitem__(self, idx):
        fid, rc_route = self.the_list[idx]
        # load
        depth = cv2.imread('{}/{}'.format(self.data_dir, opt.depth_format.format(fid)), cv2.IMREAD_UNCHANGED) / 1000.0
        color = cv2.imread('{}/{}'.format(self.data_dir, opt.color_format.format(fid)), cv2.IMREAD_UNCHANGED)[:, :, 0:3]
        # color jitter (augmentation): each jitter applied independently with p=0.5
        if self.enable_color_aug:
            img = Image.fromarray(color)
            if np.random.rand() < 0.5:
                img = tfs.ColorJitter(brightness=1.)(img)
            if np.random.rand() < 0.5:
                img = tfs.ColorJitter(contrast=1.)(img)
            if np.random.rand() < 0.5:
                img = tfs.ColorJitter(saturation=1.)(img)
            color = np.array(img)
        # normalize 0..255 colors to [-0.5, 0.5]
        if np.max(color) > 1:
            color = color / 255. - 0.5
        local = depth2local(depth)
        # 4x downsample all maps by strided row/column selection
        r_ids, c_ids = list(range(0, image_height, 4)), list(range(0, image_width, 4))
        depth, color, local = depth[r_ids, :][:, c_ids], color[r_ids, :, :][:, c_ids, :], local[r_ids, :, :][:, c_ids, :]
        # normal by 3d neighbor plane fitting.
        normal_path = '{}/frame-{:06d}.scaled.normal.npy'.format(self.data_dir, fid)
        if os.path.exists(normal_path):
            # print('fid {}'.format(fid)) # to debug rio10 scene09 10
            # normal = np.load(normal_path)
            # a (nearly) empty cache file means the cached normals are invalid: recompute
            if os.path.getsize(normal_path) > 1:
                normal = np.load(normal_path, encoding='bytes', allow_pickle=True)
            else:
                normal = compute_points_normal(local)
                np.save(normal_path, normal)
        else:
            normal = compute_points_normal(local)
            np.save(normal_path, normal)
        # per-pixel 9-D points: [x y z, nx ny nz, r g b]
        lclnmlclr = np.concatenate((np.concatenate((local, normal), axis=2), color), axis=2)
        # build a patch
        rand_ids = np.arange(len(rc_route))
        np.random.shuffle(rand_ids)
        selected_ids = rand_ids[0:self.n_pts_per_frame * 2]  # more candidates
        pt_in = torch.zeros((self.n_pts_per_frame, 7, 1))
        nb_in = torch.zeros((self.n_pts_per_frame, 7, opt.n_neighb_pts))
        # labels are pre-filled with 'ary' (from the config wildcard import) —
        # presumably the tree arity acting as an "unused slot" sentinel; confirm in config
        route_labs = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1)).fill_(ary)
        rc_list = []
        # da2d+3d neighbor
        if not self.neighbor_da2d is None:
            sid = 0
            for tmp_idx in range(len(selected_ids)):
                r, c = rc_route[selected_ids[tmp_idx]][0], rc_route[selected_ids[tmp_idx]][1]
                # skip pixels with invalid normals (NaN in channel 3)
                if np.isnan(lclnmlclr[r, c, 3]):
                    continue
                # optionally restrict to points routed under one specific node
                if self.specified_node:
                    if not isSon(rc_route[selected_ids[tmp_idx]][2], self.specified_node):
                        continue
                route_labs[sid] = torch.Tensor(rc_route[selected_ids[tmp_idx]][2])
                rc_list.append([r, c])
                # center descriptor: zero offset/angles + the pixel's color
                pt_in[sid] = torch.Tensor(
                    np.concatenate((np.array([[0.], [0.], [0.], [0.]]), color[r, c, 0:3][:, np.newaxis]), axis=0))
                # scale the 2-D neighborhood pattern inversely with depth (depth-adaptive)
                da2d_list = (np.array(self.neighbor_da2d) / depth[r, c]).astype(int)
                # ppf
                pts9d = shift_pts9d(sample_pts(partial_pts_2d(lclnmlclr, (r, c), da2d_list), opt.n_neighb_pts),
                                    lclnmlclr[r, c, :])
                cen9d = copy.deepcopy(lclnmlclr[r, c, :])
                cen9d[0:3] = np.zeros(3)
                ppf7d = make_ppf(pts9d, cen9d)  # (N,9), (9,)
                ppf7d[np.isnan(ppf7d)] = 0.0
                nb_in[sid] = torch.Tensor(ppf7d).transpose(1, 0)
                # remove background by 3d radius
                xyz = pts9d[:, 0:3]
                ids_out_of_bound = np.linalg.norm(xyz, axis=1) > self.far_radius
                nb_in[sid, :, ids_out_of_bound] = 0.
                # count
                sid += 1
                if sid >= self.n_pts_per_frame:
                    break
            # trim to the number of points actually accepted
            pt_in = pt_in[:sid]
            nb_in = nb_in[:sid]
            route_labs = route_labs[:sid]
        return pt_in, nb_in, route_labs, fid, torch.Tensor(np.array(rc_list))
class TestDataset_PPF(Dataset):
    """Test-time dataset yielding multi-scale point-pair-feature (PPF) patches.

    For each frame id it loads depth/color, back-projects to camera-space
    points, estimates (or loads cached) normals, then randomly samples up to
    ``n_pts_per_frame`` valid pixels and builds a 7-channel PPF neighborhood
    tensor per tree level around each sampled pixel.
    """
    def __init__(self, data_dir, the_list, n_pts_per_frame=opt.n_pts_per_frame, neighbor_da2d=None):
        # data_dir: directory holding the depth/color frames and cached normals;
        # the_list: sequence of frame ids to serve.
        super().__init__()
        self.data_dir, self.the_list = data_dir, the_list
        self.n_pts_per_frame = n_pts_per_frame
        self.neighbor_da2d = neighbor_da2d  # per-level 2D offset lists; list of (n_pts, dim_pt)

    def __len__(self):
        # One dataset item per frame id.
        return len(self.the_list)

    def __getitem__(self, idx):
        """Return (pt_in, nb_ms_in, -1, fid, rc_tensor) for frame ``the_list[idx]``.

        pt_in: (n_pts_per_frame, 7, 1) center descriptors (zeros + RGB);
        nb_ms_in: (n_pts_per_frame, tree_height-1, 7, n_neighb_pts) PPF neighborhoods;
        the third element is a -1 placeholder (no route labels at test time);
        rc_tensor: (k, 2) sampled pixel coordinates, k <= n_pts_per_frame.
        """
        fid = self.the_list[idx]
        # load
        # Depth is stored in integer units and divided by 1000.0 — presumably mm -> m.
        depth = cv2.imread('{}/{}'.format(self.data_dir, opt.depth_format.format(fid)), cv2.IMREAD_UNCHANGED) / 1000.0
        color = cv2.imread('{}/{}'.format(self.data_dir, opt.color_format.format(fid)), cv2.IMREAD_UNCHANGED)[:, :, 0:3]
        if np.max(color) > 1:
            # Normalize 0..255 color into [-0.5, 0.5].
            color = color / 255. - 0.5
        local = depth2local(depth)
        # Downsample everything by a factor of 4 in both image dimensions.
        r_ids, c_ids = list(range(0, image_height, 4)), list(range(0, image_width, 4))
        depth, color, local = depth[r_ids, :][:, c_ids], color[r_ids, :, :][:, c_ids, :], local[r_ids, :, :][:, c_ids,
                                                                                          :]
        # normal by 3d neighbor plane fitting.
        normal_path = '{}/frame-{:06d}.scaled.normal.npy'.format(self.data_dir, fid)
        if os.path.exists(normal_path):
            # normal = np.load(normal_path)
            # A (nearly) empty cache file is treated as invalid and recomputed.
            if os.path.getsize(normal_path) > 1:
                normal = np.load(normal_path, encoding='bytes', allow_pickle=True)
            else:
                normal = compute_points_normal(local)
                np.save(normal_path, normal)
        else:
            normal = compute_points_normal(local)
            np.save(normal_path, normal)
        # 9-channel per-pixel image: xyz (3) + normal (3) + color (3).
        lclnmlclr = np.concatenate((np.concatenate((local, normal), axis=2), color), axis=2)
        # build a patch
        pt_in = torch.zeros((self.n_pts_per_frame, 7, 1))
        nb_ms_in = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1, 7, opt.n_neighb_pts))
        # NOTE(review): route_labs is allocated but never filled or returned
        # (the return tuple carries -1 in its place).
        route_labs = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1))
        r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)
        rc_list = []
        # da2d+3d neighbor
        if not self.neighbor_da2d is None:
            sid, count_crt, count_max = 0, 0, 9999
            mask = np.zeros((r_max, c_max))  # marks pixels already sampled
            while len(rc_list) < self.n_pts_per_frame:
                # avoid infinite loop
                count_crt += 1
                if count_crt > count_max:
                    break
                r, c = np.random.randint(0, r_max), np.random.randint(0, c_max)
                # Reject invalid depth, duplicates, and pixels without a normal
                # (channel 3 is the normal's x component; NaN means "no normal").
                if depth[r, c] == 0. or mask[r, c] == 1.:
                    continue
                if np.isnan(lclnmlclr[r, c, 3]):
                    continue
                mask[r, c] = 1.
                rc_list.append([r, c])
                # Center descriptor: 4 zeros (PPF slots) followed by the pixel's RGB.
                pt_in[sid] = torch.Tensor(
                    np.concatenate((np.array([[0.], [0.], [0.], [0.]]), color[r, c, 0:3][:, np.newaxis]), axis=0))
                for lid in range(opt.tree_height - 1):
                    # 2D offsets are scaled by inverse depth — presumably to keep a
                    # roughly constant metric neighborhood size; TODO confirm.
                    da2d_list = (np.array(self.neighbor_da2d[lid]) / depth[r, c]).astype(int)
                    # ppf
                    pts9d = shift_pts9d(
                        sample_pts(partial_pts_2d(lclnmlclr, (r, c), da2d_list), opt.n_neighb_pts),
                        lclnmlclr[r, c, :])
                    cen9d = copy.deepcopy(lclnmlclr[r, c, :])
                    cen9d[0:3] = np.zeros(3)  # center is the local origin after shifting
                    ppf7d = make_ppf(pts9d, cen9d)  # (N,9), (9,)
                    ppf7d[np.isnan(ppf7d)] = 0.0
                    nb_ms_in[sid, lid, :, :] = torch.Tensor(ppf7d).transpose(1, 0)
                    # remove background by 3d radius
                    xyz = pts9d[:, 0:3]
                    ids_out_of_bound = np.linalg.norm(xyz, axis=1) > opt.far_radiuses[lid]
                    nb_ms_in[sid, lid, :, ids_out_of_bound] = 0.
                # count
                sid += 1
        return pt_in, nb_ms_in, -1, fid, torch.Tensor(np.array(rc_list))
# # debug
# if __name__ == '__main__':
# print('done.')
| 44.159664 | 122 | 0.562195 | import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms as tfs
from PIL import Image
import os, cv2, copy, time
from config import *
image_height, image_width = opt.image_height, opt.image_width
intrinsics = opt.intrinsics
close_radius, far_radiuses = 0, opt.far_radiuses
n_neighb_pts = opt.n_neighb_pts
def isSon(son, fa):
for i in range(len(fa)):
if son[i] != fa[i]:
return False
return True
def depth2local(depth):
cx, cy, fx, fy = intrinsics[0, 2], intrinsics[1, 2], intrinsics[0, 0], intrinsics[1, 1]
u_base = np.tile(np.arange(image_width), (image_height, 1))
v_base = np.tile(np.arange(image_height)[:, np.newaxis], (1, image_width))
X = (u_base - cx) * depth / fx
Y = (v_base - cy) * depth / fy
coord_local = np.stack((X, Y, depth), axis=2)
return coord_local
def partial_pts(pts_all_in, p, r_min, r_max): pts_all_in)
p_mat = p[np.newaxis, 0:3].repeat(pts_all.shape[0], axis=0)
norms = np.linalg.norm((p_mat - pts_all[:, 0:3]), axis=1)
return pts_all[np.logical_and(norms >= r_min, norms <= r_max)]
def sample_pts(pts_in, num): in)
while pts.shape[0] < num:
pts = np.concatenate((pts, pts), axis=0)
rand_ids = np.arange(pts.shape[0])
np.random.shuffle(rand_ids)
return pts[rand_ids[0:num], :]
def sample_pts_rc(pts_in, rcs_in, num): in)
rcs = copy.deepcopy(rcs_in)
while pts.shape[0] < num:
pts = np.concatenate((pts, pts), axis=0)
rand_ids = np.arange(pts.shape[0])
np.random.shuffle(rand_ids)
return pts[rand_ids[0:num], :], rcs_in[rand_ids[0:num], :]
def sample_pts9d_r3d(pts_in, num, radius): in)
thresh = 500
xyz = pts[:, 0:3]
pts = pts[np.linalg.norm(xyz, axis=1) <= radius]
if pts.shape[0] < thresh:
return None
while pts.shape[0] < num:
pts = np.concatenate((pts, pts), axis=0)
rand_ids = np.arange(pts.shape[0])
np.random.shuffle(rand_ids)
return pts[rand_ids[0:num], :]
def shift_pts(pts_in, cen): in)
cen_mat = cen[np.newaxis, :].repeat(pts.shape[0], axis=0)
pts[:, 0:3] = pts[:, 0:3] - cen_mat
return pts
def shift_pts6d(pts_in, cen): in)
cen_mat = cen[np.newaxis, :].repeat(pts.shape[0], axis=0)
pts[:, :] = pts[:, :] - cen_mat
return pts
def shift_pts9d(pts_in, cen):
cpt[3:6] = np.zeros(3)
pts = copy.deepcopy(pts_in)
cpt_mat = cpt[np.newaxis, :].repeat(pts.shape[0], axis=0)
pts[:, :] = pts[:, :] - cpt_mat
return pts
def make_ppf(pts9d, cen9d):
n_pts = pts9d.shape[0]
d = pts9d[:, 0:3]
n2 = pts9d[:, 3:6]
n1 = np.repeat(cen9d[3:6].reshape(1, 3), n_pts, axis=0)
dim1 = np.linalg.norm(d, axis=1).reshape(n_pts, 1)
d = d / (dim1.reshape(n_pts, 1))
dim2 = np.sum(n1 * d, axis=1).reshape(n_pts, 1)
dim3 = np.sum(n2 * d, axis=1).reshape(n_pts, 1)
dim4 = np.sum(n1 * n2, axis=1).reshape(n_pts, 1)
ppf = np.concatenate((dim1, dim2, dim3, dim4), axis=1)
ppf7d = np.concatenate((ppf, pts9d[:, 6:9]), axis=1)
return ppf7d
def compute_points_normal(pts):
raw_shape = pts.shape
normal = np.zeros((raw_shape))
t0 = time.time()
for r in range(2, raw_shape[0] - 2):
for c in range(2, raw_shape[1] - 2):
pts_local = pts[r - 2:r + 3, c - 2:c + 3, :]
pts_local = pts_local.reshape(-1, 3)
pts_local = pts_local[np.linalg.norm(pts_local - pts[r, c, :], axis=1) < 0.1]
if pts_local.shape[0] < 4:
continue
pts_local = pts_local - np.mean(pts_local, axis=0)
C = pts_local.T @ pts_local / pts_local.shape[0]
e, v = np.linalg.eig(C)
d = v[:, np.where(e == np.min(e))[0][0]]
n = d / np.linalg.norm(d)
if np.dot(n, np.array([0, 0, 1])) > 0:
n = -n
normal[r, c, :] = n
t1 = time.time()
print('preprocess data: compute normal cost {:.2f}s'.format(t1 - t0))
return normal
def partial_pts_2d(pts_rc, cen_rc, list_drdc):
result = None
r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)
mat_drdc = (np.array(list_drdc) / 4).astype(int)
mat_cen_rc = np.array(cen_rc)
mat_targ_rc = cen_rc + mat_drdc
mat_targ_rc[mat_targ_rc < 0] = 0
targ_r = mat_targ_rc[:, 0]
targ_r[targ_r > r_max] = r_max
targ_c = mat_targ_rc[:, 1]
targ_c[targ_c > c_max] = c_max
result = pts_rc[targ_r, targ_c]
return copy.deepcopy(result)
def partial_pts_2d_rc(pts_rc, cen_rc, list_drdc):
result = None
r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)
mat_drdc = (np.array(list_drdc) / 4).astype(int)
mat_cen_rc = np.array(cen_rc)
mat_targ_rc = cen_rc + mat_drdc
mat_targ_rc[mat_targ_rc < 0] = 0
targ_r = mat_targ_rc[:, 0]
targ_r[targ_r > r_max] = r_max
targ_c = mat_targ_rc[:, 1]
targ_c[targ_c > c_max] = c_max
result = pts_rc[targ_r, targ_c]
return copy.deepcopy(result), copy.deepcopy(
np.concatenate((targ_r.reshape(targ_r.shape[0], 1), targ_c.reshape(targ_c.shape[0], 1)), axis=1))
def partial_pts_2d_with_label(pts_rc, cen_rc, list_drdc, mask):
result = None
r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)
mat_drdc = (np.array(list_drdc) / 4).astype(int)
mat_cen_rc = np.array(cen_rc)
mat_targ_rc = cen_rc + mat_drdc
mat_targ_rc[mat_targ_rc < 0] = 0
targ_r = mat_targ_rc[:, 0]
targ_r[targ_r > r_max] = r_max
targ_c = mat_targ_rc[:, 1]
targ_c[targ_c > c_max] = c_max
m1 = np.zeros((mask.shape[0], mask.shape[1]))
m1[mask == 0] = 1
m2 = np.zeros((mask.shape[0], mask.shape[1]))
m2[targ_r, targ_c] = 1
m3 = np.logical_and(m1, m2)
result = pts_rc[m3]
return copy.deepcopy(result)
class LevelDataset_PPF(Dataset):
def __init__(self, data_dir, the_list, n_pts_per_frame=opt.n_pts_per_frame, neighbor_da2d=None, far_radius=None,
enable_color_aug=True, specified_node=None):
super().__init__()
self.data_dir, self.the_list = data_dir, the_list
self.n_pts_per_frame = n_pts_per_frame
self.neighbor_da2d = neighbor_da2d
self.far_radius = far_radius
self.enable_color_aug = enable_color_aug
self.specified_node = specified_node
def __len__(self):
return len(self.the_list)
def __getitem__(self, idx):
fid, rc_route = self.the_list[idx]
depth = cv2.imread('{}/{}'.format(self.data_dir, opt.depth_format.format(fid)), cv2.IMREAD_UNCHANGED) / 1000.0
color = cv2.imread('{}/{}'.format(self.data_dir, opt.color_format.format(fid)), cv2.IMREAD_UNCHANGED)[:, :, 0:3]
if self.enable_color_aug:
img = Image.fromarray(color)
if np.random.rand() < 0.5:
img = tfs.ColorJitter(brightness=1.)(img)
if np.random.rand() < 0.5:
img = tfs.ColorJitter(contrast=1.)(img)
if np.random.rand() < 0.5:
img = tfs.ColorJitter(saturation=1.)(img)
color = np.array(img)
if np.max(color) > 1:
color = color / 255. - 0.5
local = depth2local(depth)
r_ids, c_ids = list(range(0, image_height, 4)), list(range(0, image_width, 4))
depth, color, local = depth[r_ids, :][:, c_ids], color[r_ids, :, :][:, c_ids, :], local[r_ids, :, :][:, c_ids, :]
normal_path = '{}/frame-{:06d}.scaled.normal.npy'.format(self.data_dir, fid)
if os.path.exists(normal_path):
os.path.getsize(normal_path) > 1:
normal = np.load(normal_path, encoding='bytes', allow_pickle=True)
else:
normal = compute_points_normal(local)
np.save(normal_path, normal)
else:
normal = compute_points_normal(local)
np.save(normal_path, normal)
lclnmlclr = np.concatenate((np.concatenate((local, normal), axis=2), color), axis=2)
rand_ids = np.arange(len(rc_route))
np.random.shuffle(rand_ids)
selected_ids = rand_ids[0:self.n_pts_per_frame * 2]
pt_in = torch.zeros((self.n_pts_per_frame, 7, 1))
nb_in = torch.zeros((self.n_pts_per_frame, 7, opt.n_neighb_pts))
route_labs = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1)).fill_(ary)
rc_list = []
if not self.neighbor_da2d is None:
sid = 0
for tmp_idx in range(len(selected_ids)):
r, c = rc_route[selected_ids[tmp_idx]][0], rc_route[selected_ids[tmp_idx]][1]
if np.isnan(lclnmlclr[r, c, 3]):
continue
if self.specified_node:
if not isSon(rc_route[selected_ids[tmp_idx]][2], self.specified_node):
continue
route_labs[sid] = torch.Tensor(rc_route[selected_ids[tmp_idx]][2])
rc_list.append([r, c])
pt_in[sid] = torch.Tensor(
np.concatenate((np.array([[0.], [0.], [0.], [0.]]), color[r, c, 0:3][:, np.newaxis]), axis=0))
da2d_list = (np.array(self.neighbor_da2d) / depth[r, c]).astype(int)
pts9d = shift_pts9d(sample_pts(partial_pts_2d(lclnmlclr, (r, c), da2d_list), opt.n_neighb_pts),
lclnmlclr[r, c, :])
cen9d = copy.deepcopy(lclnmlclr[r, c, :])
cen9d[0:3] = np.zeros(3)
ppf7d = make_ppf(pts9d, cen9d)
ppf7d[np.isnan(ppf7d)] = 0.0
nb_in[sid] = torch.Tensor(ppf7d).transpose(1, 0)
xyz = pts9d[:, 0:3]
ids_out_of_bound = np.linalg.norm(xyz, axis=1) > self.far_radius
nb_in[sid, :, ids_out_of_bound] = 0.
sid += 1
if sid >= self.n_pts_per_frame:
break
pt_in = pt_in[:sid]
nb_in = nb_in[:sid]
route_labs = route_labs[:sid]
return pt_in, nb_in, route_labs, fid, torch.Tensor(np.array(rc_list))
class TestDataset_PPF(Dataset):
def __init__(self, data_dir, the_list, n_pts_per_frame=opt.n_pts_per_frame, neighbor_da2d=None):
super().__init__()
self.data_dir, self.the_list = data_dir, the_list
self.n_pts_per_frame = n_pts_per_frame
self.neighbor_da2d = neighbor_da2d
def __len__(self):
return len(self.the_list)
def __getitem__(self, idx):
fid = self.the_list[idx]
depth = cv2.imread('{}/{}'.format(self.data_dir, opt.depth_format.format(fid)), cv2.IMREAD_UNCHANGED) / 1000.0
color = cv2.imread('{}/{}'.format(self.data_dir, opt.color_format.format(fid)), cv2.IMREAD_UNCHANGED)[:, :, 0:3]
if np.max(color) > 1:
color = color / 255. - 0.5
local = depth2local(depth)
r_ids, c_ids = list(range(0, image_height, 4)), list(range(0, image_width, 4))
depth, color, local = depth[r_ids, :][:, c_ids], color[r_ids, :, :][:, c_ids, :], local[r_ids, :, :][:, c_ids,
:]
normal_path = '{}/frame-{:06d}.scaled.normal.npy'.format(self.data_dir, fid)
if os.path.exists(normal_path):
if os.path.getsize(normal_path) > 1:
normal = np.load(normal_path, encoding='bytes', allow_pickle=True)
else:
normal = compute_points_normal(local)
np.save(normal_path, normal)
else:
normal = compute_points_normal(local)
np.save(normal_path, normal)
lclnmlclr = np.concatenate((np.concatenate((local, normal), axis=2), color), axis=2)
pt_in = torch.zeros((self.n_pts_per_frame, 7, 1))
nb_ms_in = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1, 7, opt.n_neighb_pts))
route_labs = torch.zeros((self.n_pts_per_frame, opt.tree_height - 1))
r_max, c_max = int(image_height / 4 - 1), int(image_width / 4 - 1)
rc_list = []
if not self.neighbor_da2d is None:
sid, count_crt, count_max = 0, 0, 9999
mask = np.zeros((r_max, c_max))
while len(rc_list) < self.n_pts_per_frame:
count_crt += 1
if count_crt > count_max:
break
r, c = np.random.randint(0, r_max), np.random.randint(0, c_max)
if depth[r, c] == 0. or mask[r, c] == 1.:
continue
if np.isnan(lclnmlclr[r, c, 3]):
continue
mask[r, c] = 1.
rc_list.append([r, c])
pt_in[sid] = torch.Tensor(
np.concatenate((np.array([[0.], [0.], [0.], [0.]]), color[r, c, 0:3][:, np.newaxis]), axis=0))
for lid in range(opt.tree_height - 1):
da2d_list = (np.array(self.neighbor_da2d[lid]) / depth[r, c]).astype(int)
pts9d = shift_pts9d(
sample_pts(partial_pts_2d(lclnmlclr, (r, c), da2d_list), opt.n_neighb_pts),
lclnmlclr[r, c, :])
cen9d = copy.deepcopy(lclnmlclr[r, c, :])
cen9d[0:3] = np.zeros(3)
ppf7d = make_ppf(pts9d, cen9d)
ppf7d[np.isnan(ppf7d)] = 0.0
nb_ms_in[sid, lid, :, :] = torch.Tensor(ppf7d).transpose(1, 0)
xyz = pts9d[:, 0:3]
ids_out_of_bound = np.linalg.norm(xyz, axis=1) > opt.far_radiuses[lid]
nb_ms_in[sid, lid, :, ids_out_of_bound] = 0.
sid += 1
return pt_in, nb_ms_in, -1, fid, torch.Tensor(np.array(rc_list))
| true | true |
f723e84d3365845116c25dda9340902ab173b6cd | 799 | py | Python | core/data/dataloader/__init__.py | HareshKarnan/awesome-semantic-segmentation-pytorch | 3c53fc004973abcb88882dcc8be899570c3053cf | [
"Apache-2.0"
] | null | null | null | core/data/dataloader/__init__.py | HareshKarnan/awesome-semantic-segmentation-pytorch | 3c53fc004973abcb88882dcc8be899570c3053cf | [
"Apache-2.0"
] | null | null | null | core/data/dataloader/__init__.py | HareshKarnan/awesome-semantic-segmentation-pytorch | 3c53fc004973abcb88882dcc8be899570c3053cf | [
"Apache-2.0"
] | null | null | null | """
This module provides data loaders and transformers for popular vision datasets.
"""
from .mscoco import COCOSegmentation
from .cityscapes import CitySegmentation
from .ade import ADE20KSegmentation
from .pascal_voc import VOCSegmentation
from .pascal_aug import VOCAugSegmentation
from .sbu_shadow import SBUSegmentation
from .ycb import YCBSegmentation
from .robocup import RobocupSegmentation
datasets = {
'ade20k': ADE20KSegmentation,
'pascal_voc': VOCSegmentation,
'pascal_aug': VOCAugSegmentation,
'coco': COCOSegmentation,
'citys': CitySegmentation,
'sbu': SBUSegmentation,
'ycb': YCBSegmentation,
'robocup': RobocupSegmentation,
}
def get_segmentation_dataset(name, **kwargs):
    """Instantiate a segmentation dataset by (case-insensitive) name.

    :param name: one of the keys of ``datasets`` (e.g. 'coco', 'citys', ...)
    :param kwargs: forwarded to the dataset class constructor
    :return: the constructed dataset instance
    :raises KeyError: if ``name`` is not a known dataset (the message lists
        the supported names instead of the bare missing key)
    """
    try:
        dataset_cls = datasets[name.lower()]
    except KeyError:
        raise KeyError("Unknown dataset '{}'; expected one of: {}".format(
            name, ', '.join(sorted(datasets))))
    return dataset_cls(**kwargs)
| 28.535714 | 79 | 0.767209 | from .mscoco import COCOSegmentation
from .cityscapes import CitySegmentation
from .ade import ADE20KSegmentation
from .pascal_voc import VOCSegmentation
from .pascal_aug import VOCAugSegmentation
from .sbu_shadow import SBUSegmentation
from .ycb import YCBSegmentation
from .robocup import RobocupSegmentation
datasets = {
'ade20k': ADE20KSegmentation,
'pascal_voc': VOCSegmentation,
'pascal_aug': VOCAugSegmentation,
'coco': COCOSegmentation,
'citys': CitySegmentation,
'sbu': SBUSegmentation,
'ycb': YCBSegmentation,
'robocup': RobocupSegmentation,
}
def get_segmentation_dataset(name, **kwargs):
return datasets[name.lower()](**kwargs)
| true | true |
f723e8b52caef4e5fb333357c7a8f97adccc1b2a | 11,436 | py | Python | src/roslaunch2/package.py | CodeFinder2/roslaunch2 | 5c2aa58129671647aa8e5cbc0541caf280accffb | [
"BSD-3-Clause"
] | 10 | 2019-11-19T12:35:30.000Z | 2022-01-16T15:59:44.000Z | src/roslaunch2/package.py | CodeFinder2/roslaunch2 | 5c2aa58129671647aa8e5cbc0541caf280accffb | [
"BSD-3-Clause"
] | 1 | 2022-01-11T09:30:36.000Z | 2022-02-07T22:03:36.000Z | src/roslaunch2/package.py | CodeFinder2/roslaunch2 | 5c2aa58129671647aa8e5cbc0541caf280accffb | [
"BSD-3-Clause"
] | 3 | 2019-08-01T08:50:00.000Z | 2021-05-02T01:27:47.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Adrian Böckenkamp
# License: BSD (https://opensource.org/licenses/BSD-3-Clause)
# Date: 08/06/2020
import rospkg
import os
import sys
import Pyro4
from . import logger
class Package:
    """
    Encapsulates a ROS package and its ability to find files in the package directory structure. A caching mechanism is
    used to speedup `*find()` commands.
    """
    __pkg_cache = {}   # package name -> package path
    __dir_cache = {}   # start directory -> list of (root, file names) as produced by os.walk()
    __find_cache = {}  # package name + path component -> resolved path

    @staticmethod
    def invalidate_cache():
        """
        Invalidates the package, directory and file cache for finding packages, enforcing re-lookups.
        """
        Package.__pkg_cache = {}
        Package.__dir_cache = {}
        Package.__find_cache = {}

    @staticmethod
    def get_paths_to_file(start_dir, file_comp):
        """
        Searches for file_comp in `start_dir` recursively (also using a cache for speedup).

        :param start_dir: root directory where to start the search
        :param file_comp: file path component (like some/dir/myfile.xml) name to search for
        :return: List of files found (with their full path)
        """
        file_name = os.path.basename(file_comp)
        dir_comp = os.path.dirname(file_comp)
        result = []
        if start_dir in Package.__dir_cache:  # use cached file listing of $start_dir
            for root, file_set in Package.__dir_cache[start_dir]:
                for a_file in file_set:
                    if a_file == file_name and root.endswith(dir_comp):
                        result.append(os.path.join(root, a_file))
        else:  # crawl the file system at $start_dir (and cache for future requests)
            cache_entry = []
            for root, _, file_set in os.walk(start_dir):
                cache_entry.append((root, file_set))
                for a_file in file_set:
                    if a_file == file_name and root.endswith(dir_comp):
                        result.append(os.path.join(root, a_file))
            Package.__dir_cache[start_dir] = cache_entry
        return result

    @staticmethod
    def __get_pkg_path_cached(name):
        """
        Tries to find the given package name in the cache. If its not present in the cache, the cache is updated by a
        (slower) filesystem lookup.

        :param name: Name of ROS package
        :return: Path to package
        """
        if name not in Package.__pkg_cache:
            Package.__pkg_cache[name] = rospkg.RosPack().get_path(name)  # may throw rospkg.ResourceNotFound
        return Package.__pkg_cache[name]

    def __init__(self, name=None, silent=False):
        """
        Initializes the ROS package given its name. The path to the package will automatically be resolved on
        construction.

        :param name: Name of ROS package
        :param silent: True if no exceptions should be thrown if the package was not found
        """
        try:
            self.set_name(name)
        except rospkg.ResourceNotFound:
            if not silent:
                raise

    @Pyro4.expose
    def get_name(self):
        """
        Returns the package name.

        :return: ROS package name
        """
        return self.__name

    @Pyro4.expose
    def set_name(self, name):
        """
        Updates/sets the package name and resolves the package path.

        :param name: ROS package name
        """
        self.__name = name
        self.__path = Package.__get_pkg_path_cached(name)

    name = property(get_name, set_name)

    @Pyro4.expose
    def get_path(self):
        """
        Retrieves the package path.

        :return: ROS package path
        """
        return self.__path

    def _set_path(self, pkg_path):  # not exposed to Pyro!
        if self.__name:
            self.__path = pkg_path
        else:
            self.__path = None

    def __nonzero__(self):
        return bool(self.__path)  # for Python 2.x

    def __bool__(self):
        return self.__nonzero__()  # for Python 3.x

    path = property(get_path, _set_path)

    def __str__(self):
        return self.__name

    @staticmethod
    def valid(pkg):
        """
        Tests whether pkg is a valid ROS package on the current system.

        :param pkg: Name of ROS package (type: str) or a valid package.Package object)
        :return: Path to pkg if valid or None if not found
        """
        try:
            if type(pkg) is str:
                name = pkg
            elif isinstance(pkg, Package):
                name = pkg.name
            else:
                raise ValueError('Cannot process type {}'.format(str(type(pkg))))
            return Package.__get_pkg_path_cached(name)
        except rospkg.ResourceNotFound:
            return None

    def has_node(self, node_name, warn=True):
        """
        Tests if a ROS node actually exists.

        This method checks whether a ROS node named $node_name exists in the current ROS package.

        :param node_name: name of ROS node to test
        :param warn: True if a warning about the missing node should be emitted
        :return: True if node exists, False otherwise
        """
        # NOTE(review): the search starts two directories above the package path
        # (i. e., typically the workspace), not the package itself — confirm intent.
        pkg = os.path.join(self.__path, '../..')
        # Just consider files that are executable:
        if [f for f in Package.get_paths_to_file(pkg, node_name) if os.access(f, os.X_OK)]:
            return True
        else:
            if warn:
                logger.warning("Node '{}' in package '{}' not found.".format(node_name, self.__name))
            return False

    @staticmethod
    def include(pkg_name, path_comp, **kwargs):
        """
        Like use() but static for convenience.

        :param pkg_name: Name of ROS package to be used for search of path_comp
        :param path_comp: (partial) path or file name to launch module (if it does not end with .pyl, this is added
               automatically)
        :param kwargs: optional arguments to be passed to the main() function of the launch module
        :return: GeneratorBase object as returned by the main() function
        """
        assert type(pkg_name) is str
        return Package(pkg_name).use(path_comp, **kwargs)

    def use(self, path_comp, **kwargs):
        """
        Imports (aka uses) the content of a launch module located in the current package (self).

        :param path_comp: (partial) path or file name to launch module (if it does not end with .pyl, this is added
               automatically)
        :param kwargs: optional arguments to be passed to the main() function of the launch module
        :return: GeneratorBase object as returned by the main() function
        """
        if not os.path.splitext(path_comp)[1]:
            path_comp += '.pyl'
        mod_path = self.find(path_comp, True)
        if not mod_path:
            raise ValueError("Launch module '{:s}' in package '{:s}' not found.".format(path_comp, self.__name))
        m = Package.import_launch_module(mod_path)
        return m.main(**kwargs)

    @staticmethod
    def import_launch_module(full_module_path):
        """
        Rather internal helper function for important a Python module (i. e., a roslaunch2 launch module/file).
        This function handles all various cases related to different versions of Python.

        :param full_module_path: Full path to module file
        :return: Handle to imported module (like "foo" in "import bar as foo")
        """
        if sys.version_info < (2, 4):  # Python < 2.4 is not supported
            raise RuntimeError('Must use Python version >= 2.4!')
        if not os.path.isfile(full_module_path):
            raise ValueError("Launch module '{:s}' not found.".format(full_module_path))
        module_name = os.path.splitext(full_module_path)[0]
        # Hot-patch PYTHONPATH to find . imports:
        search_path = os.path.dirname(os.path.abspath(module_name))
        if search_path not in sys.path:
            sys.path.append(search_path)
        if sys.version_info < (3, 3):  # Python 2.x and 3.y where x >= 4 and y < 3
            import imp
            return imp.load_source(module_name, full_module_path)
        elif sys.version_info < (3, 5):  # Python 3.3 and 3.4
            # Bugfix: this branch previously tested "< (3, 4)", leaving Python 3.4
            # unmatched by any branch so the function silently returned None.
            # (importlib.util.module_from_spec below requires 3.5+, so 3.4 must
            # use the SourceFileLoader path.)
            import importlib.machinery
            return importlib.machinery.SourceFileLoader(module_name, full_module_path).load_module()
        else:  # Python 3.5+
            import importlib.util
            import importlib.machinery
            # Allow any extensions (not only .py and .so, and .pyl in particular):
            importlib.machinery.SOURCE_SUFFIXES.append('')
            spec = importlib.util.spec_from_file_location(module_name, full_module_path)
            m = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(m)
            return m

    @Pyro4.expose
    def find(self, path_comp, silent=False):
        """
        Searches for a file or directory in the current package (self).

        :param path_comp: (partial) path or file name; if empty or None, the package root path is returned
        :param silent: if True return None when nothing is found, otherwise an IOError is raised in
               case of failure
        :return: first found file (full path) or None if silent==True and nothing found
        """
        # Bugfix: the empty/None check must precede the cache key construction,
        # otherwise str.join raises a TypeError for path_comp=None although the
        # guard below clearly intends to support falsy values.
        if not path_comp:
            return self.__path
        key = ''.join([self.__name, path_comp])
        if key in Package.__find_cache:
            return Package.__find_cache[key]
        dir_path = os.path.join(self.__path, path_comp if not path_comp.startswith(os.path.sep) else path_comp[1:])
        if os.path.isdir(dir_path):
            Package.__find_cache[key] = dir_path
            return dir_path
        f = Package.get_paths_to_file(self.__path, path_comp)
        if len(f) > 1:
            logger.log("Found {} files, unique selection impossible (using first).".format(', '.join(f)))
        if not f:
            if not silent:
                raise IOError("No files like '{}' found in '{}'.".format(path_comp, self.__name))
            else:
                return None
        Package.__find_cache[key] = f[0]
        return f[0]

    @Pyro4.expose
    def selective_find(self, path_comp_options, path_comp_prefix='', silent=False):
        """
        Searches for a set of files or directories in the current package (self). Tries to find any
        path from the path_comp_options list starting at the first element. Once a path is found
        the search for the remaining paths is canceled and the found path is returned.

        :param path_comp_options: list of (partial) path or file names
        :param path_comp_prefix: prefix to each element of path_comp_options
        :param silent: if True return None when nothing is found, otherwise an IOError is raised in
               case of failure
        :return: first found file (full path) or None if silent==True and nothing found
        """
        for path_comp in path_comp_options:
            path = self.find(path_comp=os.path.join(path_comp_prefix, path_comp), silent=True)
            if path is not None:
                return path
        # Nothing found
        if not silent:
            raise IOError("None of the queried files found in '{}'.".format(self.__name))
        else:
            return None
| 39.030717 | 119 | 0.612977 |
import rospkg
import os
import sys
import Pyro4
from . import logger
class Package:
__pkg_cache = {}
__dir_cache = {}
__find_cache = {}
@staticmethod
def invalidate_cache():
Package.__pkg_cache = {}
Package.__dir_cache = {}
Package.__find_cache = {}
@staticmethod
def get_paths_to_file(start_dir, file_comp):
file_name = os.path.basename(file_comp)
dir_comp = os.path.dirname(file_comp)
result = []
if start_dir in Package.__dir_cache:
for root, file_set in Package.__dir_cache[start_dir]:
for a_file in file_set:
if a_file == file_name and root.endswith(dir_comp):
result.append(os.path.join(root, a_file))
else:
cache_entry = []
for root, _, file_set in os.walk(start_dir):
cache_entry.append((root, file_set))
for a_file in file_set:
if a_file == file_name and root.endswith(dir_comp):
result.append(os.path.join(root, a_file))
Package.__dir_cache[start_dir] = cache_entry
return result
@staticmethod
def __get_pkg_path_cached(name):
if name not in Package.__pkg_cache:
Package.__pkg_cache[name] = rospkg.RosPack().get_path(name)
return Package.__pkg_cache[name]
def __init__(self, name=None, silent=False):
try:
self.set_name(name)
except rospkg.ResourceNotFound:
if not silent:
raise
@Pyro4.expose
def get_name(self):
return self.__name
@Pyro4.expose
def set_name(self, name):
self.__name = name
self.__path = Package.__get_pkg_path_cached(name)
name = property(get_name, set_name)
@Pyro4.expose
def get_path(self):
return self.__path
def _set_path(self, pkg_path):
if self.__name:
self.__path = pkg_path
else:
self.__path = None
def __nonzero__(self):
return bool(self.__path)
def __bool__(self):
return self.__nonzero__()
path = property(get_path, _set_path)
def __str__(self):
return self.__name
@staticmethod
def valid(pkg):
try:
if type(pkg) is str:
name = pkg
elif isinstance(pkg, Package):
name = pkg.name
else:
raise ValueError('Cannot process type {}'.format(str(type(pkg))))
return Package.__get_pkg_path_cached(name)
except rospkg.ResourceNotFound:
return None
def has_node(self, node_name, warn=True):
pkg = os.path.join(self.__path, '../..')
if [f for f in Package.get_paths_to_file(pkg, node_name) if os.access(f, os.X_OK)]:
return True
else:
if warn:
logger.warning("Node '{}' in package '{}' not found.".format(node_name, self.__name))
return False
@staticmethod
def include(pkg_name, path_comp, **kwargs):
assert type(pkg_name) is str
return Package(pkg_name).use(path_comp, **kwargs)
def use(self, path_comp, **kwargs):
if not os.path.splitext(path_comp)[1]:
path_comp += '.pyl'
mod_path = self.find(path_comp, True)
if not mod_path:
raise ValueError("Launch module '{:s}' in package '{:s}' not found.".format(path_comp, self.__name))
m = Package.import_launch_module(mod_path)
return m.main(**kwargs)
@staticmethod
def import_launch_module(full_module_path):
if sys.version_info < (2, 4):
raise RuntimeError('Must use Python version >= 2.4!')
if not os.path.isfile(full_module_path):
raise ValueError("Launch module '{:s}' not found.".format(full_module_path))
module_name = os.path.splitext(full_module_path)[0]
search_path = os.path.dirname(os.path.abspath(module_name))
if search_path not in sys.path:
sys.path.append(search_path)
if sys.version_info < (3, 3):
import imp
return imp.load_source(module_name, full_module_path)
elif sys.version_info < (3, 4):
import importlib.machinery
return importlib.machinery.SourceFileLoader(module_name, full_module_path).load_module()
elif sys.version_info >= (3, 5):
import importlib.util
import importlib.machinery
importlib.machinery.SOURCE_SUFFIXES.append('')
spec = importlib.util.spec_from_file_location(module_name, full_module_path)
m = importlib.util.module_from_spec(spec)
spec.loader.exec_module(m)
return m
@Pyro4.expose
def find(self, path_comp, silent=False):
key = ''.join([self.__name, path_comp])
if key in Package.__find_cache:
return Package.__find_cache[key]
if not path_comp:
return self.__path
dir_path = os.path.join(self.__path, path_comp if not path_comp.startswith(os.path.sep) else path_comp[1:])
if os.path.isdir(dir_path):
Package.__find_cache[key] = dir_path
return dir_path
f = Package.get_paths_to_file(self.__path, path_comp)
if len(f) > 1:
logger.log("Found {} files, unique selection impossible (using first).".format(', '.join(f)))
if not f:
if not silent:
raise IOError("No files like '{}' found in '{}'.".format(path_comp, self.__name))
else:
return None
Package.__find_cache[key] = f[0]
return f[0]
@Pyro4.expose
def selective_find(self, path_comp_options, path_comp_prefix='', silent=False):
for path_comp in path_comp_options:
path = self.find(path_comp=os.path.join(path_comp_prefix, path_comp), silent=True)
if path is not None:
return path
if not silent:
raise IOError("None of the queried files found in '{}'.".format(self.__name))
else:
return None
| true | true |
f723e8f7914d47ae6d2455622f8a55aacd1c6ccf | 72 | py | Python | cibin/__init__.py | betochimasan/AntibodyCocktailEfficiency | 67850eb85d502af80b691b49001803f2e3091a0b | [
"BSD-3-Clause"
] | null | null | null | cibin/__init__.py | betochimasan/AntibodyCocktailEfficiency | 67850eb85d502af80b691b49001803f2e3091a0b | [
"BSD-3-Clause"
] | null | null | null | cibin/__init__.py | betochimasan/AntibodyCocktailEfficiency | 67850eb85d502af80b691b49001803f2e3091a0b | [
"BSD-3-Clause"
] | null | null | null | """
The Cibin package.
"""
__version__ = "0.0.1"
from .cibin import *
| 9 | 21 | 0.611111 |
__version__ = "0.0.1"
from .cibin import *
| true | true |
f723e91ac20eac5aead5f13782edc311af79e501 | 4,293 | py | Python | python/cairo/text_align_center.py | jeremiedecock/snippets | 4bd4e7f459eee610d5cf19f845299ca942ff4b64 | [
"MIT"
] | 23 | 2015-06-08T13:01:00.000Z | 2021-12-30T08:20:04.000Z | python/cairo/text_align_center.py | jeremiedecock/snippets | 4bd4e7f459eee610d5cf19f845299ca942ff4b64 | [
"MIT"
] | 1 | 2020-10-22T02:36:10.000Z | 2020-10-22T02:36:10.000Z | python/cairo/text_align_center.py | jeremiedecock/snippets | 4bd4e7f459eee610d5cf19f845299ca942ff4b64 | [
"MIT"
] | 7 | 2017-10-31T09:48:14.000Z | 2022-01-04T15:59:45.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2014 Jérémie DECOCK (http://www.jdhp.org)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# SEE:
# - http://cairographics.org/samples/
# - http://cairographics.org/documentation/pycairo/3/reference/index.html
# - http://cairographics.org/pycairo/tutorial/
# - http://www.tortall.net/mu/wiki/CairoTutorial
import cairo
import math
WIDTH, HEIGHT = 400, 400


def main():
    """Draw the word "Hello" centered on a 400x400 canvas, plus guide marks.

    The result is written to ``text_align_center.svg`` in the current
    working directory.
    """
    # An SVG surface renders every drawing command straight into the file.
    surface = cairo.SVGSurface("text_align_center.svg", WIDTH, HEIGHT)
    context = cairo.Context(surface)

    # --- white background --------------------------------------------------
    context.set_line_width(0.02)
    context.set_source_rgb(1, 1, 1)
    context.rectangle(0, 0, WIDTH, HEIGHT)
    context.fill()

    # --- centered black text ------------------------------------------------
    context.set_source_rgb(0, 0, 0)
    context.select_font_face("Sans", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
    context.set_font_size(52.0)
    # text_extents() gives the ink bounding box of the string; offsetting the
    # text origin by half its width/height centers it on the canvas.
    _bx, _by, text_width, text_height, _adv_x, _adv_y = context.text_extents("Hello")
    origin_x = WIDTH / 2. - text_width / 2.
    origin_y = HEIGHT / 2. + text_height / 2.
    context.move_to(origin_x, origin_y)
    context.show_text("Hello")

    # --- helper overlay: mark the text origin and the canvas center lines ---
    context.set_source_rgba(1, 0.2, 0.2, 0.6)
    context.set_line_width(6.0)
    context.arc(origin_x, origin_y, 5, 0, math.radians(360))
    context.fill()
    context.move_to(WIDTH / 2., 0)
    context.line_to(WIDTH / 2, HEIGHT)
    context.stroke()
    context.move_to(0, HEIGHT / 2.)
    context.line_to(WIDTH, HEIGHT / 2.)
    context.stroke()

    # Flush pending output and close the SVG file.
    surface.finish()


if __name__ == '__main__':
    main()
| 37.008621 | 89 | 0.699278 |
import cairo
import math
WIDTH, HEIGHT = 400, 400
def main():
surface = cairo.SVGSurface("text_align_center.svg", WIDTH, HEIGHT)
context = cairo.Context(surface)
xt.set_line_width(0.02)
context.set_source_rgb(1, 1, 1)
context.rectangle(0, 0, WIDTH, HEIGHT)
context.fill()
context.set_source_rgb(0, 0, 0)
context.select_font_face("Sans", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
context.set_font_size(52.0)
(x, y, width, height, dx, dy) = context.text_extents("Hello")
context.move_to(WIDTH/2. - width/2., HEIGHT/2. + height/2.)
context.show_text("Hello")
context.set_source_rgba(1, 0.2, 0.2, 0.6)
context.set_line_width(6.0)
context.arc(WIDTH/2. - width/2., HEIGHT/2. + height/2., 5, 0, math.radians(360))
context.fill()
context.move_to(WIDTH/2., 0)
context.line_to(WIDTH/2, HEIGHT)
context.stroke()
context.move_to(0, HEIGHT/2.)
context.line_to(WIDTH, HEIGHT/2.)
context.stroke()
main()
| true | true |
f723e929ff2525653af0e5fc4da9eb8c8a0a5165 | 532 | py | Python | deploy.py | oneofthezombies/cpp-enum-class-string-idl | e1188e6a94d5c7c6a4bc6a7d025b12f4d22c53e1 | [
"MIT"
] | 1 | 2021-05-08T08:34:09.000Z | 2021-05-08T08:34:09.000Z | deploy.py | oneofthezombies/argparse-best-practice | c29bffb014bb66d7694e4e27ad2911cc5abb3eef | [
"MIT"
] | null | null | null | deploy.py | oneofthezombies/argparse-best-practice | c29bffb014bb66d7694e4e27ad2911cc5abb3eef | [
"MIT"
] | null | null | null | from shutil import rmtree
from subprocess import run
from pathlib import Path
from itertools import chain
from more_itertools import consume
dirnames = ['build', 'dist']
paths = map(lambda path: Path(path), dirnames)
outputs = chain(paths, Path().glob('*.egg-info'))
exists = filter(lambda path: path.exists(), outputs)
deletes = map(lambda path: rmtree(path), exists)
consume(deletes)
run(['python3', 'setup.py', 'sdist', 'bdist_wheel']).check_returncode()
run(['python3', '-m', 'twine', 'upload', 'dist/*']).check_returncode()
| 31.294118 | 71 | 0.721805 | from shutil import rmtree
from subprocess import run
from pathlib import Path
from itertools import chain
from more_itertools import consume

# Stale artifacts from previous builds: fixed directories plus any
# setuptools-generated *.egg-info directory in the project root.
dirnames = ['build', 'dist']
paths = map(lambda path: Path(path), dirnames)
outputs = chain(paths, Path().glob('*.egg-info'))
exists = filter(lambda path: path.exists(), outputs)
# Deletion happens as a side effect of the map; consume() forces the lazy
# pipeline to run to completion.
deletes = map(lambda path: rmtree(path), exists)
consume(deletes)

# Rebuild distributions and upload; check_returncode() raises
# CalledProcessError on a non-zero exit.
run(['python3', 'setup.py', 'sdist', 'bdist_wheel']).check_returncode()
run(['python3', '-m', 'twine', 'upload', 'dist/*']).check_returncode()
| true | true |
f723ea0c1c6678a854eaf1b914ce06a246ec9ada | 32,902 | py | Python | api/registrations/serializers.py | mattclarkcos/osf.io | 0e5ee8c0ff2a9bc7449061124ac8ce6d00f775ca | [
"Apache-2.0"
] | null | null | null | api/registrations/serializers.py | mattclarkcos/osf.io | 0e5ee8c0ff2a9bc7449061124ac8ce6d00f775ca | [
"Apache-2.0"
] | 4 | 2022-02-26T03:28:02.000Z | 2022-03-08T23:36:45.000Z | api/registrations/serializers.py | mattclarkcos/osf.io | 0e5ee8c0ff2a9bc7449061124ac8ce6d00f775ca | [
"Apache-2.0"
] | null | null | null | import pytz
import json
from unicodedata import normalize
from distutils.version import StrictVersion
from django.core.exceptions import ValidationError
from rest_framework import serializers as ser
from rest_framework import exceptions
from api.base.exceptions import Conflict, InvalidModelValueError, JSONAPIException
from api.base.serializers import is_anonymized
from api.base.utils import absolute_reverse, get_user_auth, is_truthy
from api.base.versioning import CREATE_REGISTRATION_FIELD_CHANGE_VERSION
from website.project.model import NodeUpdateError
from api.files.serializers import OsfStorageFileSerializer
from api.nodes.serializers import (
NodeSerializer,
NodeStorageProviderSerializer,
NodeLicenseRelationshipField,
NodeLinksSerializer,
update_institutions,
NodeLicenseSerializer,
NodeContributorsSerializer,
RegistrationProviderRelationshipField,
get_license_details,
)
from api.base.serializers import (
IDField, RelationshipField, LinksField, HideIfWithdrawal,
FileRelationshipField, NodeFileHyperLinkField, HideIfRegistration,
ShowIfVersion, VersionedDateTimeField, ValuesListField,
)
from framework.auth.core import Auth
from osf.exceptions import ValidationValueError, NodeStateError
from osf.models import Node, AbstractNode
from osf.utils.registrations import strip_registered_meta_comments
from framework.sentry import log_exception
class RegistrationSerializer(NodeSerializer):
    """JSON-API serializer for OSF registrations.

    Extends ``NodeSerializer`` with registration-specific state
    (embargo, withdrawal, approval, registered schema/responses) and the
    relationship links exposed under ``/v2/registrations/``. Fields
    wrapped in ``HideIfWithdrawal`` are suppressed on withdrawn
    registrations; fields wrapped in ``HideIfRegistration`` only appear
    on non-registration nodes (wrapper behavior defined in
    ``api.base.serializers``).
    """

    # Fields only contributors with admin permission may modify; enforced
    # in check_admin_perms() before update().
    admin_only_editable_fields = [
        'custom_citation',
        'is_pending_retraction',
        'is_public',
        'withdrawal_justification',
    ]

    # Remember to add new RegistrationSerializer fields to this list
    # if you don't need them to be anonymized
    non_anonymized_fields = NodeSerializer.non_anonymized_fields + [
        'archiving',
        'article_doi',
        'date_registered',
        'date_withdrawn',
        'embargo_end_date',
        'embargoed',
        'pending_embargo_approval',
        'pending_embargo_termination_approval',
        'pending_registration_approval',
        'pending_withdrawal',
        'provider',
        'registered_by',
        'registered_from',
        'registered_meta',
        'registration_responses',
        'registration_schema',
        'registration_supplement',
        'withdrawal_justification',
        'withdrawn',
    ]

    # --- attribute fields ---------------------------------------------------
    reviews_state = ser.CharField(source='moderation_state', read_only=True)
    title = ser.CharField(read_only=True)
    description = ser.CharField(required=False, allow_blank=True, allow_null=True)
    category_choices = NodeSerializer.category_choices
    category_choices_string = NodeSerializer.category_choices_string
    category = ser.ChoiceField(required=False, choices=category_choices, help_text='Choices: ' + category_choices_string)
    date_modified = VersionedDateTimeField(source='last_logged', read_only=True)
    fork = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_fork'))
    collection = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_collection'))
    access_requests_enabled = HideIfWithdrawal(ser.BooleanField(read_only=True))
    node_license = HideIfWithdrawal(NodeLicenseSerializer(required=False, source='license'))
    tags = HideIfWithdrawal(ValuesListField(attr_name='name', child=ser.CharField(), required=False))
    article_doi = ser.CharField(required=False, allow_null=True)
    public = HideIfWithdrawal(ser.BooleanField(
        source='is_public', required=False,
        help_text='Nodes that are made public will give read-only access '
                  'to everyone. Private nodes require explicit read '
                  'permission. Write and admin access are the same for '
                  'public and private nodes. Administrators on a parent '
                  'node have implicit read permissions for all child nodes',
    ))
    current_user_permissions = HideIfWithdrawal(ser.SerializerMethodField(
        help_text='List of strings representing the permissions '
                  'for the current user on this node.',
    ))
    pending_embargo_approval = HideIfWithdrawal(ser.BooleanField(
        read_only=True, source='is_pending_embargo',
        help_text='The associated Embargo is awaiting approval by project admins.',
    ))
    pending_embargo_termination_approval = HideIfWithdrawal(ser.BooleanField(
        read_only=True, source='is_pending_embargo_termination',
        help_text='The associated Embargo early termination is awaiting approval by project admins',
    ))
    embargoed = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_embargoed'))
    pending_registration_approval = HideIfWithdrawal(ser.BooleanField(
        source='is_pending_registration', read_only=True,
        help_text='The associated RegistrationApproval is awaiting approval by project admins.',
    ))
    archiving = HideIfWithdrawal(ser.BooleanField(read_only=True))
    pending_withdrawal = HideIfWithdrawal(ser.BooleanField(
        source='is_pending_retraction', read_only=True,
        help_text='The registration is awaiting withdrawal approval by project admins.',
    ))
    withdrawn = ser.BooleanField(
        source='is_retracted', read_only=True,
        help_text='The registration has been withdrawn.',
    )
    has_project = ser.SerializerMethodField()
    date_registered = VersionedDateTimeField(source='registered_date', read_only=True, help_text='Date time of registration.')
    date_withdrawn = VersionedDateTimeField(read_only=True, help_text='Date time of when this registration was retracted.')
    embargo_end_date = HideIfWithdrawal(ser.SerializerMethodField(help_text='When the embargo on this registration will be lifted.'))
    custom_citation = HideIfWithdrawal(ser.CharField(allow_blank=True, required=False))
    withdrawal_justification = ser.CharField(read_only=True)
    template_from = HideIfWithdrawal(ser.CharField(
        read_only=True, allow_blank=False, allow_null=False,
        help_text='Specify a node id for a node you would like to use as a template for the '
                  'new node. Templating is like forking, except that you do not copy the '
                  'files, only the project structure. Some information is changed on the top '
                  'level project by submitting the appropriate fields in the request body, '
                  'and some information will not change. By default, the description will '
                  'be cleared and the project will be made private.',
    ))
    registration_supplement = ser.SerializerMethodField()
    # Will be deprecated in favor of registration_responses
    registered_meta = HideIfWithdrawal(ser.SerializerMethodField(
        help_text='A dictionary with supplemental registration questions and responses.',
    ))
    registration_responses = HideIfWithdrawal(ser.SerializerMethodField(
        help_text='A dictionary with supplemental registration questions and responses.',
    ))

    # --- relationship fields ------------------------------------------------
    registered_by = HideIfWithdrawal(RelationshipField(
        related_view='users:user-detail',
        related_view_kwargs={'user_id': '<registered_user._id>'},
    ))
    registered_from = RelationshipField(
        related_view='nodes:node-detail',
        related_view_kwargs={'node_id': '<registered_from._id>'},
    )
    children = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-children',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={'count': 'get_node_count'},
    ))
    comments = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-comments',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={
            'unread': 'get_unread_comments_count',
            'count': 'get_total_comments_count',
        },
        filter={'target': '<_id>'},
    ))
    contributors = RelationshipField(
        related_view='registrations:registration-contributors',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={'count': 'get_contrib_count'},
    )
    bibliographic_contributors = RelationshipField(
        related_view='registrations:registration-bibliographic-contributors',
        related_view_kwargs={'node_id': '<_id>'},
    )
    implicit_contributors = RelationshipField(
        related_view='registrations:registration-implicit-contributors',
        related_view_kwargs={'node_id': '<_id>'},
        help_text='This feature is experimental and being tested. It may be deprecated.',
    )
    files = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-storage-providers',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={'count': 'get_files_count'},
    ))
    wikis = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-wikis',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={'count': 'get_wiki_page_count'},
    ))
    # Forked-from target may itself be a registration or a plain node;
    # the lambda picks the matching detail view at render time.
    forked_from = HideIfWithdrawal(RelationshipField(
        related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail',
        related_view_kwargs={'node_id': '<forked_from_id>'},
    ))
    template_node = HideIfWithdrawal(RelationshipField(
        related_view='nodes:node-detail',
        related_view_kwargs={'node_id': '<template_node._id>'},
    ))
    license = HideIfWithdrawal(NodeLicenseRelationshipField(
        related_view='licenses:license-detail',
        related_view_kwargs={'license_id': '<license.node_license._id>'},
        read_only=False,
    ))
    logs = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-logs',
        related_view_kwargs={'node_id': '<_id>'},
    ))
    forks = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-forks',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={'count': 'get_forks_count'},
    ))
    groups = HideIfRegistration(RelationshipField(
        related_view='nodes:node-groups',
        related_view_kwargs={'node_id': '<_id>'},
    ))
    # Only exposed on API version 2.0; superseded by linked_nodes.
    node_links = ShowIfVersion(
        HideIfWithdrawal(RelationshipField(
            related_view='registrations:registration-pointers',
            related_view_kwargs={'node_id': '<_id>'},
            related_meta={'count': 'get_pointers_count'},
            help_text='This feature is deprecated as of version 2.1. Use linked_nodes instead.',
        )), min_version='2.0', max_version='2.0',
    )
    linked_by_nodes = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-linked-by-nodes',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={'count': 'get_linked_by_nodes_count'},
    ))
    linked_by_registrations = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-linked-by-registrations',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={'count': 'get_linked_by_registrations_count'},
    ))
    parent = RelationshipField(
        related_view='registrations:registration-detail',
        related_view_kwargs={'node_id': '<parent_node._id>'},
        filter_key='parent_node',
    )
    root = RelationshipField(
        related_view='registrations:registration-detail',
        related_view_kwargs={'node_id': '<root._id>'},
    )
    region = HideIfWithdrawal(RelationshipField(
        related_view='regions:region-detail',
        related_view_kwargs={'region_id': '<osfstorage_region._id>'},
        read_only=True,
    ))
    affiliated_institutions = RelationshipField(
        related_view='registrations:registration-institutions',
        related_view_kwargs={'node_id': '<_id>'},
        self_view='registrations:registration-relationships-institutions',
        self_view_kwargs={'node_id': '<_id>'},
        read_only=False,
        many=True,
        required=False,
    )
    registration_schema = RelationshipField(
        related_view='schemas:registration-schema-detail',
        related_view_kwargs={'schema_id': '<registered_schema_id>'},
    )
    settings = HideIfRegistration(RelationshipField(
        related_view='nodes:node-settings',
        related_view_kwargs={'node_id': '<_id>'},
    ))
    registrations = HideIfRegistration(RelationshipField(
        related_view='nodes:node-registrations',
        related_view_kwargs={'node_id': '<_id>'},
    ))
    draft_registrations = HideIfRegistration(RelationshipField(
        related_view='nodes:node-draft-registrations',
        related_view_kwargs={'node_id': '<_id>'},
    ))
    preprints = HideIfWithdrawal(HideIfRegistration(RelationshipField(
        related_view='nodes:node-preprints',
        related_view_kwargs={'node_id': '<_id>'},
    )))
    identifiers = RelationshipField(
        related_view='registrations:identifier-list',
        related_view_kwargs={'node_id': '<_id>'},
    )
    linked_nodes = HideIfWithdrawal(RelationshipField(
        related_view='registrations:linked-nodes',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={'count': 'get_node_links_count'},
        self_view='registrations:node-pointer-relationship',
        self_view_kwargs={'node_id': '<_id>'},
    ))
    linked_registrations = HideIfWithdrawal(RelationshipField(
        related_view='registrations:linked-registrations',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={'count': 'get_registration_links_count'},
        self_view='registrations:node-registration-pointer-relationship',
        self_view_kwargs={'node_id': '<_id>'},
    ))
    view_only_links = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-view-only-links',
        related_view_kwargs={'node_id': '<_id>'},
        related_meta={'count': 'get_view_only_links_count'},
    ))
    citation = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-citation',
        related_view_kwargs={'node_id': '<_id>'},
    ))
    provider = RegistrationProviderRelationshipField(
        related_view='providers:registration-providers:registration-provider-detail',
        related_view_kwargs={'provider_id': '<provider._id>'},
        read_only=True,
    )
    review_actions = RelationshipField(
        related_view='registrations:registration-actions-list',
        related_view_kwargs={'node_id': '<_id>'},
    )
    requests = HideIfWithdrawal(RelationshipField(
        related_view='registrations:registration-requests-list',
        related_view_kwargs={'node_id': '<_id>'},
    ))

    @property
    def subjects_related_view(self):
        # Overrides TaxonomizableSerializerMixin
        return 'registrations:registration-subjects'

    @property
    def subjects_self_view(self):
        # Overrides TaxonomizableSerializerMixin
        return 'registrations:registration-relationships-subjects'

    links = LinksField({'html': 'get_absolute_html_url'})

    def get_has_project(self, obj):
        """Source for the ``has_project`` SerializerMethodField."""
        return obj.has_project

    def get_absolute_url(self, obj):
        """Delegate to the registration's own absolute-URL logic."""
        return obj.get_absolute_url()

    def get_registered_meta(self, obj):
        """Return anonymized registered_meta, JSON-decoded when possible.

        Falls back to the raw (anonymized) value when it is not a JSON
        string (e.g. already a dict, or malformed).
        """
        if obj.registered_meta:
            meta_values = self.anonymize_registered_meta(obj)
            try:
                return json.loads(meta_values)
            except TypeError:
                # Not a string/bytes — already deserialized; return as-is.
                return meta_values
            except ValueError:
                # Not valid JSON; return the raw value unchanged.
                return meta_values
        return None

    def get_registration_responses(self, obj):
        """Return registration_responses with contributor answers stripped for anonymous views."""
        if obj.registration_responses:
            return self.anonymize_registration_responses(obj)
        return None

    def get_embargo_end_date(self, obj):
        """Return the embargo end date, or None when there is no embargo."""
        if obj.embargo_end_date:
            return obj.embargo_end_date
        return None

    def get_registration_supplement(self, obj):
        """Return the name of the registered schema, or None if there is none."""
        if obj.registered_schema:
            schema = obj.registered_schema.first()
            if schema is None:
                return None
            return schema.name
        return None

    def get_current_user_permissions(self, obj):
        # Explicitly reuse the NodeSerializer implementation.
        return NodeSerializer.get_current_user_permissions(self, obj)

    def get_view_only_links_count(self, obj):
        """Count active (non-deleted) view-only links; used in related_meta."""
        return obj.private_links.filter(is_deleted=False).count()

    def get_total_comments_count(self, obj):
        """Count non-deleted node-page comments; used in comments related_meta."""
        return obj.comment_set.filter(page='node', is_deleted=False).count()

    def get_files_count(self, obj):
        # files_count may be None (e.g. not annotated); report 0 in that case.
        return obj.files_count or 0

    def anonymize_registered_meta(self, obj):
        """
        Looks at every question on every page of the schema, for any titles
        that have a contributor-input block type. If present, deletes that question's response
        from meta_values.
        """
        # registered_meta is keyed by schema id; this takes the first (only) entry.
        cleaned_registered_meta = strip_registered_meta_comments(list(obj.registered_meta.values())[0])
        return self.anonymize_fields(obj, cleaned_registered_meta)

    def anonymize_registration_responses(self, obj):
        """
        For any questions that have a `contributor-input` block type, delete
        that question's response from registration_responses.
        We want to make sure author's names that need to be anonymized
        aren't surfaced when viewed through an anonymous VOL
        """
        return self.anonymize_fields(obj, obj.registration_responses)

    def anonymize_fields(self, obj, data):
        """
        Consolidates logic to anonymize fields with contributor information
        on both registered_meta and registration_responses
        """
        # Only strip when the request comes through an anonymous view-only link.
        if is_anonymized(self.context['request']):
            anonymous_registration_response_keys = obj.get_contributor_registration_response_keys()
            for key in anonymous_registration_response_keys:
                if key in data:
                    del data[key]
        return data

    def check_admin_perms(self, registration, user, validated_data):
        """
        While admin/write users can make both make modifications to registrations,
        most fields are restricted to admin-only edits. You must be an admin
        contributor on the registration; you cannot have gotten your admin
        permissions through group membership.
        Add fields that need admin perms to admin_only_editable_fields
        """
        user_is_admin = registration.is_admin_contributor(user)
        for field in validated_data:
            if field in self.admin_only_editable_fields and not user_is_admin:
                raise exceptions.PermissionDenied()

    def update_registration_tags(self, registration, validated_data, auth):
        """Replace the registration's tags; surfaces NodeStateError as HTTP 409."""
        new_tags = validated_data.pop('tags', [])
        try:
            registration.update_tags(new_tags, auth=auth)
        except NodeStateError as err:
            raise Conflict(str(err))

    def retract_registration(self, registration, validated_data, user):
        """Kick off a withdrawal (retraction) request for this registration.

        Pops ``is_pending_retraction`` and ``withdrawal_justification``
        from validated_data. A justification is only valid alongside a
        concurrent withdrawal request, and a withdrawal can only be
        requested (is_pending_retraction truthy), never cancelled here.
        """
        is_pending_retraction = validated_data.pop('is_pending_retraction', None)
        withdrawal_justification = validated_data.pop('withdrawal_justification', None)
        if withdrawal_justification and not is_pending_retraction:
            raise exceptions.ValidationError(
                'You cannot provide a withdrawal_justification without a concurrent withdrawal request.',
            )
        if is_truthy(is_pending_retraction):
            if registration.is_pending_retraction:
                raise exceptions.ValidationError('This registration is already pending withdrawal.')
            try:
                retraction = registration.retract_registration(user, withdrawal_justification, save=True)
            except NodeStateError as err:
                raise exceptions.ValidationError(str(err))
            # Notify all active contributors that their approval is requested.
            retraction.ask(registration.get_active_contributors_recursive(unique_users=True))
        elif is_pending_retraction is not None:
            raise exceptions.ValidationError('You cannot set is_pending_withdrawal to False.')

    def update(self, registration, validated_data):
        """Apply a partial update to a registration.

        Handles each special-cased field (tags, custom citation, license,
        institutions, subjects, withdrawal, publicity) by popping it from
        validated_data, then passes whatever remains to
        ``registration.update``. Model/state errors are translated into
        API validation errors.
        """
        user = self.context['request'].user
        auth = Auth(user)
        # Admin-only fields are rejected up front for non-admin contributors.
        self.check_admin_perms(registration, user, validated_data)
        validated_data.pop('_id', None)
        if 'tags' in validated_data:
            self.update_registration_tags(registration, validated_data, auth)
        if 'custom_citation' in validated_data:
            registration.update_custom_citation(validated_data.pop('custom_citation'), auth)
        if 'license_type' in validated_data or 'license' in validated_data:
            # Collapse the two license inputs into the node_license dict
            # that registration.update expects.
            license_details = get_license_details(registration, validated_data)
            validated_data['node_license'] = license_details
            validated_data.pop('license_type', None)
            validated_data.pop('license', None)
        if 'affiliated_institutions' in validated_data:
            institutions_list = validated_data.pop('affiliated_institutions')
            new_institutions = [{'_id': institution} for institution in institutions_list]
            update_institutions(registration, new_institutions, user)
            registration.save()
        if 'subjects' in validated_data:
            subjects = validated_data.pop('subjects', None)
            self.update_subjects(registration, subjects, auth)
        if 'withdrawal_justification' in validated_data or 'is_pending_retraction' in validated_data:
            self.retract_registration(registration, validated_data, user)
        if 'is_public' in validated_data:
            # Publicity is one-way: private -> public only.
            if validated_data.get('is_public') is False:
                raise exceptions.ValidationError('Registrations can only be turned from private to public.')
        try:
            registration.update(validated_data, auth=auth)
        except ValidationError as e:
            raise InvalidModelValueError(detail=e.messages[0])
        except NodeUpdateError as err:
            raise exceptions.ValidationError(err.reason)
        except NodeStateError as err:
            raise exceptions.ValidationError(str(err))
        return registration

    class Meta:
        type_ = 'registrations'
class RegistrationCreateSerializer(RegistrationSerializer):
    """
    Overrides RegistrationSerializer to add the draft_registration,
    registration_choice, and lift_embargo fields (with newer-API-version
    equivalents draft_registration_id, embargo_end_date, and
    included_node_ids).
    """
    def expect_cleaner_attributes(self, request):
        """True when the request's API version uses the newer field names."""
        return StrictVersion(getattr(request, 'version', '2.0')) >= StrictVersion(CREATE_REGISTRATION_FIELD_CHANGE_VERSION)

    def __init__(self, *args, **kwargs):
        super(RegistrationCreateSerializer, self).__init__(*args, **kwargs)
        request = kwargs['context']['request']
        # required fields defined here for the different versions
        if self.expect_cleaner_attributes(request):
            self.fields['draft_registration_id'] = ser.CharField(write_only=True)
        else:
            self.fields['draft_registration'] = ser.CharField(write_only=True)

    # For newer versions
    embargo_end_date = VersionedDateTimeField(write_only=True, allow_null=True, default=None)
    included_node_ids = ser.ListField(write_only=True, required=False)

    # For older versions
    lift_embargo = VersionedDateTimeField(write_only=True, default=None, input_formats=['%Y-%m-%dT%H:%M:%S'])
    children = ser.ListField(write_only=True, required=False)
    registration_choice = ser.ChoiceField(write_only=True, required=False, choices=['immediate', 'embargo'])

    users = RelationshipField(
        related_view='users:user-detail',
        related_view_kwargs={'user_id': '<user._id>'},
        always_embed=True,
        required=False,
    )

    def get_registration_choice_by_version(self, validated_data):
        """
        Old API versions should pass in "immediate" or "embargo" under `registration_choice`.
        New API versions should pass in an "embargo_end_date" if it should be embargoed, else it will be None
        """
        if self.expect_cleaner_attributes(self.context['request']):
            # Reject the legacy field on new API versions.
            if validated_data.get('registration_choice'):
                raise JSONAPIException(
                    source={'pointer': '/data/attributes/registration_choice'},
                    detail=f'Deprecated in version {CREATE_REGISTRATION_FIELD_CHANGE_VERSION}. Use embargo_end_date instead.',
                )
            return 'embargo' if validated_data.get('embargo_end_date', None) else 'immediate'
        return validated_data.get('registration_choice', 'immediate')

    def get_embargo_end_date_by_version(self, validated_data):
        """
        Old API versions should pass in "lift_embargo".
        New API versions should pass in "embargo_end_date"
        """
        if self.expect_cleaner_attributes(self.context['request']):
            # Reject the legacy field on new API versions.
            if validated_data.get('lift_embargo'):
                raise JSONAPIException(
                    source={'pointer': '/data/attributes/lift_embargo'},
                    detail=f'Deprecated in version {CREATE_REGISTRATION_FIELD_CHANGE_VERSION}. Use embargo_end_date instead.',
                )
            return validated_data.get('embargo_end_date', None)
        return validated_data.get('lift_embargo')

    def get_children_by_version(self, validated_data):
        """
        Old API versions should pass in 'children'
        New API versions should pass in 'included_node_ids'.
        """
        if self.expect_cleaner_attributes(self.context['request']):
            return validated_data.get('included_node_ids', [])
        return validated_data.get('children', [])

    def create(self, validated_data):
        """Register a draft registration, optionally with child nodes and an embargo.

        Expects the view to have placed the resolved draft under the
        'draft' key of validated_data. Raises ValidationError for bad
        children, orphaned attached files, invalid node state, or a
        missing/invalid embargo date.
        """
        auth = get_user_auth(self.context['request'])
        draft = validated_data.pop('draft', None)
        registration_choice = self.get_registration_choice_by_version(validated_data)
        embargo_lifted = self.get_embargo_end_date_by_version(validated_data)
        children = self.get_children_by_version(validated_data)
        if children:
            # First check that all children are valid
            child_nodes = Node.objects.filter(guids___id__in=children)
            if child_nodes.count() != len(children):
                raise exceptions.ValidationError('Some child nodes could not be found.')
            # Second check that metadata doesn't have files that are not in the child nodes being registered.
            registering = children + [draft.branched_from._id]
            orphan_files = self._find_orphan_files(registering, draft)
            if orphan_files:
                orphan_files_names = [file_data['selectedFileName'] for file_data in orphan_files]
                raise exceptions.ValidationError('All files attached to this form must be registered to complete the process. '
                                                 'The following file(s) are attached, but are not part of a component being'
                                                 ' registered: {}'.format(', '.join(orphan_files_names)))
        try:
            # Still validating metadata, but whether `registration_responses` or `registration_metadata` were populated
            # on the draft, the other field was built and populated as well. Both should exist.
            draft.validate_metadata(metadata=draft.registration_metadata, required_fields=True)
        except ValidationValueError:
            log_exception()  # Probably indicates a bug on our end, so log to sentry
            # TODO: Raise an error once our JSON schemas are updated
        try:
            registration = draft.register(auth, save=True, child_ids=children)
        except NodeStateError as err:
            raise exceptions.ValidationError(err)
        if registration_choice == 'embargo':
            if not embargo_lifted:
                raise exceptions.ValidationError('lift_embargo must be specified.')
            # Embargo dates are treated as UTC.
            embargo_end_date = embargo_lifted.replace(tzinfo=pytz.utc)
            try:
                registration.embargo_registration(auth.user, embargo_end_date)
            except ValidationError as err:
                raise exceptions.ValidationError(err.message)
        else:
            # Immediate registration still requires admin approval.
            try:
                registration.require_approval(auth.user)
            except NodeStateError as err:
                raise exceptions.ValidationError(err)
        registration.save()
        return registration

    def _find_orphan_files(self, registering, draft):
        """Return file metadata entries attached to the draft that are not
        part of any node being registered (see _is_attached_file_valid)."""
        # Local import to avoid a module-level dependency cycle with website.*.
        from website.archiver.utils import find_selected_files
        files = find_selected_files(draft.registration_schema, draft.registration_metadata)
        orphan_files = []
        for key, value in files.items():
            if 'extra' in value:
                for file_metadata in value['extra']:
                    if not self._is_attached_file_valid(file_metadata, registering):
                        orphan_files.append(file_metadata)
        return orphan_files

    def _is_attached_file_valid(self, file_metadata, registering):
        """
        Validation of file information on registration_metadata. Theoretically, the file information
        on registration_responses does not have to be valid, so we enforce their accuracy here,
        to ensure file links load properly.
        Verifying that nodeId in the file_metadata is one of the files we're registering. Verify
        that selectedFileName is the name of a file on the node. Verify that the sha256 matches
        a version on that file.
        :param file_metadata - under "registration_metadata"
        :param registering - node ids you are registering
        :return boolean
        """
        node_id = file_metadata.get('nodeId')
        if node_id not in registering:
            return False
        node = AbstractNode.load(node_id)
        if not node:
            # node in registration_metadata doesn't exist
            return False
        specified_sha = file_metadata.get('sha256', '')
        # Try both Unicode normalization forms of the file name, since the
        # stored name and the submitted name may differ in normalization.
        file = node.files.filter(name=normalize('NFD', file_metadata.get('selectedFileName', ''))).first() or \
            node.files.filter(name=normalize('NFC', file_metadata.get('selectedFileName', ''))).first()
        if not file:
            # file with this name does not exist on the node
            return False
        match = False
        for version in file.versions.all():
            if specified_sha == version.metadata.get('sha256'):
                match = True
        if not match:
            # Specified sha256 does not match a version on the specified file
            return False
        return True
class RegistrationDetailSerializer(RegistrationSerializer):
"""
Overrides RegistrationSerializer make _id required and other fields writeable
"""
id = IDField(source='_id', required=True)
pending_withdrawal = HideIfWithdrawal(ser.BooleanField(
source='is_pending_retraction', required=False,
help_text='The registration is awaiting withdrawal approval by project admins.',
))
withdrawal_justification = ser.CharField(required=False)
class RegistrationNodeLinksSerializer(NodeLinksSerializer):
def get_absolute_url(self, obj):
return absolute_reverse(
'registrations:registration-pointer-detail',
kwargs={
'node_link_id': obj._id,
'node_id': self.context['request'].parser_context['kwargs']['node_id'],
'version': self.context['request'].parser_context['kwargs']['version'],
},
)
class RegistrationContributorsSerializer(NodeContributorsSerializer):
def get_absolute_url(self, obj):
return absolute_reverse(
'registrations:registration-contributor-detail',
kwargs={
'user_id': obj.user._id,
'node_id': self.context['request'].parser_context['kwargs']['node_id'],
'version': self.context['request'].parser_context['kwargs']['version'],
},
)
class RegistrationFileSerializer(OsfStorageFileSerializer):
files = NodeFileHyperLinkField(
related_view='registrations:registration-files',
related_view_kwargs={'node_id': '<target._id>', 'path': '<path>', 'provider': '<provider>'},
kind='folder',
)
comments = FileRelationshipField(
related_view='registrations:registration-comments',
related_view_kwargs={'node_id': '<target._id>'},
related_meta={'unread': 'get_unread_comments_count'},
filter={'target': 'get_file_guid'},
)
node = RelationshipField(
related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<target._id>'},
help_text='The registration that this file belongs to',
)
class RegistrationStorageProviderSerializer(NodeStorageProviderSerializer):
"""
Overrides NodeStorageProviderSerializer to lead to correct registration file links
"""
files = NodeFileHyperLinkField(
related_view='registrations:registration-files',
related_view_kwargs={'node_id': '<target._id>', 'path': '<path>', 'provider': '<provider>'},
kind='folder',
never_embed=True,
)
| 42.619171 | 133 | 0.687891 | import pytz
import json
from unicodedata import normalize
from distutils.version import StrictVersion
from django.core.exceptions import ValidationError
from rest_framework import serializers as ser
from rest_framework import exceptions
from api.base.exceptions import Conflict, InvalidModelValueError, JSONAPIException
from api.base.serializers import is_anonymized
from api.base.utils import absolute_reverse, get_user_auth, is_truthy
from api.base.versioning import CREATE_REGISTRATION_FIELD_CHANGE_VERSION
from website.project.model import NodeUpdateError
from api.files.serializers import OsfStorageFileSerializer
from api.nodes.serializers import (
NodeSerializer,
NodeStorageProviderSerializer,
NodeLicenseRelationshipField,
NodeLinksSerializer,
update_institutions,
NodeLicenseSerializer,
NodeContributorsSerializer,
RegistrationProviderRelationshipField,
get_license_details,
)
from api.base.serializers import (
IDField, RelationshipField, LinksField, HideIfWithdrawal,
FileRelationshipField, NodeFileHyperLinkField, HideIfRegistration,
ShowIfVersion, VersionedDateTimeField, ValuesListField,
)
from framework.auth.core import Auth
from osf.exceptions import ValidationValueError, NodeStateError
from osf.models import Node, AbstractNode
from osf.utils.registrations import strip_registered_meta_comments
from framework.sentry import log_exception
class RegistrationSerializer(NodeSerializer):
admin_only_editable_fields = [
'custom_citation',
'is_pending_retraction',
'is_public',
'withdrawal_justification',
]
non_anonymized_fields = NodeSerializer.non_anonymized_fields + [
'archiving',
'article_doi',
'date_registered',
'date_withdrawn',
'embargo_end_date',
'embargoed',
'pending_embargo_approval',
'pending_embargo_termination_approval',
'pending_registration_approval',
'pending_withdrawal',
'provider',
'registered_by',
'registered_from',
'registered_meta',
'registration_responses',
'registration_schema',
'registration_supplement',
'withdrawal_justification',
'withdrawn',
]
reviews_state = ser.CharField(source='moderation_state', read_only=True)
title = ser.CharField(read_only=True)
description = ser.CharField(required=False, allow_blank=True, allow_null=True)
category_choices = NodeSerializer.category_choices
category_choices_string = NodeSerializer.category_choices_string
category = ser.ChoiceField(required=False, choices=category_choices, help_text='Choices: ' + category_choices_string)
date_modified = VersionedDateTimeField(source='last_logged', read_only=True)
fork = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_fork'))
collection = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_collection'))
access_requests_enabled = HideIfWithdrawal(ser.BooleanField(read_only=True))
node_license = HideIfWithdrawal(NodeLicenseSerializer(required=False, source='license'))
tags = HideIfWithdrawal(ValuesListField(attr_name='name', child=ser.CharField(), required=False))
article_doi = ser.CharField(required=False, allow_null=True)
public = HideIfWithdrawal(ser.BooleanField(
source='is_public', required=False,
help_text='Nodes that are made public will give read-only access '
'to everyone. Private nodes require explicit read '
'permission. Write and admin access are the same for '
'public and private nodes. Administrators on a parent '
'node have implicit read permissions for all child nodes',
))
current_user_permissions = HideIfWithdrawal(ser.SerializerMethodField(
help_text='List of strings representing the permissions '
'for the current user on this node.',
))
pending_embargo_approval = HideIfWithdrawal(ser.BooleanField(
read_only=True, source='is_pending_embargo',
help_text='The associated Embargo is awaiting approval by project admins.',
))
pending_embargo_termination_approval = HideIfWithdrawal(ser.BooleanField(
read_only=True, source='is_pending_embargo_termination',
help_text='The associated Embargo early termination is awaiting approval by project admins',
))
embargoed = HideIfWithdrawal(ser.BooleanField(read_only=True, source='is_embargoed'))
pending_registration_approval = HideIfWithdrawal(ser.BooleanField(
source='is_pending_registration', read_only=True,
help_text='The associated RegistrationApproval is awaiting approval by project admins.',
))
archiving = HideIfWithdrawal(ser.BooleanField(read_only=True))
pending_withdrawal = HideIfWithdrawal(ser.BooleanField(
source='is_pending_retraction', read_only=True,
help_text='The registration is awaiting withdrawal approval by project admins.',
))
withdrawn = ser.BooleanField(
source='is_retracted', read_only=True,
help_text='The registration has been withdrawn.',
)
has_project = ser.SerializerMethodField()
date_registered = VersionedDateTimeField(source='registered_date', read_only=True, help_text='Date time of registration.')
date_withdrawn = VersionedDateTimeField(read_only=True, help_text='Date time of when this registration was retracted.')
embargo_end_date = HideIfWithdrawal(ser.SerializerMethodField(help_text='When the embargo on this registration will be lifted.'))
custom_citation = HideIfWithdrawal(ser.CharField(allow_blank=True, required=False))
withdrawal_justification = ser.CharField(read_only=True)
template_from = HideIfWithdrawal(ser.CharField(
read_only=True, allow_blank=False, allow_null=False,
help_text='Specify a node id for a node you would like to use as a template for the '
'new node. Templating is like forking, except that you do not copy the '
'files, only the project structure. Some information is changed on the top '
'level project by submitting the appropriate fields in the request body, '
'and some information will not change. By default, the description will '
'be cleared and the project will be made private.',
))
registration_supplement = ser.SerializerMethodField()
# Will be deprecated in favor of registration_responses
registered_meta = HideIfWithdrawal(ser.SerializerMethodField(
help_text='A dictionary with supplemental registration questions and responses.',
))
registration_responses = HideIfWithdrawal(ser.SerializerMethodField(
help_text='A dictionary with supplemental registration questions and responses.',
))
registered_by = HideIfWithdrawal(RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<registered_user._id>'},
))
registered_from = RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<registered_from._id>'},
)
children = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-children',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_node_count'},
))
comments = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-comments',
related_view_kwargs={'node_id': '<_id>'},
related_meta={
'unread': 'get_unread_comments_count',
'count': 'get_total_comments_count',
},
filter={'target': '<_id>'},
))
contributors = RelationshipField(
related_view='registrations:registration-contributors',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_contrib_count'},
)
bibliographic_contributors = RelationshipField(
related_view='registrations:registration-bibliographic-contributors',
related_view_kwargs={'node_id': '<_id>'},
)
implicit_contributors = RelationshipField(
related_view='registrations:registration-implicit-contributors',
related_view_kwargs={'node_id': '<_id>'},
help_text='This feature is experimental and being tested. It may be deprecated.',
)
files = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-storage-providers',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_files_count'},
))
wikis = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-wikis',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_wiki_page_count'},
))
forked_from = HideIfWithdrawal(RelationshipField(
related_view=lambda n: 'registrations:registration-detail' if getattr(n, 'is_registration', False) else 'nodes:node-detail',
related_view_kwargs={'node_id': '<forked_from_id>'},
))
template_node = HideIfWithdrawal(RelationshipField(
related_view='nodes:node-detail',
related_view_kwargs={'node_id': '<template_node._id>'},
))
license = HideIfWithdrawal(NodeLicenseRelationshipField(
related_view='licenses:license-detail',
related_view_kwargs={'license_id': '<license.node_license._id>'},
read_only=False,
))
logs = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-logs',
related_view_kwargs={'node_id': '<_id>'},
))
forks = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-forks',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_forks_count'},
))
groups = HideIfRegistration(RelationshipField(
related_view='nodes:node-groups',
related_view_kwargs={'node_id': '<_id>'},
))
node_links = ShowIfVersion(
HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-pointers',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_pointers_count'},
help_text='This feature is deprecated as of version 2.1. Use linked_nodes instead.',
)), min_version='2.0', max_version='2.0',
)
linked_by_nodes = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-linked-by-nodes',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_linked_by_nodes_count'},
))
linked_by_registrations = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-linked-by-registrations',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_linked_by_registrations_count'},
))
parent = RelationshipField(
related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<parent_node._id>'},
filter_key='parent_node',
)
root = RelationshipField(
related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<root._id>'},
)
region = HideIfWithdrawal(RelationshipField(
related_view='regions:region-detail',
related_view_kwargs={'region_id': '<osfstorage_region._id>'},
read_only=True,
))
affiliated_institutions = RelationshipField(
related_view='registrations:registration-institutions',
related_view_kwargs={'node_id': '<_id>'},
self_view='registrations:registration-relationships-institutions',
self_view_kwargs={'node_id': '<_id>'},
read_only=False,
many=True,
required=False,
)
registration_schema = RelationshipField(
related_view='schemas:registration-schema-detail',
related_view_kwargs={'schema_id': '<registered_schema_id>'},
)
settings = HideIfRegistration(RelationshipField(
related_view='nodes:node-settings',
related_view_kwargs={'node_id': '<_id>'},
))
registrations = HideIfRegistration(RelationshipField(
related_view='nodes:node-registrations',
related_view_kwargs={'node_id': '<_id>'},
))
draft_registrations = HideIfRegistration(RelationshipField(
related_view='nodes:node-draft-registrations',
related_view_kwargs={'node_id': '<_id>'},
))
preprints = HideIfWithdrawal(HideIfRegistration(RelationshipField(
related_view='nodes:node-preprints',
related_view_kwargs={'node_id': '<_id>'},
)))
identifiers = RelationshipField(
related_view='registrations:identifier-list',
related_view_kwargs={'node_id': '<_id>'},
)
linked_nodes = HideIfWithdrawal(RelationshipField(
related_view='registrations:linked-nodes',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_node_links_count'},
self_view='registrations:node-pointer-relationship',
self_view_kwargs={'node_id': '<_id>'},
))
linked_registrations = HideIfWithdrawal(RelationshipField(
related_view='registrations:linked-registrations',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_registration_links_count'},
self_view='registrations:node-registration-pointer-relationship',
self_view_kwargs={'node_id': '<_id>'},
))
view_only_links = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-view-only-links',
related_view_kwargs={'node_id': '<_id>'},
related_meta={'count': 'get_view_only_links_count'},
))
citation = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-citation',
related_view_kwargs={'node_id': '<_id>'},
))
provider = RegistrationProviderRelationshipField(
related_view='providers:registration-providers:registration-provider-detail',
related_view_kwargs={'provider_id': '<provider._id>'},
read_only=True,
)
review_actions = RelationshipField(
related_view='registrations:registration-actions-list',
related_view_kwargs={'node_id': '<_id>'},
)
requests = HideIfWithdrawal(RelationshipField(
related_view='registrations:registration-requests-list',
related_view_kwargs={'node_id': '<_id>'},
))
@property
def subjects_related_view(self):
# Overrides TaxonomizableSerializerMixin
return 'registrations:registration-subjects'
@property
def subjects_self_view(self):
# Overrides TaxonomizableSerializerMixin
return 'registrations:registration-relationships-subjects'
links = LinksField({'html': 'get_absolute_html_url'})
def get_has_project(self, obj):
return obj.has_project
def get_absolute_url(self, obj):
return obj.get_absolute_url()
def get_registered_meta(self, obj):
if obj.registered_meta:
meta_values = self.anonymize_registered_meta(obj)
try:
return json.loads(meta_values)
except TypeError:
return meta_values
except ValueError:
return meta_values
return None
def get_registration_responses(self, obj):
if obj.registration_responses:
return self.anonymize_registration_responses(obj)
return None
def get_embargo_end_date(self, obj):
if obj.embargo_end_date:
return obj.embargo_end_date
return None
def get_registration_supplement(self, obj):
if obj.registered_schema:
schema = obj.registered_schema.first()
if schema is None:
return None
return schema.name
return None
def get_current_user_permissions(self, obj):
return NodeSerializer.get_current_user_permissions(self, obj)
def get_view_only_links_count(self, obj):
return obj.private_links.filter(is_deleted=False).count()
def get_total_comments_count(self, obj):
return obj.comment_set.filter(page='node', is_deleted=False).count()
def get_files_count(self, obj):
return obj.files_count or 0
def anonymize_registered_meta(self, obj):
cleaned_registered_meta = strip_registered_meta_comments(list(obj.registered_meta.values())[0])
return self.anonymize_fields(obj, cleaned_registered_meta)
def anonymize_registration_responses(self, obj):
return self.anonymize_fields(obj, obj.registration_responses)
def anonymize_fields(self, obj, data):
if is_anonymized(self.context['request']):
anonymous_registration_response_keys = obj.get_contributor_registration_response_keys()
for key in anonymous_registration_response_keys:
if key in data:
del data[key]
return data
def check_admin_perms(self, registration, user, validated_data):
user_is_admin = registration.is_admin_contributor(user)
for field in validated_data:
if field in self.admin_only_editable_fields and not user_is_admin:
raise exceptions.PermissionDenied()
def update_registration_tags(self, registration, validated_data, auth):
new_tags = validated_data.pop('tags', [])
try:
registration.update_tags(new_tags, auth=auth)
except NodeStateError as err:
raise Conflict(str(err))
def retract_registration(self, registration, validated_data, user):
is_pending_retraction = validated_data.pop('is_pending_retraction', None)
withdrawal_justification = validated_data.pop('withdrawal_justification', None)
if withdrawal_justification and not is_pending_retraction:
raise exceptions.ValidationError(
'You cannot provide a withdrawal_justification without a concurrent withdrawal request.',
)
if is_truthy(is_pending_retraction):
if registration.is_pending_retraction:
raise exceptions.ValidationError('This registration is already pending withdrawal.')
try:
retraction = registration.retract_registration(user, withdrawal_justification, save=True)
except NodeStateError as err:
raise exceptions.ValidationError(str(err))
retraction.ask(registration.get_active_contributors_recursive(unique_users=True))
elif is_pending_retraction is not None:
raise exceptions.ValidationError('You cannot set is_pending_withdrawal to False.')
def update(self, registration, validated_data):
user = self.context['request'].user
auth = Auth(user)
self.check_admin_perms(registration, user, validated_data)
validated_data.pop('_id', None)
if 'tags' in validated_data:
self.update_registration_tags(registration, validated_data, auth)
if 'custom_citation' in validated_data:
registration.update_custom_citation(validated_data.pop('custom_citation'), auth)
if 'license_type' in validated_data or 'license' in validated_data:
license_details = get_license_details(registration, validated_data)
validated_data['node_license'] = license_details
validated_data.pop('license_type', None)
validated_data.pop('license', None)
if 'affiliated_institutions' in validated_data:
institutions_list = validated_data.pop('affiliated_institutions')
new_institutions = [{'_id': institution} for institution in institutions_list]
update_institutions(registration, new_institutions, user)
registration.save()
if 'subjects' in validated_data:
subjects = validated_data.pop('subjects', None)
self.update_subjects(registration, subjects, auth)
if 'withdrawal_justification' in validated_data or 'is_pending_retraction' in validated_data:
self.retract_registration(registration, validated_data, user)
if 'is_public' in validated_data:
if validated_data.get('is_public') is False:
raise exceptions.ValidationError('Registrations can only be turned from private to public.')
try:
registration.update(validated_data, auth=auth)
except ValidationError as e:
raise InvalidModelValueError(detail=e.messages[0])
except NodeUpdateError as err:
raise exceptions.ValidationError(err.reason)
except NodeStateError as err:
raise exceptions.ValidationError(str(err))
return registration
class Meta:
type_ = 'registrations'
class RegistrationCreateSerializer(RegistrationSerializer):
def expect_cleaner_attributes(self, request):
return StrictVersion(getattr(request, 'version', '2.0')) >= StrictVersion(CREATE_REGISTRATION_FIELD_CHANGE_VERSION)
def __init__(self, *args, **kwargs):
super(RegistrationCreateSerializer, self).__init__(*args, **kwargs)
request = kwargs['context']['request']
# required fields defined here for the different versions
if self.expect_cleaner_attributes(request):
self.fields['draft_registration_id'] = ser.CharField(write_only=True)
else:
self.fields['draft_registration'] = ser.CharField(write_only=True)
# For newer versions
embargo_end_date = VersionedDateTimeField(write_only=True, allow_null=True, default=None)
included_node_ids = ser.ListField(write_only=True, required=False)
# For older versions
lift_embargo = VersionedDateTimeField(write_only=True, default=None, input_formats=['%Y-%m-%dT%H:%M:%S'])
children = ser.ListField(write_only=True, required=False)
registration_choice = ser.ChoiceField(write_only=True, required=False, choices=['immediate', 'embargo'])
users = RelationshipField(
related_view='users:user-detail',
related_view_kwargs={'user_id': '<user._id>'},
always_embed=True,
required=False,
)
def get_registration_choice_by_version(self, validated_data):
if self.expect_cleaner_attributes(self.context['request']):
if validated_data.get('registration_choice'):
raise JSONAPIException(
source={'pointer': '/data/attributes/registration_choice'},
detail=f'Deprecated in version {CREATE_REGISTRATION_FIELD_CHANGE_VERSION}. Use embargo_end_date instead.',
)
return 'embargo' if validated_data.get('embargo_end_date', None) else 'immediate'
return validated_data.get('registration_choice', 'immediate')
def get_embargo_end_date_by_version(self, validated_data):
if self.expect_cleaner_attributes(self.context['request']):
if validated_data.get('lift_embargo'):
raise JSONAPIException(
source={'pointer': '/data/attributes/lift_embargo'},
detail=f'Deprecated in version {CREATE_REGISTRATION_FIELD_CHANGE_VERSION}. Use embargo_end_date instead.',
)
return validated_data.get('embargo_end_date', None)
return validated_data.get('lift_embargo')
def get_children_by_version(self, validated_data):
if self.expect_cleaner_attributes(self.context['request']):
return validated_data.get('included_node_ids', [])
return validated_data.get('children', [])
def create(self, validated_data):
auth = get_user_auth(self.context['request'])
draft = validated_data.pop('draft', None)
registration_choice = self.get_registration_choice_by_version(validated_data)
embargo_lifted = self.get_embargo_end_date_by_version(validated_data)
children = self.get_children_by_version(validated_data)
if children:
# First check that all children are valid
child_nodes = Node.objects.filter(guids___id__in=children)
if child_nodes.count() != len(children):
raise exceptions.ValidationError('Some child nodes could not be found.')
# Second check that metadata doesn't have files that are not in the child nodes being registered.
registering = children + [draft.branched_from._id]
orphan_files = self._find_orphan_files(registering, draft)
if orphan_files:
orphan_files_names = [file_data['selectedFileName'] for file_data in orphan_files]
raise exceptions.ValidationError('All files attached to this form must be registered to complete the process. '
'The following file(s) are attached, but are not part of a component being'
' registered: {}'.format(', '.join(orphan_files_names)))
try:
draft.validate_metadata(metadata=draft.registration_metadata, required_fields=True)
except ValidationValueError:
log_exception()
try:
registration = draft.register(auth, save=True, child_ids=children)
except NodeStateError as err:
raise exceptions.ValidationError(err)
if registration_choice == 'embargo':
if not embargo_lifted:
raise exceptions.ValidationError('lift_embargo must be specified.')
embargo_end_date = embargo_lifted.replace(tzinfo=pytz.utc)
try:
registration.embargo_registration(auth.user, embargo_end_date)
except ValidationError as err:
raise exceptions.ValidationError(err.message)
else:
try:
registration.require_approval(auth.user)
except NodeStateError as err:
raise exceptions.ValidationError(err)
registration.save()
return registration
def _find_orphan_files(self, registering, draft):
from website.archiver.utils import find_selected_files
files = find_selected_files(draft.registration_schema, draft.registration_metadata)
orphan_files = []
for key, value in files.items():
if 'extra' in value:
for file_metadata in value['extra']:
if not self._is_attached_file_valid(file_metadata, registering):
orphan_files.append(file_metadata)
return orphan_files
def _is_attached_file_valid(self, file_metadata, registering):
node_id = file_metadata.get('nodeId')
if node_id not in registering:
return False
node = AbstractNode.load(node_id)
if not node:
return False
specified_sha = file_metadata.get('sha256', '')
file = node.files.filter(name=normalize('NFD', file_metadata.get('selectedFileName', ''))).first() or \
node.files.filter(name=normalize('NFC', file_metadata.get('selectedFileName', ''))).first()
if not file:
# file with this name does not exist on the node
return False
match = False
for version in file.versions.all():
if specified_sha == version.metadata.get('sha256'):
match = True
if not match:
# Specified sha256 does not match a version on the specified file
return False
return True
class RegistrationDetailSerializer(RegistrationSerializer):
id = IDField(source='_id', required=True)
pending_withdrawal = HideIfWithdrawal(ser.BooleanField(
source='is_pending_retraction', required=False,
help_text='The registration is awaiting withdrawal approval by project admins.',
))
withdrawal_justification = ser.CharField(required=False)
class RegistrationNodeLinksSerializer(NodeLinksSerializer):
def get_absolute_url(self, obj):
return absolute_reverse(
'registrations:registration-pointer-detail',
kwargs={
'node_link_id': obj._id,
'node_id': self.context['request'].parser_context['kwargs']['node_id'],
'version': self.context['request'].parser_context['kwargs']['version'],
},
)
class RegistrationContributorsSerializer(NodeContributorsSerializer):
def get_absolute_url(self, obj):
return absolute_reverse(
'registrations:registration-contributor-detail',
kwargs={
'user_id': obj.user._id,
'node_id': self.context['request'].parser_context['kwargs']['node_id'],
'version': self.context['request'].parser_context['kwargs']['version'],
},
)
class RegistrationFileSerializer(OsfStorageFileSerializer):
files = NodeFileHyperLinkField(
related_view='registrations:registration-files',
related_view_kwargs={'node_id': '<target._id>', 'path': '<path>', 'provider': '<provider>'},
kind='folder',
)
comments = FileRelationshipField(
related_view='registrations:registration-comments',
related_view_kwargs={'node_id': '<target._id>'},
related_meta={'unread': 'get_unread_comments_count'},
filter={'target': 'get_file_guid'},
)
node = RelationshipField(
related_view='registrations:registration-detail',
related_view_kwargs={'node_id': '<target._id>'},
help_text='The registration that this file belongs to',
)
class RegistrationStorageProviderSerializer(NodeStorageProviderSerializer):
files = NodeFileHyperLinkField(
related_view='registrations:registration-files',
related_view_kwargs={'node_id': '<target._id>', 'path': '<path>', 'provider': '<provider>'},
kind='folder',
never_embed=True,
)
| true | true |
f723eb375382b38526c20e9d0239da24728a5d3d | 698 | py | Python | tests/nested_foreign_keys/models.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/nested_foreign_keys/models.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | tests/nested_foreign_keys/models.py | Yoann-Vie/esgi-hearthstone | 115d03426c7e8e80d89883b78ac72114c29bed12 | [
"PSF-2.0",
"BSD-3-Clause"
] | null | null | null | from django.db import models
class Person(models.Model):
name = models.CharField(max_length=200)
class Movie(models.Model):
title = models.CharField(max_length=200)
director = models.ForeignKey(Person, models.CASCADE)
class Event(models.Model):
pass
class Screening(Event):
movie = models.ForeignKey(Movie, models.CASCADE)
class ScreeningNullFK(Event):
movie = models.ForeignKey(Movie, models.SET_NULL, null=True)
class Package(models.Model):
screening = models.ForeignKey(Screening, models.SET_NULL, null=True)
class PackageNullFK(models.Model):
screening = models.ForeignKey(ScreeningNullFK, models.SET_NULL, null=True)
| 22.516129 | 79 | 0.717765 | from django.db import models
class Person(models.Model):
name = models.CharField(max_length=200)
class Movie(models.Model):
title = models.CharField(max_length=200)
director = models.ForeignKey(Person, models.CASCADE)
class Event(models.Model):
pass
class Screening(Event):
movie = models.ForeignKey(Movie, models.CASCADE)
class ScreeningNullFK(Event):
movie = models.ForeignKey(Movie, models.SET_NULL, null=True)
class Package(models.Model):
screening = models.ForeignKey(Screening, models.SET_NULL, null=True)
class PackageNullFK(models.Model):
screening = models.ForeignKey(ScreeningNullFK, models.SET_NULL, null=True)
| true | true |
f723eb9b8474a649714e9207663dcbb042ef7f19 | 14,422 | py | Python | scipy/optimize/tests/test_linesearch.py | ischrot/scipy_rmt_bsc | 1dd8f7f0ee7ac1311ed1735ca6b6025150524418 | [
"BSD-3-Clause"
] | null | null | null | scipy/optimize/tests/test_linesearch.py | ischrot/scipy_rmt_bsc | 1dd8f7f0ee7ac1311ed1735ca6b6025150524418 | [
"BSD-3-Clause"
] | null | null | null | scipy/optimize/tests/test_linesearch.py | ischrot/scipy_rmt_bsc | 1dd8f7f0ee7ac1311ed1735ca6b6025150524418 | [
"BSD-3-Clause"
] | null | null | null | """
Tests for line search routines
"""
from numpy.testing import (assert_, assert_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_warns,
suppress_warnings)
import scipy.optimize.linesearch as ls
import scipy.optimize.nonlin as nl #(LS)
from scipy.linalg import norm
from scipy.optimize.linesearch import LineSearchWarning
import numpy as np
from copy import deepcopy # (IS)
def assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=""):
"""
Check that strong Wolfe conditions apply
"""
phi1 = phi(s)
phi0 = phi(0)
derphi0 = derphi(0)
derphi1 = derphi(s)
msg = "s = %s; phi(0) = %s; phi(s) = %s; phi'(0) = %s; phi'(s) = %s; %s" % (
s, phi0, phi1, derphi0, derphi1, err_msg)
assert_(phi1 <= phi0 + c1*s*derphi0, "Wolfe 1 failed: " + msg)
assert_(abs(derphi1) <= abs(c2*derphi0), "Wolfe 2 failed: " + msg)
def assert_armijo(s, phi, c1=1e-4, err_msg=""):
    """Assert the (multiplicative) Armijo sufficient-decrease condition."""
    value_at_s = phi(s)
    value_at_zero = phi(0)
    msg = "s = %s; phi(0) = %s; phi(s) = %s; %s" % (s, value_at_zero, value_at_s, err_msg)
    # Multiplicative form used by these tests: phi(s) <= (1 - c1*s) * phi(0).
    threshold = (1 - c1 * s) * value_at_zero
    assert_(value_at_s <= threshold, msg)
###(LS)###
def assert_rmt(alpha, dx, F0, Fx_new, jacobian, param, c1=1e-4, err_msg=""):
    """Assert that the RMT (restrictive monotonicity test) condition holds
    at step length `alpha`, or that `alpha` fell below the minimum step."""
    opts = ls.prepare_parameters('rmt', param, jacobian, dx)
    eta_upper = opts['rmt_eta_upper']
    eta_lower = opts['rmt_eta_lower']
    amin = opts['amin']

    # Simplified Newton step at the trial point, using the frozen Jacobian.
    dxbar = jacobian.solve(
        Fx_new
    )
    # Deviation from the linear model; note dx = -J(x_k)^(-1) F(x_k).
    deviation = dxbar + (1 - alpha) * dx
    t_dx_omega = (2 * norm(deviation)) / (alpha * norm(dx))

    in_band = eta_lower <= t_dx_omega <= eta_upper
    # A full step is also acceptable when the monotonicity measure is below
    # the lower band edge.
    full_step_ok = t_dx_omega < eta_lower and alpha == 1.0
    msg = "s = %s; phi(0) = %s; phi(s) = %s; %s" % (alpha, F0, Fx_new, err_msg)
    assert_(in_band or full_step_ok or (alpha < amin), msg)
def assert_bsc(alpha, x, dx, func, old_jacobian, param, err_msg):
    """Assert that the BSC step-size condition holds at step length `alpha`,
    or that `alpha` fell below the minimum step."""
    opts = ls.prepare_parameters('bsc', param, old_jacobian, dx)
    h_lower = opts['H_lower']
    h_upper = opts['H_upper']
    amin = opts['amin']

    trial_point = x + alpha * dx
    f_trial = func(trial_point)

    # Update a *copy* of the Jacobian approximation so the caller's
    # `old_jacobian` is left untouched.
    jac = deepcopy(old_jacobian)
    jac.update(
        trial_point.copy(),
        f_trial
    )
    next_step = -jac.solve(
        f_trial
    )

    h_prime = alpha * norm(next_step - dx)
    in_band = h_lower <= h_prime <= h_upper
    # Full (or longer) steps are also acceptable below the lower band edge.
    full_step_ok = h_prime < h_lower and alpha >= 1.0
    msg = "s = %s; phi(0) = %s; phi(s) = %s; %s" % (alpha, func(x), f_trial, err_msg)
    assert_(in_band or full_step_ok or (alpha < amin), msg)
###(LS)###
def assert_line_wolfe(x, p, s, f, fprime, **kw):
    """Check the strong Wolfe conditions along the ray x + s*p."""
    def phi(step):
        return f(x + p * step)

    def derphi(step):
        # Directional derivative of f along p at x + step*p.
        return np.dot(fprime(x + p * step), p)

    assert_wolfe(s, phi=phi, derphi=derphi, **kw)
def assert_line_armijo(x, p, s, f, **kw):
    """Check the Armijo condition along the ray x + s*p."""
    def phi(step):
        return f(x + p * step)

    assert_armijo(s, phi=phi, **kw)
def assert_fp_equal(x, y, err_msg="", nulp=50):
    """Assert that two arrays agree to within `nulp` units in the last place."""
    try:
        assert_array_almost_equal_nulp(x, y, nulp)
    except AssertionError as exc:
        # Re-raise with the caller-supplied context appended, chaining the
        # original failure as the cause.
        raise AssertionError("%s\n%s" % (exc, err_msg)) from exc
class TestLineSearch(object):
    """Tests for the scalar- and line-search routines in scipy.optimize.linesearch.

    ``setup_method`` collects the ``_scalar_func_*`` / ``_line_func_*``
    members into ``self.scalar_funcs`` / ``self.line_funcs``; the
    ``scalar_iter`` / ``line_iter`` generators then produce test cases from
    them.  ``self.fcount`` counts objective/gradient evaluations so the
    tests can check the searches' reported evaluation counts.
    """

    # -- scalar functions; must have dphi(0.) < 0

    def _scalar_func_1(self, s):
        self.fcount += 1
        p = -s - s**3 + s**4
        dp = -1 - 3*s**2 + 4*s**3
        return p, dp

    def _scalar_func_2(self, s):
        self.fcount += 1
        p = np.exp(-4*s) + s**2
        dp = -4*np.exp(-4*s) + 2*s
        return p, dp

    def _scalar_func_3(self, s):
        self.fcount += 1
        p = -np.sin(10*s)
        dp = -10*np.cos(10*s)
        return p, dp

    # -- n-d functions

    def _line_func_1(self, x):
        self.fcount += 1
        f = np.dot(x, x)
        df = 2*x
        return f, df

    def _line_func_2(self, x):
        self.fcount += 1
        f = np.dot(x, np.dot(self.A, x)) + 1
        df = np.dot(self.A + self.A.T, x)
        return f, df

    # --

    def setup_method(self):
        self.scalar_funcs = []
        self.line_funcs = []
        self.N = 20
        self.fcount = 0

        def bind_index(func, idx):
            # Remember Python's closure semantics!
            return lambda *a, **kw: func(*a, **kw)[idx]

        for name in sorted(dir(self)):
            if name.startswith('_scalar_func_'):
                value = getattr(self, name)
                self.scalar_funcs.append(
                    (name, bind_index(value, 0), bind_index(value, 1)))
            elif name.startswith('_line_func_'):
                value = getattr(self, name)
                self.line_funcs.append(
                    (name, bind_index(value, 0), bind_index(value, 1)))

        np.random.seed(1234)
        self.A = np.random.randn(self.N, self.N)

    def scalar_iter(self):
        """Yield (name, phi, derphi, old_phi0) scalar test cases."""
        for name, phi, derphi in self.scalar_funcs:
            for old_phi0 in np.random.randn(3):
                yield name, phi, derphi, old_phi0

    def line_iter(self):
        """Yield (name, f, fprime, x, p, old_fv) with p a descent direction."""
        for name, f, fprime in self.line_funcs:
            k = 0
            while k < 9:
                x = np.random.randn(self.N)
                p = np.random.randn(self.N)
                if np.dot(p, fprime(x)) >= 0:
                    # always pick a descent direction
                    continue
                k += 1
                old_fv = float(np.random.randn())
                yield name, f, fprime, x, p, old_fv

    # -- Generic scalar searches

    def test_scalar_search_wolfe1(self):
        c = 0
        for name, phi, derphi, old_phi0 in self.scalar_iter():
            c += 1
            s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0),
                                                    old_phi0, derphi(0))
            assert_fp_equal(phi0, phi(0), name)
            assert_fp_equal(phi1, phi(s), name)
            assert_wolfe(s, phi, derphi, err_msg=name)

        assert_(c > 3)  # check that the iterator really works...

    def test_scalar_search_wolfe2(self):
        for name, phi, derphi, old_phi0 in self.scalar_iter():
            s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2(
                phi, derphi, phi(0), old_phi0, derphi(0))
            assert_fp_equal(phi0, phi(0), name)
            assert_fp_equal(phi1, phi(s), name)
            if derphi1 is not None:
                assert_fp_equal(derphi1, derphi(s), name)
            assert_wolfe(s, phi, derphi, err_msg="%s %g" % (name, old_phi0))

    def test_scalar_search_wolfe2_with_low_amax(self):
        def phi(alpha):
            return (alpha - 5) ** 2

        def derphi(alpha):
            return 2 * (alpha - 5)

        # With amax far below the minimizer no acceptable step exists.
        s, _, _, _ = assert_warns(LineSearchWarning,
                                  ls.scalar_search_wolfe2, phi, derphi, amax=0.001)
        assert_(s is None)

    def test_scalar_search_wolfe2_regression(self):
        # Regression test for gh-12157
        # This phi has its minimum at alpha=4/3 ~ 1.333.
        def phi(alpha):
            if alpha < 1:
                return - 3*np.pi/2 * (alpha - 1)
            else:
                return np.cos(3*np.pi/2 * alpha - np.pi)

        def derphi(alpha):
            if alpha < 1:
                return - 3*np.pi/2
            else:
                return - 3*np.pi/2 * np.sin(3*np.pi/2 * alpha - np.pi)

        s, _, _, _ = ls.scalar_search_wolfe2(phi, derphi)
        # Without the fix in gh-13073, the scalar_search_wolfe2
        # returned s=2.0 instead.
        assert_(s < 1.5)

    def test_scalar_search_armijo(self):
        for name, phi, derphi, old_phi0 in self.scalar_iter():
            s, phi1 = ls.scalar_search_armijo(phi, phi(0), derphi(0))
            assert_fp_equal(phi1, phi(s), name)
            assert_armijo(s, phi, err_msg="%s %g" % (name, old_phi0))

    ###(LS)###
    ##RMT not usefull for scalar functions, thus no need for test_scalar_search_rmt?
    def test_line_search_rmt(self):
        #There is at least 1 function R^20->R to be tested, but this leads to s=None
        for name, f, fprime, x, p, old_f in self.line_iter():
            jac = lambda x: fprime(x)
            x0 = nl._as_inexact(x)
            func = lambda z: nl._as_inexact(f(nl._array_like(z, x0))).flatten()
            x = x0.flatten()
            jacobian = nl.asjacobian(jac)
            jacobian.setup(x.copy(), f(x), func)
            options = {'jacobian': jacobian, 'jac_tol': min(1e-03,1e-03*norm(f(x))), 'amin':1e-8}
            # NOTE(review): `f_new` is compared against the trial *point*
            # x + s*dx below, so scalar_search_rmt presumably returns the
            # new iterate as its third value -- confirm against its docs.
            s, dxbar, f_new = ls.scalar_search_rmt(f, x, fprime(x), parameters=options)
            assert_fp_equal(f_new, x+s*fprime(x), name)
            # Fixed: "%s %g" % name raised TypeError (format string needs two
            # arguments); supply (name, old_f) like the wolfe tests do.
            assert_rmt(s, fprime(x), f(x), f_new, jacobian, options,
                       err_msg="%s %g" % (name, old_f))

    def test_line_search_bsc(self):
        #There is at least 1 function R^20->R to be tested, but this leads to s=None
        for name, f, fprime, x, p, old_f in self.line_iter():
            jac = lambda x: fprime(x)
            x0 = nl._as_inexact(x)
            func = lambda z: nl._as_inexact(f(nl._array_like(z, x0))).flatten()
            x = x0.flatten()
            jacobian = nl.asjacobian(jac)
            jacobian.setup(x.copy(), f(x), func)
            options = {'jacobian': jacobian, 'jac_tol': min(1e-03,1e-03*norm(f(x))), 'amin':1e-8}
            s, f_new = ls.scalar_search_bsc(func, x, fprime(x), f(x), parameters=options)
            assert_fp_equal(f_new, x+s*fprime(x), name)
            # Fixed: "%s %g" % name raised TypeError (format string needs two
            # arguments); supply (name, old_f) like the wolfe tests do.
            assert_bsc(s, x, fprime(x), func, jacobian, options,
                       err_msg="%s %g" % (name, old_f))
    ###(LS)###

    # -- Generic line searches

    def test_line_search_wolfe1(self):
        c = 0
        smax = 100
        for name, f, fprime, x, p, old_f in self.line_iter():
            f0 = f(x)
            g0 = fprime(x)
            self.fcount = 0
            s, fc, gc, fv, ofv, gv = ls.line_search_wolfe1(f, fprime, x, p,
                                                           g0, f0, old_f,
                                                           amax=smax)
            assert_equal(self.fcount, fc+gc)
            assert_fp_equal(ofv, f(x))
            if s is None:
                continue
            assert_fp_equal(fv, f(x + s*p))
            assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)
            if s < smax:
                c += 1
                assert_line_wolfe(x, p, s, f, fprime, err_msg=name)

        assert_(c > 3)  # check that the iterator really works...

    def test_line_search_wolfe2(self):
        c = 0
        smax = 512
        for name, f, fprime, x, p, old_f in self.line_iter():
            f0 = f(x)
            g0 = fprime(x)
            self.fcount = 0
            with suppress_warnings() as sup:
                sup.filter(LineSearchWarning,
                           "The line search algorithm could not find a solution")
                sup.filter(LineSearchWarning,
                           "The line search algorithm did not converge")
                s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p,
                                                               g0, f0, old_f,
                                                               amax=smax)
            assert_equal(self.fcount, fc+gc)
            assert_fp_equal(ofv, f(x))
            assert_fp_equal(fv, f(x + s*p))
            if gv is not None:
                assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)
            if s < smax:
                c += 1
                assert_line_wolfe(x, p, s, f, fprime, err_msg=name)
        assert_(c > 3)  # check that the iterator really works...

    def test_line_search_wolfe2_bounds(self):
        # See gh-7475
        # For this f and p, starting at a point on axis 0, the strong Wolfe
        # condition 2 is met if and only if the step length s satisfies
        # |x + s| <= c2 * |x|
        f = lambda x: np.dot(x, x)
        fp = lambda x: 2 * x
        p = np.array([1, 0])

        # Smallest s satisfying strong Wolfe conditions for these arguments is 30
        x = -60 * p
        c2 = 0.5

        s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2)
        assert_line_wolfe(x, p, s, f, fp)

        s, _, _, _, _, _ = assert_warns(LineSearchWarning,
                                        ls.line_search_wolfe2, f, fp, x, p,
                                        amax=29, c2=c2)
        assert_(s is None)

        # s=30 will only be tried on the 6th iteration, so this won't converge
        assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p,
                     c2=c2, maxiter=5)

    def test_line_search_armijo(self):
        c = 0
        for name, f, fprime, x, p, old_f in self.line_iter():
            f0 = f(x)
            g0 = fprime(x)
            self.fcount = 0
            s, fc, fv = ls.line_search_armijo(f, x, p, g0, f0)
            c += 1
            assert_equal(self.fcount, fc)
            assert_fp_equal(fv, f(x + s*p))
            assert_line_armijo(x, p, s, f, err_msg=name)
        assert_(c >= 9)

    # -- More specific tests

    def test_armijo_terminate_1(self):
        # Armijo should evaluate the function only once if the trial step
        # is already suitable
        count = [0]

        def phi(s):
            count[0] += 1
            return -s + 0.01*s**2

        s, phi1 = ls.scalar_search_armijo(phi, phi(0), -1, alpha0=1)
        assert_equal(s, 1)
        # Two calls total: phi(0) above plus one trial evaluation.
        assert_equal(count[0], 2)
        assert_armijo(s, phi)

    def test_wolfe_terminate(self):
        # wolfe1 and wolfe2 should also evaluate the function only a few
        # times if the trial step is already suitable
        def phi(s):
            count[0] += 1
            return -s + 0.05*s**2

        def derphi(s):
            count[0] += 1
            return -1 + 0.05*2*s

        for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]:
            count = [0]
            r = func(phi, derphi, phi(0), None, derphi(0))
            assert_(r[0] is not None, (r, func))
            assert_(count[0] <= 2 + 2, (count, func))
            assert_wolfe(r[0], phi, derphi, err_msg=str(func))
| 35.348039 | 123 | 0.533491 | from numpy.testing import (assert_, assert_equal, assert_array_almost_equal,
assert_array_almost_equal_nulp, assert_warns,
suppress_warnings)
import scipy.optimize.linesearch as ls
import scipy.optimize.nonlin as nl
from scipy.linalg import norm
from scipy.optimize.linesearch import LineSearchWarning
import numpy as np
from copy import deepcopy
def assert_wolfe(s, phi, derphi, c1=1e-4, c2=0.9, err_msg=""):
phi1 = phi(s)
phi0 = phi(0)
derphi0 = derphi(0)
derphi1 = derphi(s)
msg = "s = %s; phi(0) = %s; phi(s) = %s; phi'(0) = %s; phi'(s) = %s; %s" % (
s, phi0, phi1, derphi0, derphi1, err_msg)
assert_(phi1 <= phi0 + c1*s*derphi0, "Wolfe 1 failed: " + msg)
assert_(abs(derphi1) <= abs(c2*derphi0), "Wolfe 2 failed: " + msg)
def assert_armijo(s, phi, c1=1e-4, err_msg=""):
phi1 = phi(s)
phi0 = phi(0)
msg = "s = %s; phi(0) = %s; phi(s) = %s; %s" % (s, phi0, phi1, err_msg)
assert_(phi1 <= (1 - c1*s)*phi0, msg)
dx, F0, Fx_new, jacobian, param, c1=1e-4, err_msg=""):
parameters = ls.prepare_parameters('rmt',param,jacobian,dx)
rmt_eta_upper = parameters['rmt_eta_upper']
rmt_eta_lower = parameters['rmt_eta_lower']
amin = parameters['amin']
dxbar = jacobian.solve(
Fx_new
)
dx_diff = dxbar + (1 - alpha) * dx
nominator = 2 * norm(dx_diff)
denominator = alpha * norm(dx)
t_dx_omega = nominator / denominator
tester = (rmt_eta_lower <= t_dx_omega and t_dx_omega <= rmt_eta_upper) or (rmt_eta_lower > t_dx_omega and alpha == 1.0)
msg = "s = %s; phi(0) = %s; phi(s) = %s; %s" % (alpha, F0, Fx_new, err_msg)
assert_(tester or (alpha<amin), msg)
def assert_bsc(alpha, x, dx, func, old_jacobian, param, err_msg):
parameters = ls.prepare_parameters('bsc',param, old_jacobian, dx)
H_lower = parameters['H_lower']
H_upper = parameters['H_upper']
amin = parameters['amin']
x_new = x + alpha * dx
Fx_new = func(x_new)
jacobian = deepcopy(old_jacobian)
jacobian.update(
x_new.copy(),
Fx_new
)
dx_next_it = -jacobian.solve(
Fx_new
)
dx_diff = dx_next_it - dx
H_prime = alpha * norm(dx_diff)
tester = (H_lower <= H_prime and H_prime <= H_upper) or (H_lower > H_prime and alpha >= 1.0)
msg = "s = %s; phi(0) = %s; phi(s) = %s; %s" % (alpha, func(x), Fx_new, err_msg)
assert_(tester or (alpha<amin), msg)
(x, p, s, f, fprime, **kw):
assert_wolfe(s, phi=lambda sp: f(x + p*sp),
derphi=lambda sp: np.dot(fprime(x + p*sp), p), **kw)
def assert_line_armijo(x, p, s, f, **kw):
assert_armijo(s, phi=lambda sp: f(x + p*sp), **kw)
def assert_fp_equal(x, y, err_msg="", nulp=50):
try:
assert_array_almost_equal_nulp(x, y, nulp)
except AssertionError as e:
raise AssertionError("%s\n%s" % (e, err_msg)) from e
class TestLineSearch(object):
def _scalar_func_1(self, s):
self.fcount += 1
p = -s - s**3 + s**4
dp = -1 - 3*s**2 + 4*s**3
return p, dp
def _scalar_func_2(self, s):
self.fcount += 1
p = np.exp(-4*s) + s**2
dp = -4*np.exp(-4*s) + 2*s
return p, dp
def _scalar_func_3(self, s):
self.fcount += 1
p = -np.sin(10*s)
dp = -10*np.cos(10*s)
return p, dp
def _line_func_1(self, x):
self.fcount += 1
f = np.dot(x, x)
df = 2*x
return f, df
def _line_func_2(self, x):
self.fcount += 1
f = np.dot(x, np.dot(self.A, x)) + 1
df = np.dot(self.A + self.A.T, x)
return f, df
def setup_method(self):
self.scalar_funcs = []
self.line_funcs = []
self.N = 20
self.fcount = 0
def bind_index(func, idx):
return lambda *a, **kw: func(*a, **kw)[idx]
for name in sorted(dir(self)):
if name.startswith('_scalar_func_'):
value = getattr(self, name)
self.scalar_funcs.append(
(name, bind_index(value, 0), bind_index(value, 1)))
elif name.startswith('_line_func_'):
value = getattr(self, name)
self.line_funcs.append(
(name, bind_index(value, 0), bind_index(value, 1)))
np.random.seed(1234)
self.A = np.random.randn(self.N, self.N)
def scalar_iter(self):
for name, phi, derphi in self.scalar_funcs:
for old_phi0 in np.random.randn(3):
yield name, phi, derphi, old_phi0
def line_iter(self):
for name, f, fprime in self.line_funcs:
k = 0
while k < 9:
x = np.random.randn(self.N)
p = np.random.randn(self.N)
if np.dot(p, fprime(x)) >= 0:
# always pick a descent direction
continue
k += 1
old_fv = float(np.random.randn())
yield name, f, fprime, x, p, old_fv
# -- Generic scalar searches
def test_scalar_search_wolfe1(self):
c = 0
for name, phi, derphi, old_phi0 in self.scalar_iter():
c += 1
s, phi1, phi0 = ls.scalar_search_wolfe1(phi, derphi, phi(0),
old_phi0, derphi(0))
assert_fp_equal(phi0, phi(0), name)
assert_fp_equal(phi1, phi(s), name)
assert_wolfe(s, phi, derphi, err_msg=name)
assert_(c > 3) # check that the iterator really works...
def test_scalar_search_wolfe2(self):
for name, phi, derphi, old_phi0 in self.scalar_iter():
s, phi1, phi0, derphi1 = ls.scalar_search_wolfe2(
phi, derphi, phi(0), old_phi0, derphi(0))
assert_fp_equal(phi0, phi(0), name)
assert_fp_equal(phi1, phi(s), name)
if derphi1 is not None:
assert_fp_equal(derphi1, derphi(s), name)
assert_wolfe(s, phi, derphi, err_msg="%s %g" % (name, old_phi0))
def test_scalar_search_wolfe2_with_low_amax(self):
def phi(alpha):
return (alpha - 5) ** 2
def derphi(alpha):
return 2 * (alpha - 5)
s, _, _, _ = assert_warns(LineSearchWarning,
ls.scalar_search_wolfe2, phi, derphi, amax=0.001)
assert_(s is None)
def test_scalar_search_wolfe2_regression(self):
# Regression test for gh-12157
# This phi has its minimum at alpha=4/3 ~ 1.333.
def phi(alpha):
if alpha < 1:
return - 3*np.pi/2 * (alpha - 1)
else:
return np.cos(3*np.pi/2 * alpha - np.pi)
def derphi(alpha):
if alpha < 1:
return - 3*np.pi/2
else:
return - 3*np.pi/2 * np.sin(3*np.pi/2 * alpha - np.pi)
s, _, _, _ = ls.scalar_search_wolfe2(phi, derphi)
# Without the fix in gh-13073, the scalar_search_wolfe2
# returned s=2.0 instead.
assert_(s < 1.5)
def test_scalar_search_armijo(self):
for name, phi, derphi, old_phi0 in self.scalar_iter():
s, phi1 = ls.scalar_search_armijo(phi, phi(0), derphi(0))
assert_fp_equal(phi1, phi(s), name)
assert_armijo(s, phi, err_msg="%s %g" % (name, old_phi0))
###(LS)###
##RMT not usefull for scalar functions, thus no need for test_scalar_search_rmt?
def test_line_search_rmt(self):
#There is at least 1 function R^20->R to be tested, but this leads to s=None
for name, f, fprime, x, p, old_f in self.line_iter():
jac = lambda x: fprime(x)
x0 = nl._as_inexact(x)
func = lambda z: nl._as_inexact(f(nl._array_like(z, x0))).flatten()
x = x0.flatten()
jacobian = nl.asjacobian(jac)
jacobian.setup(x.copy(), f(x), func)
options = {'jacobian': jacobian, 'jac_tol': min(1e-03,1e-03*norm(f(x))), 'amin':1e-8}
#print("1: ",f(x),np.shape(fprime(x)))
s, dxbar, f_new = ls.scalar_search_rmt(f, x, fprime(x), parameters=options)
#print("2: ",p_new, s)
assert_fp_equal(f_new, x+s*fprime(x), name)
assert_rmt(s, fprime(x), f(x), f_new, jacobian, options, err_msg="%s %g" % name)
def test_line_search_bsc(self):
#There is at least 1 function R^20->R to be tested, but this leads to s=None
for name, f, fprime, x, p, old_f in self.line_iter():
jac = lambda x: fprime(x)
x0 = nl._as_inexact(x)
func = lambda z: nl._as_inexact(f(nl._array_like(z, x0))).flatten()
x = x0.flatten()
jacobian = nl.asjacobian(jac)
jacobian.setup(x.copy(), f(x), func)
options = {'jacobian': jacobian, 'jac_tol': min(1e-03,1e-03*norm(f(x))), 'amin':1e-8}
#print("1: ",f(x),np.shape(dp(x)))
s, f_new= ls.scalar_search_bsc(func, x, fprime(x), f(x), parameters=options)
#print("2: ",p_new, s)
assert_fp_equal(f_new, x+s*fprime(x), name)
assert_bsc(s, x, fprime(x), func, jacobian, options, err_msg="%s %g" % name)
###(LS)###
# -- Generic line searches
def test_line_search_wolfe1(self):
c = 0
smax = 100
for name, f, fprime, x, p, old_f in self.line_iter():
f0 = f(x)
g0 = fprime(x)
self.fcount = 0
s, fc, gc, fv, ofv, gv = ls.line_search_wolfe1(f, fprime, x, p,
g0, f0, old_f,
amax=smax)
assert_equal(self.fcount, fc+gc)
assert_fp_equal(ofv, f(x))
if s is None:
continue
assert_fp_equal(fv, f(x + s*p))
assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)
if s < smax:
c += 1
assert_line_wolfe(x, p, s, f, fprime, err_msg=name)
assert_(c > 3) # check that the iterator really works...
def test_line_search_wolfe2(self):
c = 0
smax = 512
for name, f, fprime, x, p, old_f in self.line_iter():
f0 = f(x)
g0 = fprime(x)
self.fcount = 0
with suppress_warnings() as sup:
sup.filter(LineSearchWarning,
"The line search algorithm could not find a solution")
sup.filter(LineSearchWarning,
"The line search algorithm did not converge")
s, fc, gc, fv, ofv, gv = ls.line_search_wolfe2(f, fprime, x, p,
g0, f0, old_f,
amax=smax)
assert_equal(self.fcount, fc+gc)
assert_fp_equal(ofv, f(x))
assert_fp_equal(fv, f(x + s*p))
if gv is not None:
assert_array_almost_equal(gv, fprime(x + s*p), decimal=14)
if s < smax:
c += 1
assert_line_wolfe(x, p, s, f, fprime, err_msg=name)
assert_(c > 3) # check that the iterator really works...
def test_line_search_wolfe2_bounds(self):
# See gh-7475
# For this f and p, starting at a point on axis 0, the strong Wolfe
# condition 2 is met if and only if the step length s satisfies
# |x + s| <= c2 * |x|
f = lambda x: np.dot(x, x)
fp = lambda x: 2 * x
p = np.array([1, 0])
# Smallest s satisfying strong Wolfe conditions for these arguments is 30
x = -60 * p
c2 = 0.5
s, _, _, _, _, _ = ls.line_search_wolfe2(f, fp, x, p, amax=30, c2=c2)
assert_line_wolfe(x, p, s, f, fp)
s, _, _, _, _, _ = assert_warns(LineSearchWarning,
ls.line_search_wolfe2, f, fp, x, p,
amax=29, c2=c2)
assert_(s is None)
# s=30 will only be tried on the 6th iteration, so this won't converge
assert_warns(LineSearchWarning, ls.line_search_wolfe2, f, fp, x, p,
c2=c2, maxiter=5)
def test_line_search_armijo(self):
c = 0
for name, f, fprime, x, p, old_f in self.line_iter():
f0 = f(x)
g0 = fprime(x)
self.fcount = 0
s, fc, fv = ls.line_search_armijo(f, x, p, g0, f0)
c += 1
assert_equal(self.fcount, fc)
assert_fp_equal(fv, f(x + s*p))
assert_line_armijo(x, p, s, f, err_msg=name)
assert_(c >= 9)
def test_armijo_terminate_1(self):
count = [0]
def phi(s):
count[0] += 1
return -s + 0.01*s**2
s, phi1 = ls.scalar_search_armijo(phi, phi(0), -1, alpha0=1)
assert_equal(s, 1)
assert_equal(count[0], 2)
assert_armijo(s, phi)
def test_wolfe_terminate(self):
def phi(s):
count[0] += 1
return -s + 0.05*s**2
def derphi(s):
count[0] += 1
return -1 + 0.05*2*s
for func in [ls.scalar_search_wolfe1, ls.scalar_search_wolfe2]:
count = [0]
r = func(phi, derphi, phi(0), None, derphi(0))
assert_(r[0] is not None, (r, func))
assert_(count[0] <= 2 + 2, (count, func))
assert_wolfe(r[0], phi, derphi, err_msg=str(func))
| true | true |
f723ece8e845f677ad57a09ea90a361f54d50c23 | 12,408 | bzl | Python | apple/internal/apple_toolchains.bzl | wendyliga/rules_apple | ac43c1e467564d9df6b3355ff93fcaf224f2c0f9 | [
"Apache-2.0"
] | null | null | null | apple/internal/apple_toolchains.bzl | wendyliga/rules_apple | ac43c1e467564d9df6b3355ff93fcaf224f2c0f9 | [
"Apache-2.0"
] | null | null | null | apple/internal/apple_toolchains.bzl | wendyliga/rules_apple | ac43c1e467564d9df6b3355ff93fcaf224f2c0f9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared toolchain required for processing Apple bundling rules."""
AppleMacToolsToolchainInfo = provider(
    doc = """
Propagates information about an Apple toolchain to internal bundling rules that use the toolchain.
This provider exists as an internal detail for the rules to reference common, executable tools and
files used as script templates for the purposes of executing Apple actions. Defined by the
`apple_mac_tools_toolchain` rule.
This toolchain is for the tools (and support files) for actions that *must* run on a Mac.
""",
    # Each "resolved_*" field holds a struct with `executable`, `inputs`, and
    # `input_manifests`, as built by `_resolve_tools_for_executable` below.
    fields = {
        "dsym_info_plist_template": """\
A `File` referencing a plist template for dSYM bundles.
""",
        "process_and_sign_template": """\
A `File` referencing a template for a shell script to process and sign.
""",
        "resolved_alticonstool": """\
A `struct` from `ctx.resolve_tools` referencing a tool to insert alternate icons entries in the app
bundle's `Info.plist`.
""",
        "resolved_bundletool_experimental": """\
A `struct` from `ctx.resolve_tools` referencing an experimental tool to create an Apple bundle by
combining the bundling, post-processing, and signing steps into a single action that eliminates the
archiving step.
""",
        "resolved_clangrttool": """\
A `struct` from `ctx.resolve_tools` referencing a tool to find all Clang runtime libs linked to a
binary.
""",
        "resolved_codesigningtool": """\
A `struct` from `ctx.resolve_tools` referencing a tool to select the appropriate signing identity
for Apple apps and Apple executable bundles.
""",
        "resolved_dossier_codesigningtool": """\
A `struct` from `ctx.resolve_tools` referencing a tool to generate codesigning dossiers.
""",
        "resolved_environment_plist_tool": """\
A `struct` from `ctx.resolve_tools` referencing a tool for collecting dev environment values.
""",
        "resolved_imported_dynamic_framework_processor": """\
A `struct` from `ctx.resolve_tools` referencing a tool to process an imported dynamic framework
such that the given framework only contains the same slices as the app binary, every file belonging
to the dynamic framework is copied to a temporary location, and the dynamic framework is codesigned
and zipped as a cacheable artifact.
""",
        "resolved_plisttool": """\
A `struct` from `ctx.resolve_tools` referencing a tool to perform plist operations such as variable
substitution, merging, and conversion of plist files to binary format.
""",
        "resolved_provisioning_profile_tool": """\
A `struct` from `ctx.resolve_tools` referencing a tool that extracts entitlements from a
provisioning profile.
""",
        "resolved_swift_stdlib_tool": """\
A `struct` from `ctx.resolve_tools` referencing a tool that copies and lipos Swift stdlibs required
for the target to run.
""",
        "resolved_xctoolrunner": """\
A `struct` from `ctx.resolve_tools` referencing a tool that acts as a wrapper for xcrun actions.
""",
    },
)
AppleXPlatToolsToolchainInfo = provider(
    doc = """
Propagates information about an Apple toolchain to internal bundling rules that use the toolchain.
This provider exists as an internal detail for the rules to reference common, executable tools and
files used as script templates for the purposes of executing Apple actions. Defined by the
`apple_xplat_tools_toolchain` rule.
This toolchain is for the tools (and support files) for actions that can run on any platform,
i.e. - they do *not* have to run on a Mac.
""",
    # Each "resolved_*" field holds a struct with `executable`, `inputs`, and
    # `input_manifests`, as built by `_resolve_tools_for_executable` below.
    fields = {
        "resolved_bundletool": """\
A `struct` from `ctx.resolve_tools` referencing a tool to create an Apple bundle by taking a list of
files/ZIPs and destinations paths to build the directory structure for those files.
""",
        "resolved_versiontool": """\
A `struct` from `ctx.resolve_tools` referencing a tool that acts as a wrapper for xcrun actions.
""",
    },
)
def _shared_attrs():
    """Private attributes on every rule to provide access to bundling tools and other file deps."""

    # Underscore-prefixed attribute names make these implicit dependencies:
    # rule users cannot override them, and the defaults below always apply.
    return {
        "_mac_toolchain": attr.label(
            default = Label("@build_bazel_rules_apple//apple/internal:mac_tools_toolchain"),
            providers = [[AppleMacToolsToolchainInfo]],
        ),
        "_xplat_toolchain": attr.label(
            default = Label("@build_bazel_rules_apple//apple/internal:xplat_tools_toolchain"),
            providers = [[AppleXPlatToolsToolchainInfo]],
        ),
    }
def _resolve_tools_for_executable(*, rule_ctx, attr_name):
    """Resolves an executable tool plus its runfiles across the rule boundary.

    Returns a struct with `executable`, `inputs`, and `input_manifests` that
    actions can use to invoke the tool.
    """

    # TODO(b/111036105) Migrate away from this helper and its outputs once ctx.executable works
    # across rule boundaries.
    tool_target = getattr(rule_ctx.attr, attr_name)
    tool_inputs, tool_input_manifests = rule_ctx.resolve_tools(tools = [tool_target])
    return struct(
        executable = getattr(rule_ctx.executable, attr_name),
        inputs = tool_inputs,
        input_manifests = tool_input_manifests,
    )
def _apple_mac_tools_toolchain_impl(ctx):
    """Implementation for `apple_mac_tools_toolchain`.

    Resolves every executable tool attribute and packs the results into an
    `AppleMacToolsToolchainInfo` provider.
    """

    # Executable tool attribute names; each maps to the provider field
    # "resolved_" + name.
    executable_attr_names = [
        "alticonstool",
        "bundletool_experimental",
        "clangrttool",
        "codesigningtool",
        "dossier_codesigningtool",
        "environment_plist_tool",
        "imported_dynamic_framework_processor",
        "plisttool",
        "provisioning_profile_tool",
        "swift_stdlib_tool",
        "xctoolrunner",
    ]
    resolved_tools = {
        "resolved_" + name: _resolve_tools_for_executable(
            attr_name = name,
            rule_ctx = ctx,
        )
        for name in executable_attr_names
    }
    return [
        AppleMacToolsToolchainInfo(
            dsym_info_plist_template = ctx.file.dsym_info_plist_template,
            process_and_sign_template = ctx.file.process_and_sign_template,
            **resolved_tools
        ),
        DefaultInfo(),
    ]
apple_mac_tools_toolchain = rule(
    # Tool attributes use cfg = "exec" so they are built for the execution
    # platform, where the actions that run them execute.
    attrs = {
        "alticonstool": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing a tool to insert alternate icons entries in the app bundle's `Info.plist`.
""",
        ),
        "bundletool_experimental": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing an experimental tool to create an Apple bundle by combining the bundling,
post-processing, and signing steps into a single action that eliminates the archiving step.
""",
        ),
        "clangrttool": attr.label(
            cfg = "exec",
            executable = True,
            doc = "A `File` referencing a tool to find all Clang runtime libs linked to a binary.",
        ),
        "codesigningtool": attr.label(
            cfg = "exec",
            executable = True,
            doc = "A `File` referencing a tool to assist in signing bundles.",
        ),
        "dossier_codesigningtool": attr.label(
            cfg = "exec",
            executable = True,
            doc = "A `File` referencing a tool to assist in generating signing dossiers.",
        ),
        "dsym_info_plist_template": attr.label(
            cfg = "exec",
            allow_single_file = True,
            doc = "A `File` referencing a plist template for dSYM bundles.",
        ),
        "environment_plist_tool": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing a tool to collect data from the development environment to be record into
final bundles.
""",
        ),
        "imported_dynamic_framework_processor": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing a tool to process an imported dynamic framework such that the given framework
only contains the same slices as the app binary, every file belonging to the dynamic framework is
copied to a temporary location, and the dynamic framework is codesigned and zipped as a cacheable
artifact.
""",
        ),
        "plisttool": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing a tool to perform plist operations such as variable substitution, merging, and
conversion of plist files to binary format.
""",
        ),
        "process_and_sign_template": attr.label(
            allow_single_file = True,
            doc = "A `File` referencing a template for a shell script to process and sign.",
        ),
        "provisioning_profile_tool": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing a tool that extracts entitlements from a provisioning profile.
""",
        ),
        "swift_stdlib_tool": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing a tool that copies and lipos Swift stdlibs required for the target to run.
""",
        ),
        "xctoolrunner": attr.label(
            cfg = "exec",
            executable = True,
            doc = "A `File` referencing a tool that acts as a wrapper for xcrun actions.",
        ),
    },
    doc = """Represents an Apple support toolchain for tools that must run on a Mac""",
    implementation = _apple_mac_tools_toolchain_impl,
)
def _apple_xplat_tools_toolchain_impl(ctx):
    """Implementation for the `apple_xplat_tools_toolchain` rule.

    Resolves each executable tool attribute once and surfaces the results
    through `AppleXPlatToolsToolchainInfo` for the bundling rules to consume.
    """
    bundletool = _resolve_tools_for_executable(
        attr_name = "bundletool",
        rule_ctx = ctx,
    )
    versiontool = _resolve_tools_for_executable(
        attr_name = "versiontool",
        rule_ctx = ctx,
    )
    toolchain_info = AppleXPlatToolsToolchainInfo(
        resolved_bundletool = bundletool,
        resolved_versiontool = versiontool,
    )
    return [toolchain_info, DefaultInfo()]
# Rule declaring the cross-platform (non-Mac-only) Apple support toolchain.
# Both attrs are built for the exec configuration so the tools can run as
# part of actions; the rule's provider is produced by
# `_apple_xplat_tools_toolchain_impl` above.
apple_xplat_tools_toolchain = rule(
    attrs = {
        "bundletool": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing a tool to create an Apple bundle by taking a list of files/ZIPs and destination
paths to build the directory structure for those files.
""",
        ),
        "versiontool": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing a tool for extracting version info from builds.
""",
        ),
    },
    doc = """Represents an Apple support toolchain for tools that can run on any platform""",
    implementation = _apple_xplat_tools_toolchain_impl,
)
# Define the loadable module that lists the exported symbols in this file.
# Other .bzl files call `apple_toolchain_utils.shared_attrs()` to add the
# implicit `_mac_toolchain`/`_xplat_toolchain` attributes to their own rules.
apple_toolchain_utils = struct(
    shared_attrs = _shared_attrs,
)
| 39.390476 | 100 | 0.651273 |
# Provider carrying the Mac-only tool set (plus two script/plist templates).
# Each `resolved_*` field is the struct returned by
# `_resolve_tools_for_executable` for the like-named rule attribute.
# NOTE: this is the comment-stripped duplicate column of the block above in
# this dump; the definitions are otherwise identical.
AppleMacToolsToolchainInfo = provider(
    doc = """
Propagates information about an Apple toolchain to internal bundling rules that use the toolchain.
This provider exists as an internal detail for the rules to reference common, executable tools and
files used as script templates for the purposes of executing Apple actions. Defined by the
`apple_mac_tools_toolchain` rule.
This toolchain is for the tools (and support files) for actions that *must* run on a Mac.
""",
    fields = {
        "dsym_info_plist_template": """\
A `File` referencing a plist template for dSYM bundles.
""",
        "process_and_sign_template": """\
A `File` referencing a template for a shell script to process and sign.
""",
        "resolved_alticonstool": """\
A `struct` from `ctx.resolve_tools` referencing a tool to insert alternate icons entries in the app
bundle's `Info.plist`.
""",
        "resolved_bundletool_experimental": """\
A `struct` from `ctx.resolve_tools` referencing an experimental tool to create an Apple bundle by
combining the bundling, post-processing, and signing steps into a single action that eliminates the
archiving step.
""",
        "resolved_clangrttool": """\
A `struct` from `ctx.resolve_tools` referencing a tool to find all Clang runtime libs linked to a
binary.
""",
        "resolved_codesigningtool": """\
A `struct` from `ctx.resolve_tools` referencing a tool to select the appropriate signing identity
for Apple apps and Apple executable bundles.
""",
        "resolved_dossier_codesigningtool": """\
A `struct` from `ctx.resolve_tools` referencing a tool to generate codesigning dossiers.
""",
        "resolved_environment_plist_tool": """\
A `struct` from `ctx.resolve_tools` referencing a tool for collecting dev environment values.
""",
        "resolved_imported_dynamic_framework_processor": """\
A `struct` from `ctx.resolve_tools` referencing a tool to process an imported dynamic framework
such that the given framework only contains the same slices as the app binary, every file belonging
to the dynamic framework is copied to a temporary location, and the dynamic framework is codesigned
and zipped as a cacheable artifact.
""",
        "resolved_plisttool": """\
A `struct` from `ctx.resolve_tools` referencing a tool to perform plist operations such as variable
substitution, merging, and conversion of plist files to binary format.
""",
        "resolved_provisioning_profile_tool": """\
A `struct` from `ctx.resolve_tools` referencing a tool that extracts entitlements from a
provisioning profile.
""",
        "resolved_swift_stdlib_tool": """\
A `struct` from `ctx.resolve_tools` referencing a tool that copies and lipos Swift stdlibs required
for the target to run.
""",
        "resolved_xctoolrunner": """\
A `struct` from `ctx.resolve_tools` referencing a tool that acts as a wrapper for xcrun actions.
""",
    },
)
# Provider carrying the cross-platform tool set. Fixes two documentation
# defects: the `resolved_versiontool` field doc was a copy-paste of the
# xctoolrunner text ("wrapper for xcrun actions"), and "destinations paths"
# was a typo. The correct versiontool wording matches the rule's own
# `versiontool` attr doc below.
AppleXPlatToolsToolchainInfo = provider(
    doc = """
Propagates information about an Apple toolchain to internal bundling rules that use the toolchain.
This provider exists as an internal detail for the rules to reference common, executable tools and
files used as script templates for the purposes of executing Apple actions. Defined by the
`apple_xplat_tools_toolchain` rule.
This toolchain is for the tools (and support files) for actions that can run on any platform,
i.e. - they do *not* have to run on a Mac.
""",
    fields = {
        "resolved_bundletool": """\
A `struct` from `ctx.resolve_tools` referencing a tool to create an Apple bundle by taking a list of
files/ZIPs and destination paths to build the directory structure for those files.
""",
        "resolved_versiontool": """\
A `struct` from `ctx.resolve_tools` referencing a tool for extracting version info from builds.
""",
    },
)
def _shared_attrs():
    """Returns the implicit toolchain attrs shared by Apple bundling rules.

    The leading underscore keeps the attributes hidden from rule users; each
    default points at the repository-provided toolchain target, and the
    `providers` constraint guarantees the target yields the matching
    toolchain provider.
    """
    return {
        "_mac_toolchain": attr.label(
            default = Label("@build_bazel_rules_apple//apple/internal:mac_tools_toolchain"),
            providers = [[AppleMacToolsToolchainInfo]],
        ),
        "_xplat_toolchain": attr.label(
            default = Label("@build_bazel_rules_apple//apple/internal:xplat_tools_toolchain"),
            providers = [[AppleXPlatToolsToolchainInfo]],
        ),
    }
def _resolve_tools_for_executable(*, rule_ctx, attr_name):
    """Resolves an executable label attribute for use in actions.

    Args:
        rule_ctx: The rule context (`ctx`) owning the attribute.
        attr_name: Name of the executable label attribute to resolve.

    Returns:
        A `struct` with the executable `File`, the depset of runfile
        `inputs`, and the `input_manifests` required to run the tool.
    """
    # TODO(b/111036105) Migrate away from this helper and its outputs once ctx.executable works
    # across rule boundaries.
    executable = getattr(rule_ctx.executable, attr_name)
    target = getattr(rule_ctx.attr, attr_name)
    inputs, input_manifests = rule_ctx.resolve_tools(tools = [target])
    return struct(
        executable = executable,
        inputs = inputs,
        input_manifests = input_manifests,
    )
def _apple_mac_tools_toolchain_impl(ctx):
    """Implementation for the `apple_mac_tools_toolchain` rule.

    Resolves every executable tool attribute once and exposes the results,
    along with the two template files, through `AppleMacToolsToolchainInfo`.
    """

    # Each executable attr maps to a provider field named `resolved_<attr>`;
    # building the kwargs in one comprehension removes eleven copy-pasted
    # `_resolve_tools_for_executable` calls and keeps the attr list in one place.
    tool_attr_names = [
        "alticonstool",
        "bundletool_experimental",
        "clangrttool",
        "codesigningtool",
        "dossier_codesigningtool",
        "environment_plist_tool",
        "imported_dynamic_framework_processor",
        "plisttool",
        "provisioning_profile_tool",
        "swift_stdlib_tool",
        "xctoolrunner",
    ]
    resolved_tools = {
        "resolved_" + name: _resolve_tools_for_executable(
            attr_name = name,
            rule_ctx = ctx,
        )
        for name in tool_attr_names
    }
    return [
        AppleMacToolsToolchainInfo(
            dsym_info_plist_template = ctx.file.dsym_info_plist_template,
            process_and_sign_template = ctx.file.process_and_sign_template,
            **resolved_tools
        ),
        DefaultInfo(),
    ]
# Rule declaring the Mac-only Apple support toolchain. Every tool attr is
# built for the exec configuration; the two template attrs are plain single
# files. The provider is produced by `_apple_mac_tools_toolchain_impl`.
apple_mac_tools_toolchain = rule(
    attrs = {
        "alticonstool": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing a tool to insert alternate icons entries in the app bundle's `Info.plist`.
""",
        ),
        "bundletool_experimental": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing an experimental tool to create an Apple bundle by combining the bundling,
post-processing, and signing steps into a single action that eliminates the archiving step.
""",
        ),
        "clangrttool": attr.label(
            cfg = "exec",
            executable = True,
            doc = "A `File` referencing a tool to find all Clang runtime libs linked to a binary.",
        ),
        "codesigningtool": attr.label(
            cfg = "exec",
            executable = True,
            doc = "A `File` referencing a tool to assist in signing bundles.",
        ),
        "dossier_codesigningtool": attr.label(
            cfg = "exec",
            executable = True,
            doc = "A `File` referencing a tool to assist in generating signing dossiers.",
        ),
        "dsym_info_plist_template": attr.label(
            cfg = "exec",
            allow_single_file = True,
            doc = "A `File` referencing a plist template for dSYM bundles.",
        ),
        "environment_plist_tool": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing a tool to collect data from the development environment to be record into
final bundles.
""",
        ),
        "imported_dynamic_framework_processor": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing a tool to process an imported dynamic framework such that the given framework
only contains the same slices as the app binary, every file belonging to the dynamic framework is
copied to a temporary location, and the dynamic framework is codesigned and zipped as a cacheable
artifact.
""",
        ),
        "plisttool": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing a tool to perform plist operations such as variable substitution, merging, and
conversion of plist files to binary format.
""",
        ),
        "process_and_sign_template": attr.label(
            allow_single_file = True,
            doc = "A `File` referencing a template for a shell script to process and sign.",
        ),
        "provisioning_profile_tool": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing a tool that extracts entitlements from a provisioning profile.
""",
        ),
        "swift_stdlib_tool": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing a tool that copies and lipos Swift stdlibs required for the target to run.
""",
        ),
        "xctoolrunner": attr.label(
            cfg = "exec",
            executable = True,
            doc = "A `File` referencing a tool that acts as a wrapper for xcrun actions.",
        ),
    },
    doc = """Represents an Apple support toolchain for tools that must run on a Mac""",
    implementation = _apple_mac_tools_toolchain_impl,
)
def _apple_xplat_tools_toolchain_impl(ctx):
    """Rule implementation: packages the cross-platform tools into a provider."""
    resolved = {
        name: _resolve_tools_for_executable(attr_name = name, rule_ctx = ctx)
        for name in ("bundletool", "versiontool")
    }
    info = AppleXPlatToolsToolchainInfo(
        resolved_bundletool = resolved["bundletool"],
        resolved_versiontool = resolved["versiontool"],
    )
    return [info, DefaultInfo()]
# Rule declaring the cross-platform Apple support toolchain (comment-stripped
# duplicate column in this dump). Both tools are built for the exec
# configuration so actions can execute them on any platform.
apple_xplat_tools_toolchain = rule(
    attrs = {
        "bundletool": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing a tool to create an Apple bundle by taking a list of files/ZIPs and destination
paths to build the directory structure for those files.
""",
        ),
        "versiontool": attr.label(
            cfg = "exec",
            executable = True,
            doc = """
A `File` referencing a tool for extracting version info from builds.
""",
        ),
    },
    doc = """Represents an Apple support toolchain for tools that can run on any platform""",
    implementation = _apple_xplat_tools_toolchain_impl,
)
# Loadable module listing the symbols this file exports to other .bzl files;
# `shared_attrs()` supplies the implicit toolchain attrs for consuming rules.
apple_toolchain_utils = struct(
    shared_attrs = _shared_attrs,
)
| true | true |
f723ee4aca2ba51e913883657260206b3974214b | 597 | py | Python | psaw/decorators.py | LeartS/PSAW | fd0faac7205e10cc6fcb3654de8e2b23a0d79bf2 | [
"MIT"
] | null | null | null | psaw/decorators.py | LeartS/PSAW | fd0faac7205e10cc6fcb3654de8e2b23a0d79bf2 | [
"MIT"
] | null | null | null | psaw/decorators.py | LeartS/PSAW | fd0faac7205e10cc6fcb3654de8e2b23a0d79bf2 | [
"MIT"
] | null | null | null | from .exceptions import PSAWException
def requires_private_key(method):
    """Decorator: reject calls on clients that have no ``private_key`` set.

    Raises:
        PSAWException: if ``self.private_key`` is falsy when the wrapped
            method is invoked.
    """
    from functools import wraps

    # functools.wraps preserves the wrapped method's __name__/__doc__ so
    # introspection and error messages keep pointing at the real method.
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        if not self.private_key:
            raise PSAWException(
                'The {} method requires a private key'.format(method.__name__))
        return method(self, *args, **kwargs)
    return wrapper
def requires_api_key(method):
    """Decorator: reject calls on clients that have no ``api_key`` set.

    Raises:
        PSAWException: if ``self.api_key`` is falsy when the wrapped method
            is invoked.
    """
    from functools import wraps

    # functools.wraps preserves the wrapped method's __name__/__doc__ so
    # introspection and error messages keep pointing at the real method.
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        if not self.api_key:
            raise PSAWException(
                'The {} method requires an API key'.format(method.__name__))
        return method(self, *args, **kwargs)
    return wrapper
| 33.166667 | 79 | 0.633166 | from .exceptions import PSAWException
def requires_private_key(method):
    """Decorator: reject calls on clients that have no ``private_key`` set.

    Raises:
        PSAWException: if ``self.private_key`` is falsy when the wrapped
            method is invoked.
    """
    from functools import wraps

    # Preserve the wrapped method's metadata (name, docstring) for callers.
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        if not self.private_key:
            raise PSAWException(
                'The {} method requires a private key'.format(method.__name__))
        return method(self, *args, **kwargs)
    return wrapper
def requires_api_key(method):
    """Decorator: reject calls on clients that have no ``api_key`` set.

    Raises:
        PSAWException: if ``self.api_key`` is falsy when the wrapped method
            is invoked.
    """
    from functools import wraps

    # Preserve the wrapped method's metadata (name, docstring) for callers.
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        if not self.api_key:
            raise PSAWException(
                'The {} method requires an API key'.format(method.__name__))
        return method(self, *args, **kwargs)
    return wrapper
| true | true |
f723ee57e5b3ea5abd16c6bfccb377f6f8af7698 | 532 | py | Python | stats/attendance.py | lxchen2019/Python-Baseball | 0498830e92c67de8221aac1777651ae141df0ec6 | [
"MIT"
] | null | null | null | stats/attendance.py | lxchen2019/Python-Baseball | 0498830e92c67de8221aac1777651ae141df0ec6 | [
"MIT"
] | null | null | null | stats/attendance.py | lxchen2019/Python-Baseball | 0498830e92c67de8221aac1777651ae141df0ec6 | [
"MIT"
] | null | null | null | import pandas as pd
import matplotlib.pyplot as plt
from data import games
# Select the attendance info records; 'multi3' appears to carry the raw
# attendance figure per game -- confirm against the data module's schema.
attendance = games.loc[(games['type'] == 'info') & (games['multi2'] == 'attendance'), ['year', 'multi3']]
attendance.columns = ['year', 'attendance']
# The raw column may be string-typed; coerce to numeric before plotting.
attendance.loc[:, 'attendance'] = pd.to_numeric(attendance.loc[:, 'attendance'])
# Bar chart of attendance per year.
attendance.plot(x='year', y='attendance', figsize = (15, 7), kind = 'bar')
plt.xlabel('Year')
plt.ylabel('Attendance')
# Dashed horizontal line marking mean attendance across all plotted years.
plt.axhline(y=attendance['attendance'].mean(), label='Mean', linestyle='--', color='green')
plt.show()
| 33.25 | 105 | 0.680451 | import pandas as pd
import matplotlib.pyplot as plt
from data import games
# (Comment-stripped duplicate of the attendance script above in this dump.)
# Keep rows flagged as attendance info; 'multi3' presumably holds the count.
attendance = games.loc[(games['type'] == 'info') & (games['multi2'] == 'attendance'), ['year', 'multi3']]
attendance.columns = ['year', 'attendance']
# Coerce the raw values to numbers so plotting and mean() work.
attendance.loc[:, 'attendance'] = pd.to_numeric(attendance.loc[:, 'attendance'])
attendance.plot(x='year', y='attendance', figsize = (15, 7), kind = 'bar')
plt.xlabel('Year')
plt.ylabel('Attendance')
# Overlay the overall mean as a dashed reference line.
plt.axhline(y=attendance['attendance'].mean(), label='Mean', linestyle='--', color='green')
plt.show()
| true | true |
f723ef0ead92ce0867c0219f60d16635c90e2cd6 | 2,755 | py | Python | ch1/recipe4/load_save_model.py | xinglu/Tensorflow-2.0-Computer-Vision-Cookbook | d02c57d566f9df8b5980d58fc51a1194faef442c | [
"MIT"
] | 1 | 2021-11-27T05:44:01.000Z | 2021-11-27T05:44:01.000Z | ch1/recipe4/load_save_model.py | ArjunVarma39/Tensorflow-2.0-Computer-Vision-Cookbook | 92ea6713f664cff9eccaaccea8ac756f808e2066 | [
"MIT"
] | null | null | null | ch1/recipe4/load_save_model.py | ArjunVarma39/Tensorflow-2.0-Computer-Vision-Cookbook | 92ea6713f664cff9eccaaccea8ac756f808e2066 | [
"MIT"
] | 1 | 2021-01-21T04:36:33.000Z | 2021-01-21T04:36:33.000Z | import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
def load_data():
    """Fetch MNIST, scale pixels to [0, 1], add a channel axis, one-hot labels.

    Returns:
        Tuple of (X_train, y_train, X_test, y_test) ready for a Conv2D model.
    """
    (train_images, train_labels), (test_images, test_labels) = mnist.load_data()
    # Scale 8-bit pixel intensities into [0.0, 1.0].
    train_images = train_images.astype('float32') / 255.0
    test_images = test_images.astype('float32') / 255.0
    # Grayscale images need an explicit trailing channel dimension.
    train_images = np.expand_dims(train_images, axis=3)
    test_images = np.expand_dims(test_images, axis=3)
    # One-hot encode the integer class labels.
    binarizer = LabelBinarizer()
    train_labels = binarizer.fit_transform(train_labels)
    test_labels = binarizer.fit_transform(test_labels)
    return train_images, train_labels, test_images, test_labels
def build_network():
    """Build the MNIST classifier CNN via the Keras functional API.

    Layer configuration and names are fixed (conv -> ReLU -> batch norm ->
    pooling -> dropout -> two dense layers -> softmax over 10 classes).
    """
    input_layer = Input(shape=(28, 28, 1), name='input_layer')
    x = Conv2D(kernel_size=(2, 2),
               padding='same',
               strides=(2, 2),
               filters=32,
               name='convolution_1')(input_layer)
    x = ReLU(name='activation_1')(x)
    x = BatchNormalization(name='batch_normalization_1')(x)
    x = MaxPooling2D(pool_size=(2, 2),
                     strides=(1, 1),
                     padding='same',
                     name='pooling_1')(x)
    # Dropout regularizes the features before the dense classification head.
    x = Dropout(rate=0.5, name='dropout')(x)
    x = Flatten(name='flatten')(x)
    x = Dense(units=128, name='dense_1')(x)
    x = ReLU(name='activation_2')(x)
    x = Dense(units=10, name='dense_2')(x)
    output = Softmax(name='output')(x)
    return Model(inputs=input_layer, outputs=output, name='my_model')
def evaluate(model, X_test, y_test):
    """Print the model's accuracy on the test set.

    ``model.evaluate`` returns (loss, accuracy); only accuracy is reported.
    """
    loss_and_metrics = model.evaluate(X_test, y_test, verbose=0)
    accuracy = loss_and_metrics[1]
    print(f'Accuracy: {accuracy}')
print('Loading and pre-processing data.')
X_train, y_train, X_test, y_test = load_data()
# Hold out 20% of the training data for validation (train_size=0.8).
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, train_size=0.8)
# Build network.
model = build_network()
# Compile and train model.
print('Training network...')
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_valid, y_valid), epochs=40, batch_size=1024)
# Persist the full model (architecture + weights) in a single HDF5 file.
print('Saving model and weights as HDF5.')
model.save('model_and_weights.hdf5')
print('Loading model and weights as HDF5.')
loaded_model = load_model('model_and_weights.hdf5')
# Verify the round-tripped model still evaluates on the held-out test set.
print('Evaluating using loaded model.')
evaluate(loaded_model, X_test, y_test)
| 34.012346 | 91 | 0.684211 | import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
def load_data():
    """Load MNIST and return (X_train, y_train, X_test, y_test) ready for training."""
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    # Scale 8-bit pixel values into [0.0, 1.0].
    X_train = X_train.astype('float32') / 255.0
    X_test = X_test.astype('float32') / 255.0
    # Add the trailing channel axis Conv2D expects: (N, 28, 28) -> (N, 28, 28, 1).
    X_train = np.expand_dims(X_train, axis=3)
    X_test = np.expand_dims(X_test, axis=3)
    # One-hot encode the integer class labels.
    label_binarizer = LabelBinarizer()
    y_train = label_binarizer.fit_transform(y_train)
    y_test = label_binarizer.fit_transform(y_test)
    return X_train, y_train, X_test, y_test
def build_network():
    """Build the MNIST classifier CNN with the Keras functional API."""
    input_layer = Input(shape=(28, 28, 1), name='input_layer')
    convolution_1 = Conv2D(kernel_size=(2, 2),
                           padding='same',
                           strides=(2, 2),
                           filters=32,
                           name='convolution_1')(input_layer)
    activation_1 = ReLU(name='activation_1')(convolution_1)
    batch_normalization_1 = BatchNormalization(name='batch_normalization_1')(activation_1)
    pooling_1 = MaxPooling2D(pool_size=(2, 2),
                             strides=(1, 1),
                             padding='same',
                             name='pooling_1')(batch_normalization_1)
    # Dropout regularizes features before the dense classification head.
    dropout = Dropout(rate=0.5, name='dropout')(pooling_1)
    flatten = Flatten(name='flatten')(dropout)
    dense_1 = Dense(units=128, name='dense_1')(flatten)
    activation_2 = ReLU(name='activation_2')(dense_1)
    dense_2 = Dense(units=10, name='dense_2')(activation_2)
    output = Softmax(name='output')(dense_2)
    network = Model(inputs=input_layer, outputs=output, name='my_model')
    return network
def evaluate(model, X_test, y_test):
    """Print test-set accuracy; `model.evaluate` returns (loss, accuracy)."""
    _, accuracy = model.evaluate(X_test, y_test, verbose=0)
    print(f'Accuracy: {accuracy}')
print('Loading and pre-processing data.')
X_train, y_train, X_test, y_test = load_data()
# Hold out 20% of the training data for validation.
X_train, X_valid, y_train, y_valid = train_test_split(X_train, y_train, train_size=0.8)
model = build_network()
print('Training network...')
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train, y_train, validation_data=(X_valid, y_valid), epochs=40, batch_size=1024)
# Persist architecture + weights in one HDF5 file, then reload and re-check.
print('Saving model and weights as HDF5.')
model.save('model_and_weights.hdf5')
print('Loading model and weights as HDF5.')
loaded_model = load_model('model_and_weights.hdf5')
print('Evaluating using loaded model.')
evaluate(loaded_model, X_test, y_test)
| true | true |
f723f0b24682ef7fb196a22b5764bea459ee3007 | 1,103 | py | Python | String-Algorithms/String-Algorithms-master/One Edit Away/oneEditAway.py | SrijaniSom/dsa-code-store | 148292c8f963214629f271ec8601e73d3d0e145e | [
"MIT"
] | 3 | 2021-02-19T07:09:46.000Z | 2021-10-04T10:12:45.000Z | String-Algorithms/String-Algorithms-master/One Edit Away/oneEditAway.py | SrijaniSom/dsa-code-store | 148292c8f963214629f271ec8601e73d3d0e145e | [
"MIT"
] | 6 | 2021-02-21T19:35:18.000Z | 2021-05-06T11:51:37.000Z | String-Algorithms/String-Algorithms-master/One Edit Away/oneEditAway.py | SrijaniSom/dsa-code-store | 148292c8f963214629f271ec8601e73d3d0e145e | [
"MIT"
] | 6 | 2021-02-21T19:28:03.000Z | 2021-10-04T03:35:57.000Z | class solution:
def oneEditAwayInsert(self,input1,input2):
index1 = 0
index2 = 0
while((index2 < len(input2)) and (index1 < len(input1))):
if(input1[index1] != input2[index2]):
if(index1 != index2):
return False
index2+=1
else:
index1+=1
index2+=1
return True
def oneEditAwayReplace(self,input1,input2):
flag = False
for i in range(len(input1)):
if(input2[i]!=input1[i]):
if(flag):
return False
flag = True
return True
def oneEditAway(self,input1,input2):
if(len(input1)==len(input2)):
return self.oneEditAwayReplace(input1,input2)
elif(len(input1)+1==len(input2)):
return self.oneEditAwayInsert(input1,input2)
elif(len(input1)-1==len(input2)):
return self.oneEditAwayInsert(input2,input1)
return False
# Read the two strings from stdin and report whether they are one edit apart.
input1 = input()
input2 = input()
sol = solution()
print(sol.oneEditAway(input1,input2))
| 29.810811 | 65 | 0.537625 | class solution:
    def oneEditAwayInsert(self,input1,input2):
        """Return True if input2 is input1 with exactly one character inserted.

        Expects input2 to be one character longer than input1.
        """
        index1 = 0
        index2 = 0
        while((index2 < len(input2)) and (index1 < len(input1))):
            if(input1[index1] != input2[index2]):
                # Cursors already diverged once; a second mismatch fails.
                if(index1 != index2):
                    return False
                index2+=1
            else:
                index1+=1
                index2+=1
        return True
    def oneEditAwayReplace(self,input1,input2):
        """Return True if equal-length strings differ in at most one position."""
        flag = False
        for i in range(len(input1)):
            if(input2[i]!=input1[i]):
                # flag marks that one mismatch has been consumed already.
                if(flag):
                    return False
                flag = True
        return True
    def oneEditAway(self,input1,input2):
        """Return True if the strings are at most one insert/delete/replace apart."""
        if(len(input1)==len(input2)):
            return self.oneEditAwayReplace(input1,input2)
        elif(len(input1)+1==len(input2)):
            return self.oneEditAwayInsert(input1,input2)
        elif(len(input1)-1==len(input2)):
            # Deletion from input1 is equivalent to insertion into input2.
            return self.oneEditAwayInsert(input2,input1)
        return False
# Read two strings from stdin and print whether they are one edit apart.
input1 = input()
input2 = input()
sol = solution()
print(sol.oneEditAway(input1,input2))
| true | true |
f723f1da4d9a432f7832da330a3205c3fef79a2c | 6,577 | py | Python | python/ccxt/async/coinexchange.py | hippylover/ccxt | db304e95b699c1971ad37b9053ae71fcb5dc3b03 | [
"MIT"
] | 2 | 2018-02-28T02:51:59.000Z | 2018-02-28T03:25:51.000Z | python/ccxt/async/coinexchange.py | hippylover/ccxt | db304e95b699c1971ad37b9053ae71fcb5dc3b03 | [
"MIT"
] | null | null | null | python/ccxt/async/coinexchange.py | hippylover/ccxt | db304e95b699c1971ad37b9053ae71fcb5dc3b03 | [
"MIT"
] | 9 | 2018-02-20T18:24:00.000Z | 2019-06-18T14:23:11.000Z | # -*- coding: utf-8 -*-
from ccxt.async.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
class coinexchange (Exchange):
    """ccxt adapter for CoinExchange's public REST API (v1).

    Only public endpoints are wired up ('privateAPI': False): currency and
    market discovery, tickers, and order books. Transport, signing hooks, and
    shared parsing helpers come from the `Exchange` base class.
    """
    def describe(self):
        """Return exchange metadata merged over the base-class defaults."""
        return self.deep_extend(super(coinexchange, self).describe(), {
            'id': 'coinexchange',
            'name': 'CoinExchange',
            'countries': ['IN', 'JP', 'KR', 'VN', 'US'],
            'rateLimit': 1000,
            # new metainfo interface
            'has': {
                'privateAPI': False,
                'fetchTrades': False,
                'fetchCurrencies': True,
                'fetchTickers': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/34842303-29c99fca-f71c-11e7-83c1-09d900cb2334.jpg',
                'api': 'https://www.coinexchange.io/api/v1',
                'www': 'https://www.coinexchange.io',
                'doc': 'https://coinexchangeio.github.io/slate/',
                'fees': 'https://www.coinexchange.io/fees',
            },
            'api': {
                'public': {
                    'get': [
                        'getcurrency',
                        'getcurrencies',
                        'getmarkets',
                        'getmarketsummaries',
                        'getmarketsummary',
                        'getorderbook',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'maker': 0.0015,
                    'taker': 0.0015,
                },
            },
            'precision': {
                'amount': 8,
                'price': 8,
            },
        })
    def common_currency_code(self, currency):
        """Identity mapping: CoinExchange ticker codes are used unchanged."""
        return currency
    async def fetch_currencies(self, params={}):
        """Fetch all listed currencies, keyed by unified currency code.

        Wallet status drives both 'active' (bool) and 'status' ('ok'/'disabled').
        """
        currencies = await self.publicGetCurrencies(params)
        precision = self.precision['amount']
        result = {}
        for i in range(0, len(currencies)):
            currency = currencies[i]
            # NOTE: `id` shadows the builtin within this loop body.
            id = currency['CurrencyID']
            code = self.common_currency_code(currency['TickerCode'])
            active = currency['WalletStatus'] == 'online'
            status = 'ok'
            if not active:
                status = 'disabled'
            result[code] = {
                'id': id,
                'code': code,
                'name': currency['Name'],
                'active': active,
                'status': status,
                'precision': precision,
                'limits': {
                    'amount': {
                        'min': None,
                        'max': math.pow(10, precision),
                    },
                    'price': {
                        'min': math.pow(10, -precision),
                        'max': math.pow(10, precision),
                    },
                    'cost': {
                        'min': None,
                        'max': None,
                    },
                    'withdraw': {
                        'min': None,
                        'max': math.pow(10, precision),
                    },
                },
                'info': currency,
            }
        return result
    async def fetch_markets(self):
        """Fetch all markets as a list of unified market structures."""
        markets = await self.publicGetMarkets()
        result = []
        for i in range(0, len(markets)):
            market = markets[i]
            id = market['MarketID']
            base = self.common_currency_code(market['MarketAssetCode'])
            quote = self.common_currency_code(market['BaseCurrencyCode'])
            symbol = base + '/' + quote
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'active': True,
                'lot': None,
                'info': market,
            })
        return result
    def parse_ticker(self, ticker, market=None):
        """Convert a raw market summary into a unified ticker structure.

        If no market is supplied, it is looked up by the summary's MarketID.
        The exchange provides no timestamp, so the local clock is used.
        """
        if not market:
            marketId = ticker['MarketID']
            market = self.marketsById[marketId]
        symbol = None
        if market:
            symbol = market['symbol']
        timestamp = self.milliseconds()
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': float(ticker['HighPrice']),
            'low': float(ticker['LowPrice']),
            'bid': float(ticker['BidPrice']),
            'ask': float(ticker['AskPrice']),
            'vwap': None,
            'open': None,
            'close': None,
            'first': None,
            'last': float(ticker['LastPrice']),
            'change': float(ticker['Change']),
            'percentage': None,
            'average': None,
            'baseVolume': None,
            'quoteVolume': float(ticker['Volume']),
            'info': ticker,
        }
    async def fetch_ticker(self, symbol, params={}):
        """Fetch a single market's ticker by unified symbol."""
        await self.load_markets()
        market = self.market(symbol)
        ticker = await self.publicGetMarketsummary(self.extend({
            'market_id': market['id'],
        }, params))
        return self.parse_ticker(ticker, market)
    async def fetch_tickers(self, symbols=None, params={}):
        """Fetch all tickers at once, keyed by unified symbol."""
        await self.load_markets()
        tickers = await self.publicGetMarketsummaries(params)
        result = {}
        for i in range(0, len(tickers)):
            ticker = self.parse_ticker(tickers[i])
            symbol = ticker['symbol']
            result[symbol] = ticker
        return result
    async def fetch_order_book(self, symbol, params={}):
        """Fetch the order book; map CoinExchange field names to bids/asks."""
        await self.load_markets()
        orderbook = await self.publicGetOrderbook(self.extend({
            'market_id': self.market_id(symbol),
        }, params))
        return self.parse_order_book(orderbook, None, 'BuyOrders', 'SellOrders', 'Price', 'Quantity')
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the request descriptor; public calls carry params as a query string."""
        url = self.urls['api'] + '/' + path
        if api == 'public':
            params = self.urlencode(params)
            if len(params):
                url += '?' + params
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Issue the HTTP request and unwrap the API envelope.

        Raises ExchangeError when the response's 'success' flag is not 1;
        otherwise returns the 'result' payload.
        """
        response = await self.fetch2(path, api, method, params, headers, body)
        success = self.safe_integer(response, 'success')
        if success != 1:
            raise ExchangeError(response['message'])
        return response['result']
| 35.360215 | 126 | 0.466626 |
from ccxt.async.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
class coinexchange (Exchange):
    """ccxt adapter for CoinExchange's public REST API (v1).

    Comment-stripped duplicate column of the class above in this dump; the
    code is otherwise identical. Only public endpoints are implemented:
    currencies, markets, tickers, and order books.
    """
    def describe(self):
        """Return exchange metadata merged over the base-class defaults."""
        return self.deep_extend(super(coinexchange, self).describe(), {
            'id': 'coinexchange',
            'name': 'CoinExchange',
            'countries': ['IN', 'JP', 'KR', 'VN', 'US'],
            'rateLimit': 1000,
            'has': {
                'privateAPI': False,
                'fetchTrades': False,
                'fetchCurrencies': True,
                'fetchTickers': True,
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/34842303-29c99fca-f71c-11e7-83c1-09d900cb2334.jpg',
                'api': 'https://www.coinexchange.io/api/v1',
                'www': 'https://www.coinexchange.io',
                'doc': 'https://coinexchangeio.github.io/slate/',
                'fees': 'https://www.coinexchange.io/fees',
            },
            'api': {
                'public': {
                    'get': [
                        'getcurrency',
                        'getcurrencies',
                        'getmarkets',
                        'getmarketsummaries',
                        'getmarketsummary',
                        'getorderbook',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'maker': 0.0015,
                    'taker': 0.0015,
                },
            },
            'precision': {
                'amount': 8,
                'price': 8,
            },
        })
    def common_currency_code(self, currency):
        """Identity mapping: CoinExchange ticker codes are used unchanged."""
        return currency
    async def fetch_currencies(self, params={}):
        """Fetch all listed currencies keyed by unified code."""
        currencies = await self.publicGetCurrencies(params)
        precision = self.precision['amount']
        result = {}
        for i in range(0, len(currencies)):
            currency = currencies[i]
            id = currency['CurrencyID']
            code = self.common_currency_code(currency['TickerCode'])
            active = currency['WalletStatus'] == 'online'
            status = 'ok'
            if not active:
                status = 'disabled'
            result[code] = {
                'id': id,
                'code': code,
                'name': currency['Name'],
                'active': active,
                'status': status,
                'precision': precision,
                'limits': {
                    'amount': {
                        'min': None,
                        'max': math.pow(10, precision),
                    },
                    'price': {
                        'min': math.pow(10, -precision),
                        'max': math.pow(10, precision),
                    },
                    'cost': {
                        'min': None,
                        'max': None,
                    },
                    'withdraw': {
                        'min': None,
                        'max': math.pow(10, precision),
                    },
                },
                'info': currency,
            }
        return result
    async def fetch_markets(self):
        """Fetch all markets as unified market structures."""
        markets = await self.publicGetMarkets()
        result = []
        for i in range(0, len(markets)):
            market = markets[i]
            id = market['MarketID']
            base = self.common_currency_code(market['MarketAssetCode'])
            quote = self.common_currency_code(market['BaseCurrencyCode'])
            symbol = base + '/' + quote
            result.append({
                'id': id,
                'symbol': symbol,
                'base': base,
                'quote': quote,
                'active': True,
                'lot': None,
                'info': market,
            })
        return result
    def parse_ticker(self, ticker, market=None):
        """Convert a raw market summary into a unified ticker structure."""
        if not market:
            marketId = ticker['MarketID']
            market = self.marketsById[marketId]
        symbol = None
        if market:
            symbol = market['symbol']
        # The exchange reports no timestamp; stamp with the local clock.
        timestamp = self.milliseconds()
        return {
            'symbol': symbol,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'high': float(ticker['HighPrice']),
            'low': float(ticker['LowPrice']),
            'bid': float(ticker['BidPrice']),
            'ask': float(ticker['AskPrice']),
            'vwap': None,
            'open': None,
            'close': None,
            'first': None,
            'last': float(ticker['LastPrice']),
            'change': float(ticker['Change']),
            'percentage': None,
            'average': None,
            'baseVolume': None,
            'quoteVolume': float(ticker['Volume']),
            'info': ticker,
        }
    async def fetch_ticker(self, symbol, params={}):
        """Fetch a single market's ticker by unified symbol."""
        await self.load_markets()
        market = self.market(symbol)
        ticker = await self.publicGetMarketsummary(self.extend({
            'market_id': market['id'],
        }, params))
        return self.parse_ticker(ticker, market)
    async def fetch_tickers(self, symbols=None, params={}):
        """Fetch all tickers at once, keyed by unified symbol."""
        await self.load_markets()
        tickers = await self.publicGetMarketsummaries(params)
        result = {}
        for i in range(0, len(tickers)):
            ticker = self.parse_ticker(tickers[i])
            symbol = ticker['symbol']
            result[symbol] = ticker
        return result
    async def fetch_order_book(self, symbol, params={}):
        """Fetch the order book, mapping CoinExchange field names to bids/asks."""
        await self.load_markets()
        orderbook = await self.publicGetOrderbook(self.extend({
            'market_id': self.market_id(symbol),
        }, params))
        return self.parse_order_book(orderbook, None, 'BuyOrders', 'SellOrders', 'Price', 'Quantity')
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the request descriptor; public calls carry params as a query string."""
        url = self.urls['api'] + '/' + path
        if api == 'public':
            params = self.urlencode(params)
            if len(params):
                url += '?' + params
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Issue the request; raise ExchangeError unless 'success' == 1."""
        response = await self.fetch2(path, api, method, params, headers, body)
        success = self.safe_integer(response, 'success')
        if success != 1:
            raise ExchangeError(response['message'])
        return response['result']
| false | true |
f723f2da7cb69cc97d7d2508483485aef439d3c6 | 5,384 | py | Python | 1-50/p13.py | YiWeiShen/Project-Euler-Hints | a79cacab075dd98d393516f083aaa7ffc6115a06 | [
"MIT"
] | 1 | 2019-02-25T13:00:31.000Z | 2019-02-25T13:00:31.000Z | 1-50/p13.py | YiWeiShen/Project-Euler-Hints | a79cacab075dd98d393516f083aaa7ffc6115a06 | [
"MIT"
] | null | null | null | 1-50/p13.py | YiWeiShen/Project-Euler-Hints | a79cacab075dd98d393516f083aaa7ffc6115a06 | [
"MIT"
] | null | null | null | l = []
a = '37107287533902102798797998220837590246510135740250\
46376937677490009712648124896970078050417018260538\
74324986199524741059474233309513058123726617309629\
91942213363574161572522430563301811072406154908250\
23067588207539346171171980310421047513778063246676\
89261670696623633820136378418383684178734361726757\
28112879812849979408065481931592621691275889832738\
44274228917432520321923589422876796487670272189318\
47451445736001306439091167216856844588711603153276\
70386486105843025439939619828917593665686757934951\
62176457141856560629502157223196586755079324193331\
64906352462741904929101432445813822663347944758178\
92575867718337217661963751590579239728245598838407\
58203565325359399008402633568948830189458628227828\
80181199384826282014278194139940567587151170094390\
35398664372827112653829987240784473053190104293586\
86515506006295864861532075273371959191420517255829\
71693888707715466499115593487603532921714970056938\
54370070576826684624621495650076471787294438377604\
53282654108756828443191190634694037855217779295145\
36123272525000296071075082563815656710885258350721\
45876576172410976447339110607218265236877223636045\
17423706905851860660448207621209813287860733969412\
81142660418086830619328460811191061556940512689692\
51934325451728388641918047049293215058642563049483\
62467221648435076201727918039944693004732956340691\
15732444386908125794514089057706229429197107928209\
55037687525678773091862540744969844508330393682126\
18336384825330154686196124348767681297534375946515\
80386287592878490201521685554828717201219257766954\
78182833757993103614740356856449095527097864797581\
16726320100436897842553539920931837441497806860984\
48403098129077791799088218795327364475675590848030\
87086987551392711854517078544161852424320693150332\
59959406895756536782107074926966537676326235447210\
69793950679652694742597709739166693763042633987085\
41052684708299085211399427365734116182760315001271\
65378607361501080857009149939512557028198746004375\
35829035317434717326932123578154982629742552737307\
94953759765105305946966067683156574377167401875275\
88902802571733229619176668713819931811048770190271\
25267680276078003013678680992525463401061632866526\
36270218540497705585629946580636237993140746255962\
24074486908231174977792365466257246923322810917141\
91430288197103288597806669760892938638285025333403\
34413065578016127815921815005561868836468420090470\
23053081172816430487623791969842487255036638784583\
11487696932154902810424020138335124462181441773470\
63783299490636259666498587618221225225512486764533\
67720186971698544312419572409913959008952310058822\
95548255300263520781532296796249481641953868218774\
76085327132285723110424803456124867697064507995236\
37774242535411291684276865538926205024910326572967\
23701913275725675285653248258265463092207058596522\
29798860272258331913126375147341994889534765745501\
18495701454879288984856827726077713721403798879715\
38298203783031473527721580348144513491373226651381\
34829543829199918180278916522431027392251122869539\
40957953066405232632538044100059654939159879593635\
29746152185502371307642255121183693803580388584903\
41698116222072977186158236678424689157993532961922\
62467957194401269043877107275048102390895523597457\
23189706772547915061505504953922979530901129967519\
86188088225875314529584099251203829009407770775672\
11306739708304724483816533873502340845647058077308\
82959174767140363198008187129011875491310547126581\
97623331044818386269515456334926366572897563400500\
42846280183517070527831839425882145521227251250327\
55121603546981200581762165212827652751691296897789\
32238195734329339946437501907836945765883352399886\
75506164965184775180738168837861091527357929701337\
62177842752192623401942399639168044983993173312731\
32924185707147349566916674687634660915035914677504\
99518671430235219628894890102423325116913619626622\
73267460800591547471830798392868535206946944540724\
76841822524674417161514036427982273348055556214818\
97142617910342598647204516893989422179826088076852\
87783646182799346313767754307809363333018982642090\
10848802521674670883215120185883543223812876952786\
71329612474782464538636993009049310363619763878039\
62184073572399794223406235393808339651327408011116\
66627891981488087797941876876144230030984490851411\
60661826293682836764744779239180335110989069790714\
85786944089552990653640447425576083659976645795096\
66024396409905389607120198219976047599490197230297\
64913982680032973156037120041377903785566085089252\
16730939319872750275468906903707539413042652315011\
94809377245048795150954100921645863754710598436791\
78639167021187492431995700641917969777599028300699\
15368713711936614952811305876380278410754449733078\
40789923115535562561142322423255033685442488917353\
44889911501440648020369068063960672322193204149535\
41503128880339536053299340368006977710650566631954\
81234880673210146739058568557934581403627822703280\
82616570773948327592232845941706525094512325230608\
22918802058777319719839450180888072429661980811197\
77158542502016545090413245809786882778948721859617\
72107838435069186155435662884062257473692284509516\
20849603980134001723930671666823555245252804609722\
53503534226472524250874054075591789781264330331690'
if __name__ == '__main__':
    # Split the 5000-digit string into one hundred 50-digit numbers.
    for x in range(0, 5000, 50):
        # Plain slicing replaces the needless ''.join(list(a)[x:x+50]) copy;
        # the resulting substring is identical.
        l.append(a[x:x + 50])
    # Named 'total' so the builtin sum() is not shadowed.
    total = 0
    for x in l:
        total += int(x)
    print(total)
| 47.646018 | 56 | 0.944279 | l = []
a = '37107287533902102798797998220837590246510135740250\
46376937677490009712648124896970078050417018260538\
74324986199524741059474233309513058123726617309629\
91942213363574161572522430563301811072406154908250\
23067588207539346171171980310421047513778063246676\
89261670696623633820136378418383684178734361726757\
28112879812849979408065481931592621691275889832738\
44274228917432520321923589422876796487670272189318\
47451445736001306439091167216856844588711603153276\
70386486105843025439939619828917593665686757934951\
62176457141856560629502157223196586755079324193331\
64906352462741904929101432445813822663347944758178\
92575867718337217661963751590579239728245598838407\
58203565325359399008402633568948830189458628227828\
80181199384826282014278194139940567587151170094390\
35398664372827112653829987240784473053190104293586\
86515506006295864861532075273371959191420517255829\
71693888707715466499115593487603532921714970056938\
54370070576826684624621495650076471787294438377604\
53282654108756828443191190634694037855217779295145\
36123272525000296071075082563815656710885258350721\
45876576172410976447339110607218265236877223636045\
17423706905851860660448207621209813287860733969412\
81142660418086830619328460811191061556940512689692\
51934325451728388641918047049293215058642563049483\
62467221648435076201727918039944693004732956340691\
15732444386908125794514089057706229429197107928209\
55037687525678773091862540744969844508330393682126\
18336384825330154686196124348767681297534375946515\
80386287592878490201521685554828717201219257766954\
78182833757993103614740356856449095527097864797581\
16726320100436897842553539920931837441497806860984\
48403098129077791799088218795327364475675590848030\
87086987551392711854517078544161852424320693150332\
59959406895756536782107074926966537676326235447210\
69793950679652694742597709739166693763042633987085\
41052684708299085211399427365734116182760315001271\
65378607361501080857009149939512557028198746004375\
35829035317434717326932123578154982629742552737307\
94953759765105305946966067683156574377167401875275\
88902802571733229619176668713819931811048770190271\
25267680276078003013678680992525463401061632866526\
36270218540497705585629946580636237993140746255962\
24074486908231174977792365466257246923322810917141\
91430288197103288597806669760892938638285025333403\
34413065578016127815921815005561868836468420090470\
23053081172816430487623791969842487255036638784583\
11487696932154902810424020138335124462181441773470\
63783299490636259666498587618221225225512486764533\
67720186971698544312419572409913959008952310058822\
95548255300263520781532296796249481641953868218774\
76085327132285723110424803456124867697064507995236\
37774242535411291684276865538926205024910326572967\
23701913275725675285653248258265463092207058596522\
29798860272258331913126375147341994889534765745501\
18495701454879288984856827726077713721403798879715\
38298203783031473527721580348144513491373226651381\
34829543829199918180278916522431027392251122869539\
40957953066405232632538044100059654939159879593635\
29746152185502371307642255121183693803580388584903\
41698116222072977186158236678424689157993532961922\
62467957194401269043877107275048102390895523597457\
23189706772547915061505504953922979530901129967519\
86188088225875314529584099251203829009407770775672\
11306739708304724483816533873502340845647058077308\
82959174767140363198008187129011875491310547126581\
97623331044818386269515456334926366572897563400500\
42846280183517070527831839425882145521227251250327\
55121603546981200581762165212827652751691296897789\
32238195734329339946437501907836945765883352399886\
75506164965184775180738168837861091527357929701337\
62177842752192623401942399639168044983993173312731\
32924185707147349566916674687634660915035914677504\
99518671430235219628894890102423325116913619626622\
73267460800591547471830798392868535206946944540724\
76841822524674417161514036427982273348055556214818\
97142617910342598647204516893989422179826088076852\
87783646182799346313767754307809363333018982642090\
10848802521674670883215120185883543223812876952786\
71329612474782464538636993009049310363619763878039\
62184073572399794223406235393808339651327408011116\
66627891981488087797941876876144230030984490851411\
60661826293682836764744779239180335110989069790714\
85786944089552990653640447425576083659976645795096\
66024396409905389607120198219976047599490197230297\
64913982680032973156037120041377903785566085089252\
16730939319872750275468906903707539413042652315011\
94809377245048795150954100921645863754710598436791\
78639167021187492431995700641917969777599028300699\
15368713711936614952811305876380278410754449733078\
40789923115535562561142322423255033685442488917353\
44889911501440648020369068063960672322193204149535\
41503128880339536053299340368006977710650566631954\
81234880673210146739058568557934581403627822703280\
82616570773948327592232845941706525094512325230608\
22918802058777319719839450180888072429661980811197\
77158542502016545090413245809786882778948721859617\
72107838435069186155435662884062257473692284509516\
20849603980134001723930671666823555245252804609722\
53503534226472524250874054075591789781264330331690'
if __name__ == '__main__':
    # Split the 5000-digit string into one hundred 50-digit numbers.
    for x in range(0, 5000, 50):
        # Plain slicing replaces the needless ''.join(list(a)[x:x+50]) copy;
        # the resulting substring is identical.
        l.append(a[x:x + 50])
    # Named 'total' so the builtin sum() is not shadowed.
    total = 0
    for x in l:
        total += int(x)
    print(total)
| true | true |
f723f3fb8c08361f204ed3b00ea1c886a4107a59 | 1,457 | py | Python | botutils/searchforlinks.py | yashprakash13/Honeysuckle | d8adb83a63318a8c4994d18aea6fd28116b46f4e | [
"MIT"
] | 2 | 2020-08-11T17:23:05.000Z | 2021-02-20T04:02:33.000Z | botutils/searchforlinks.py | yashprakash13/Honeysuckle | d8adb83a63318a8c4994d18aea6fd28116b46f4e | [
"MIT"
] | null | null | null | botutils/searchforlinks.py | yashprakash13/Honeysuckle | d8adb83a63318a8c4994d18aea6fd28116b46f4e | [
"MIT"
] | 1 | 2020-08-15T05:29:14.000Z | 2020-08-15T05:29:14.000Z | import re
import requests
from bs4 import BeautifulSoup
from botutils.constants import IS_URL_REGEX
def get_ffn_url_from_query(query):
    """Google-search `query` + "fanfiction" and return the first
    fanfiction.net story URL found, or None when nothing matches.

    Args:
        query: Free-text search terms supplied by the user.

    Returns:
        The first matching URL (via IS_URL_REGEX) or None.
    """
    url = 'https://www.google.com/search?q=' + \
        query+"+fanfiction"
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    ffn_list = []
    for link in soup.findAll('a'):
        # .get() instead of [] so anchors without an href attribute are
        # skipped rather than raising KeyError.
        href = link.get('href')
        # Escaped dot so e.g. "fanfictionXnet" can no longer match.
        if href and re.search(r"fanfiction\.net/s/", href) is not None:
            ffn_list.append(href)
    if not ffn_list:
        return None
    ffn_url = re.search(IS_URL_REGEX, ffn_list[0])
    return ffn_url.group(0)
def get_ao3_url_from_query(query):
    """Google-search `query` + "archiveofourown" and return the first
    AO3 work/chapter URL found, or None when nothing matches.

    Args:
        query: Free-text search terms supplied by the user.

    Returns:
        The first matching URL (via IS_URL_REGEX) or None.
    """
    url = 'https://www.google.com/search?q=' + \
        query+"+archiveofourown"
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    ao3_list = []
    for link in soup.findAll('a'):
        # .get() instead of [] so anchors without an href attribute are
        # skipped rather than raising KeyError.
        href = link.get('href')
        if not href:
            continue
        # Escaped dots so only the real archiveofourown.org domain matches.
        # Check /works/ first, then /chapters/, preserving the original order.
        if re.search(r"\barchiveofourown\.org/works/\b", href) is not None:
            ao3_list.append(href)
        if re.search(r"\barchiveofourown\.org/chapters/\b", href) is not None:
            ao3_list.append(href)
    if not ao3_list:
        return None
    ao3_url = re.search(IS_URL_REGEX, ao3_list[0])
    return ao3_url.group(0)
| 22.075758 | 80 | 0.614276 | import re
import requests
from bs4 import BeautifulSoup
from botutils.constants import IS_URL_REGEX
def get_ffn_url_from_query(query):
    """Google-search `query` + "fanfiction" and return the first
    fanfiction.net story URL found, or None when nothing matches.

    Args:
        query: Free-text search terms supplied by the user.

    Returns:
        The first matching URL (via IS_URL_REGEX) or None.
    """
    url = 'https://www.google.com/search?q=' + \
        query+"+fanfiction"
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    ffn_list = []
    for link in soup.findAll('a'):
        # .get() instead of [] so anchors without an href attribute are
        # skipped rather than raising KeyError.
        href = link.get('href')
        # Escaped dot so e.g. "fanfictionXnet" can no longer match.
        if href and re.search(r"fanfiction\.net/s/", href) is not None:
            ffn_list.append(href)
    if not ffn_list:
        return None
    ffn_url = re.search(IS_URL_REGEX, ffn_list[0])
    return ffn_url.group(0)
def get_ao3_url_from_query(query):
    """Google-search `query` + "archiveofourown" and return the first
    AO3 work/chapter URL found, or None when nothing matches.

    Args:
        query: Free-text search terms supplied by the user.

    Returns:
        The first matching URL (via IS_URL_REGEX) or None.
    """
    url = 'https://www.google.com/search?q=' + \
        query+"+archiveofourown"
    page = requests.get(url)
    soup = BeautifulSoup(page.content, 'html.parser')
    ao3_list = []
    for link in soup.findAll('a'):
        # .get() instead of [] so anchors without an href attribute are
        # skipped rather than raising KeyError.
        href = link.get('href')
        if not href:
            continue
        # Escaped dots so only the real archiveofourown.org domain matches.
        # Check /works/ first, then /chapters/, preserving the original order.
        if re.search(r"\barchiveofourown\.org/works/\b", href) is not None:
            ao3_list.append(href)
        if re.search(r"\barchiveofourown\.org/chapters/\b", href) is not None:
            ao3_list.append(href)
    if not ao3_list:
        return None
    ao3_url = re.search(IS_URL_REGEX, ao3_list[0])
    return ao3_url.group(0)
| true | true |
f723f47402f819e72e9efeddd056d1ccea3fb2f6 | 1,107 | py | Python | bach/tests/unit/bach/test_series_json.py | objectiv/objectiv-analytics | 86ec1508f71c2d61ea7d67479800e4dc417a46e1 | [
"Apache-2.0"
] | 23 | 2021-11-10T21:37:42.000Z | 2022-03-30T11:46:19.000Z | bach/tests/unit/bach/test_series_json.py | objectiv/objectiv-analytics | 86ec1508f71c2d61ea7d67479800e4dc417a46e1 | [
"Apache-2.0"
] | 163 | 2021-11-10T10:11:26.000Z | 2022-03-31T16:04:27.000Z | bach/tests/unit/bach/test_series_json.py | objectiv/objectiv-analytics | 86ec1508f71c2d61ea7d67479800e4dc417a46e1 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2022 Objectiv B.V.
"""
import pytest
from bach.series.series_json import JsonBigQueryAccessorImpl
from tests.unit.bach.util import get_fake_df
@pytest.mark.skip_postgres
def test_bq_get_slice_partial_expr(dialect):
    # _get_slice_partial_expr belongs to the BigQuery-specific
    # JsonBigQueryAccessorImpl, so every other dialect is skipped.
    frame = get_fake_df(
        dialect=dialect,
        index_names=['i'],
        data_names=['a'],
        dtype='json'
    )
    accessor = JsonBigQueryAccessorImpl(frame.a)

    def rendered(value, is_start):
        return accessor._get_slice_partial_expr(value, is_start).to_sql(dialect)

    assert rendered(None, True) == '0'
    assert rendered(None, False) == '9223372036854775807'
    assert rendered(5, False) == '5'
    assert rendered(5, True) == '5'
    negative = '(ARRAY_LENGTH(JSON_QUERY_ARRAY(`a`)) -5)'
    assert rendered(-5, False) == negative
    assert rendered(-5, True) == negative
| 36.9 | 105 | 0.712737 | import pytest
from bach.series.series_json import JsonBigQueryAccessorImpl
from tests.unit.bach.util import get_fake_df
@pytest.mark.skip_postgres
def test_bq_get_slice_partial_expr(dialect):
    # _get_slice_partial_expr belongs to the BigQuery-specific
    # JsonBigQueryAccessorImpl, so every other dialect is skipped.
    frame = get_fake_df(
        dialect=dialect,
        index_names=['i'],
        data_names=['a'],
        dtype='json'
    )
    accessor = JsonBigQueryAccessorImpl(frame.a)

    def rendered(value, is_start):
        return accessor._get_slice_partial_expr(value, is_start).to_sql(dialect)

    assert rendered(None, True) == '0'
    assert rendered(None, False) == '9223372036854775807'
    assert rendered(5, False) == '5'
    assert rendered(5, True) == '5'
    negative = '(ARRAY_LENGTH(JSON_QUERY_ARRAY(`a`)) -5)'
    assert rendered(-5, False) == negative
    assert rendered(-5, True) == negative
| true | true |
f723f53cdb2ac0facc06438330246f32fcc5c7e7 | 3,101 | py | Python | pupil/models/clustering.py | hadi-gharibi/pupil | 9d266572cc1ebf659e87206be6e5f1548959d510 | [
"Apache-2.0"
] | 2 | 2022-03-31T23:17:14.000Z | 2022-03-31T23:24:58.000Z | pupil/models/clustering.py | hadi-gharibi/pupil | 9d266572cc1ebf659e87206be6e5f1548959d510 | [
"Apache-2.0"
] | null | null | null | pupil/models/clustering.py | hadi-gharibi/pupil | 9d266572cc1ebf659e87206be6e5f1548959d510 | [
"Apache-2.0"
] | null | null | null | from abc import ABC, abstractmethod
from typing import Dict, Protocol, Tuple
import faiss
import numpy as np
from pupil.types import NDArray2D
from sklearn.cluster import AgglomerativeClustering
class Clustering(Protocol):
    """Structural interface for clustering backends.

    Implementations expose a scikit-learn style ``fit``/``predict`` pair plus
    a query for distances to every cluster center.
    """
    # Number of clusters the implementation is configured to produce.
    n_clusters: int
    def fit(self, X: NDArray2D):
        """Learn cluster centers from the training matrix ``X``."""
        ...
    def predict(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]:
        """Assign each row of ``X`` to its nearest cluster.

        Returns:
            Tuple[NDArray2D, NDArray2D]: (distances, cluster ids) of the
            single best cluster per query row.
        """
        ...
    def distance_to_cluster_centers(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]:
        """After having the center of your clusters, you can use this function
        to see the distance from X to the center of all clusters.

        Args:
            X (NDArray2D): The input to check.

        Returns:
            Tuple[NDArray2D, NDArray2D]: Return (Distances, cluster_ids).
            Shape of each: (#queries, #clusters)
        """
        ...
class FaissKMeansClustering:
    """KMeans clustering backed by ``faiss.Kmeans``; satisfies the
    ``Clustering`` protocol above."""
    def __init__(
        self,
        n_clusters: int,
        n_init: int = 10,
        max_iter: int = 100,
    ) -> None:
        """Store hyper-parameters.

        Args:
            n_clusters: Number of centroids to learn.
            n_init: Number of re-runs (faiss ``nredo``).
            max_iter: Iterations per run (faiss ``niter``).
        """
        self.n_clusters = n_clusters
        self.n_init = n_init
        self.max_iter = max_iter
        # Populated by fit(); names mirror scikit-learn's KMeans attributes.
        self.cluster_centers_ = None
        self.inertia_ = None
    def fit(self, X: NDArray2D) -> None:
        """Train KMeans on ``X``, recording centroids and final objective."""
        self.kmeans = faiss.Kmeans(
            d=X.shape[1],
            k=self.n_clusters,
            niter=self.max_iter,
            nredo=self.n_init,
        )
        # NOTE(review): this divides by the norm of the *whole* matrix, not
        # per-row L2 norms — confirm that is the intended scaling.
        X = X / np.linalg.norm(X)
        self.kmeans.train(X.astype(np.float32))
        self.cluster_centers_ = self.kmeans.centroids
        # Last entry of faiss's per-iteration objective history.
        self.inertia_ = self.kmeans.obj[-1]
    def predict(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]:
        """Return (distances, ids) of the single nearest centroid per row."""
        X = X / np.linalg.norm(X)
        return self.kmeans.index.search(X.astype(np.float32), 1)  # type: ignore
    def distance_to_cluster_centers(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]:
        """Return (distances, ids) to all ``n_clusters`` centroids per row."""
        X = X / np.linalg.norm(X)
        D, I = self.kmeans.index.search(X.astype(np.float32), self.n_clusters)  # type: ignore
        return D, I
class Splitter(Protocol):
    """Structural interface for splitting clustered items into named groups."""
    def fit(self, X: NDArray2D, clsuter_inds: NDArray2D):
        """Compute the grouping from data ``X`` and its cluster indices.

        Note: the ``clsuter_inds`` spelling matches the existing
        implementations of this protocol.
        """
        ...
    @property
    def splits(
        self,
    ):
        """Mapping from group tag to the member indices of that group."""
        ...
class Distance1DSplitter:
    """Partition items, ordered by a 1-D distance value, into ``nsplits``
    contiguous "priority" bands via agglomerative clustering."""
    def __init__(self, nsplits=3):
        self.nsplits = nsplits
    def fit(self, X: NDArray2D, clsuter_inds: NDArray2D) -> None:
        """Cluster the 1-D values in ``X`` and precompute the band ranges."""
        self.clsuter_inds = clsuter_inds
        self.alg = AgglomerativeClustering(n_clusters=self.nsplits)
        self.alg.fit(X.reshape((-1, 1)))
        self._tag_to_index_dict = self._tag_to_index()
    def _tag_to_index(self) -> Dict[str, Tuple[int, int]]:
        """Map "priority_<i>" tags to half-open (start, end) index ranges."""
        tags = ["priority_" + str(i) for i in range(self.nsplits)]
        labels = self.alg.labels_
        # Positions where consecutive labels differ mark band boundaries.
        cut_points = np.argwhere(np.diff(labels) != 0).flatten().tolist()
        cut_points = [-1] + cut_points + [len(labels)]
        mapping = {}
        for i in range(len(cut_points) - 1):
            mapping[tags[i]] = (cut_points[i] + 1, cut_points[i + 1] + 1)
        return mapping
    @property
    def splits(self):
        """Return {tag: member indices} using the precomputed ranges."""
        return {
            tag: self.clsuter_inds[0][bounds[0]:bounds[1]]
            for tag, bounds in self._tag_to_index_dict.items()
        }
| 28.981308 | 132 | 0.603354 | from abc import ABC, abstractmethod
from typing import Dict, Protocol, Tuple
import faiss
import numpy as np
from pupil.types import NDArray2D
from sklearn.cluster import AgglomerativeClustering
class Clustering(Protocol):
n_clusters: int
def fit(self, X: NDArray2D):
...
def predict(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]:
...
def distance_to_cluster_centers(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]:
...
class FaissKMeansClustering:
def __init__(
self,
n_clusters: int,
n_init: int = 10,
max_iter: int = 100,
) -> None:
self.n_clusters = n_clusters
self.n_init = n_init
self.max_iter = max_iter
self.cluster_centers_ = None
self.inertia_ = None
def fit(self, X: NDArray2D) -> None:
self.kmeans = faiss.Kmeans(
d=X.shape[1],
k=self.n_clusters,
niter=self.max_iter,
nredo=self.n_init,
)
X = X / np.linalg.norm(X)
self.kmeans.train(X.astype(np.float32))
self.cluster_centers_ = self.kmeans.centroids
self.inertia_ = self.kmeans.obj[-1]
def predict(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]:
X = X / np.linalg.norm(X)
return self.kmeans.index.search(X.astype(np.float32), 1)
def distance_to_cluster_centers(self, X: NDArray2D) -> Tuple[NDArray2D, NDArray2D]:
X = X / np.linalg.norm(X)
D, I = self.kmeans.index.search(X.astype(np.float32), self.n_clusters)
return D, I
class Splitter(Protocol):
def fit(self, X: NDArray2D, clsuter_inds: NDArray2D):
...
@property
def splits(
self,
):
...
class Distance1DSplitter:
    """Partition items, ordered by a 1-D distance value, into ``nsplits``
    contiguous "priority" bands via agglomerative clustering."""
    def __init__(self, nsplits=3):
        self.nsplits = nsplits
    def fit(self, X: NDArray2D, clsuter_inds: NDArray2D) -> None:
        """Cluster the 1-D values in ``X`` and precompute the band ranges."""
        self.clsuter_inds = clsuter_inds
        self.alg = AgglomerativeClustering(n_clusters=self.nsplits)
        self.alg.fit(X.reshape((-1, 1)))
        self._tag_to_index_dict = self._tag_to_index()
    def _tag_to_index(self) -> Dict[str, Tuple[int, int]]:
        """Map "priority_<i>" tags to half-open (start, end) index ranges."""
        tags = ["priority_" + str(i) for i in range(self.nsplits)]
        labels = self.alg.labels_
        # Positions where consecutive labels differ mark band boundaries.
        cut_points = np.argwhere(np.diff(labels) != 0).flatten().tolist()
        cut_points = [-1] + cut_points + [len(labels)]
        mapping = {}
        for i in range(len(cut_points) - 1):
            mapping[tags[i]] = (cut_points[i] + 1, cut_points[i + 1] + 1)
        return mapping
    @property
    def splits(self):
        """Return {tag: member indices} using the precomputed ranges."""
        return {
            tag: self.clsuter_inds[0][bounds[0]:bounds[1]]
            for tag, bounds in self._tag_to_index_dict.items()
        }
| true | true |
f723f5a286cb6bd6f00ddea9012b0aaff76a5524 | 6,044 | py | Python | sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query3.py | eyal0/beam | 9c6922976cc2a5c6a2ef836c1986ff769cda99a5 | [
"Apache-2.0"
] | 2 | 2017-12-19T18:34:54.000Z | 2019-05-14T21:50:06.000Z | sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query3.py | eyal0/beam | 9c6922976cc2a5c6a2ef836c1986ff769cda99a5 | [
"Apache-2.0"
] | 80 | 2020-01-16T09:55:09.000Z | 2020-10-03T13:43:07.000Z | sdks/python/apache_beam/testing/benchmarks/nexmark/queries/query3.py | eyal0/beam | 9c6922976cc2a5c6a2ef836c1986ff769cda99a5 | [
"Apache-2.0"
] | 1 | 2020-11-11T18:45:54.000Z | 2020-11-11T18:45:54.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Query 3, 'Local Item Suggestion'. Who is selling in OR, ID or CA in category
10, and for what auction ids? In CQL syntax::
SELECT Istream(P.name, P.city, P.state, A.id)
FROM Auction A [ROWS UNBOUNDED], Person P [ROWS UNBOUNDED]
WHERE A.seller = P.id
AND (P.state = `OR' OR P.state = `ID' OR P.state = `CA')
AND A.category = 10;
We'll implement this query to allow 'new auction' events to come before the
'new person' events for the auction seller. Those auctions will be stored until
the matching person is seen. Then all subsequent auctions for a person will use
the stored person record.
"""
from __future__ import absolute_import
import logging
import apache_beam as beam
from apache_beam.testing.benchmarks.nexmark.models import nexmark_model
from apache_beam.testing.benchmarks.nexmark.queries import nexmark_query_util
from apache_beam.testing.benchmarks.nexmark.queries.nexmark_query_util import ResultNames
from apache_beam.transforms import trigger
from apache_beam.transforms import userstate
from apache_beam.transforms import window
from apache_beam.transforms.userstate import on_timer
def load(events, metadata=None):
  """Build the Query 3 pipeline: join category-10 auctions with their
  sellers from OR/ID/CA and emit (name, city, state, auction id) dicts.

  Args:
    events: PCollection of nexmark events.
    metadata: benchmark options dict; 'max_auction_waiting_time' bounds how
      long JoinFn keeps a seller record in state.
  """
  num_events_in_pane = 30
  # Re-window into the global window, firing a pane every 30 elements so
  # results stream out instead of waiting on an unbounded input.
  windowed_events = (
      events
      | beam.WindowInto(
          window.GlobalWindows(),
          trigger=trigger.Repeatedly(trigger.AfterCount(num_events_in_pane)),
          accumulation_mode=trigger.AccumulationMode.DISCARDING))
  # Auctions in category 10, keyed by seller id.
  auction_by_seller_id = (
      windowed_events
      | nexmark_query_util.JustAuctions()
      | 'query3_filter_category' >> beam.Filter(lambda auc: auc.category == 10)
      | 'query3_key_by_seller' >> beam.ParDo(
          nexmark_query_util.AuctionBySellerFn()))
  # Persons from the three target states, keyed by person id.
  person_by_id = (
      windowed_events
      | nexmark_query_util.JustPerson()
      | 'query3_filter_region' >>
      beam.Filter(lambda person: person.state in ['OR', 'ID', 'CA'])
      | 'query3_key_by_person_id' >> beam.ParDo(
          nexmark_query_util.PersonByIdFn()))
  # Cogroup on the shared key (seller id == person id), join with JoinFn,
  # and format each (auction, person) pair into the result dict.
  return ({
      nexmark_query_util.AUCTION_TAG: auction_by_seller_id,
      nexmark_query_util.PERSON_TAG: person_by_id,
  }
          | beam.CoGroupByKey()
          | 'query3_join' >> beam.ParDo(
              JoinFn(metadata.get('max_auction_waiting_time')))
          | 'query3_output' >> beam.Map(
              lambda t: {
                  ResultNames.NAME: t[1].name,
                  ResultNames.CITY: t[1].city,
                  ResultNames.STATE: t[1].state,
                  ResultNames.AUCTION_ID: t[0].id
              }))
class JoinFn(beam.DoFn):
  """
  Join auctions and person by person id and emit their product one pair at
  a time.
  We know a person may submit any number of auctions. Thus new person event
  must have the person record stored in persistent state in order to match
  future auctions by that person.
  However we know that each auction is associated with at most one person, so
  only need to store auction records in persistent state until we have seen the
  corresponding person record. And of course may have already seen that record.
  """
  # State/timer identifiers referenced by the specs below.
  AUCTIONS = 'auctions_state'
  PERSON = 'person_state'
  PERSON_EXPIRING = 'person_state_expiring'
  # Auctions seen before their seller: a bag, since there may be many per key.
  auction_spec = userstate.BagStateSpec(AUCTIONS, nexmark_model.Auction.CODER)
  # The seller record itself: at most one value per key.
  person_spec = userstate.ReadModifyWriteStateSpec(
      PERSON, nexmark_model.Person.CODER)
  # Event-time timer that garbage-collects the stored person record.
  person_timer_spec = userstate.TimerSpec(
      PERSON_EXPIRING, userstate.TimeDomain.WATERMARK)
  def __init__(self, max_auction_wait_time):
    # How long (event time) to keep a person in state for future auctions.
    self.max_auction_wait_time = max_auction_wait_time
  def process(
      self,
      element,
      auction_state=beam.DoFn.StateParam(auction_spec),
      person_state=beam.DoFn.StateParam(person_spec),
      person_timer=beam.DoFn.TimerParam(person_timer_spec)):
    # extract group with tags from element tuple
    _, group = element
    existing_person = person_state.read()
    if existing_person:
      # the person exists in person_state for this person id
      for auction in group[nexmark_query_util.AUCTION_TAG]:
        yield auction, existing_person
      return
    new_person = None
    for person in group[nexmark_query_util.PERSON_TAG]:
      if not new_person:
        new_person = person
      else:
        # Duplicate person for the same key: keep the first, log the rest.
        logging.error(
            'two new person wtih same key: %s and %s' % (person, new_person))
        continue
      # read all pending auctions for this person id, output and flush it
      pending_auctions = auction_state.read()
      if pending_auctions:
        for pending_auction in pending_auctions:
          yield pending_auction, new_person
        auction_state.clear()
      # output new auction for this person id
      for auction in group[nexmark_query_util.AUCTION_TAG]:
        yield auction, new_person
      # remember person for max_auction_wait_time seconds for future auctions
      person_state.write(new_person)
      person_timer.set(new_person.date_time + self.max_auction_wait_time)
    # we are done if we have seen a new person
    if new_person:
      return
    # remember auction until we see person
    for auction in group[nexmark_query_util.AUCTION_TAG]:
      auction_state.add(auction)
  @on_timer(person_timer_spec)
  def expiry(self, person_state=beam.DoFn.StateParam(person_spec)):
    # Timer fired: the wait window elapsed; drop the stored person record.
    person_state.clear()
| 38.012579 | 89 | 0.720218 |
from __future__ import absolute_import
import logging
import apache_beam as beam
from apache_beam.testing.benchmarks.nexmark.models import nexmark_model
from apache_beam.testing.benchmarks.nexmark.queries import nexmark_query_util
from apache_beam.testing.benchmarks.nexmark.queries.nexmark_query_util import ResultNames
from apache_beam.transforms import trigger
from apache_beam.transforms import userstate
from apache_beam.transforms import window
from apache_beam.transforms.userstate import on_timer
def load(events, metadata=None):
num_events_in_pane = 30
windowed_events = (
events
| beam.WindowInto(
window.GlobalWindows(),
trigger=trigger.Repeatedly(trigger.AfterCount(num_events_in_pane)),
accumulation_mode=trigger.AccumulationMode.DISCARDING))
auction_by_seller_id = (
windowed_events
| nexmark_query_util.JustAuctions()
| 'query3_filter_category' >> beam.Filter(lambda auc: auc.category == 10)
| 'query3_key_by_seller' >> beam.ParDo(
nexmark_query_util.AuctionBySellerFn()))
person_by_id = (
windowed_events
| nexmark_query_util.JustPerson()
| 'query3_filter_region' >>
beam.Filter(lambda person: person.state in ['OR', 'ID', 'CA'])
| 'query3_key_by_person_id' >> beam.ParDo(
nexmark_query_util.PersonByIdFn()))
return ({
nexmark_query_util.AUCTION_TAG: auction_by_seller_id,
nexmark_query_util.PERSON_TAG: person_by_id,
}
| beam.CoGroupByKey()
| 'query3_join' >> beam.ParDo(
JoinFn(metadata.get('max_auction_waiting_time')))
| 'query3_output' >> beam.Map(
lambda t: {
ResultNames.NAME: t[1].name,
ResultNames.CITY: t[1].city,
ResultNames.STATE: t[1].state,
ResultNames.AUCTION_ID: t[0].id
}))
class JoinFn(beam.DoFn):
  """Stateful DoFn that joins each new person to auctions with the same key.

  Auctions that arrive before their seller's person record are buffered in
  bag state. Once the person arrives, both the buffered and the co-grouped
  auctions are emitted, and the person is cached in state until a
  watermark-based timer expires.
  """

  AUCTIONS = 'auctions_state'
  PERSON = 'person_state'
  PERSON_EXPIRING = 'person_state_expiring'

  auction_spec = userstate.BagStateSpec(AUCTIONS, nexmark_model.Auction.CODER)
  person_spec = userstate.ReadModifyWriteStateSpec(
      PERSON, nexmark_model.Person.CODER)
  person_timer_spec = userstate.TimerSpec(
      PERSON_EXPIRING, userstate.TimeDomain.WATERMARK)

  def __init__(self, max_auction_wait_time):
    # How long (in event time) a cached person remains joinable before the
    # expiry timer clears it.
    self.max_auction_wait_time = max_auction_wait_time

  def process(
      self,
      element,
      auction_state=beam.DoFn.StateParam(auction_spec),
      person_state=beam.DoFn.StateParam(person_spec),
      person_timer=beam.DoFn.TimerParam(person_timer_spec)):
    """Yields (auction, person) pairs for a single co-grouped key."""
    _, group = element
    existing_person = person_state.read()
    # A person was previously cached for this key: join and finish early.
    if existing_person:
      for auction in group[nexmark_query_util.AUCTION_TAG]:
        yield auction, existing_person
      return
    new_person = None
    for person in group[nexmark_query_util.PERSON_TAG]:
      if not new_person:
        new_person = person
      else:
        logging.error(
            'two new person with same key: %s and %s', person, new_person)
        continue
      # Flush any auctions that were buffered before this person arrived.
      pending_auctions = auction_state.read()
      if pending_auctions:
        for pending_auction in pending_auctions:
          yield pending_auction, new_person
        auction_state.clear()
      for auction in group[nexmark_query_util.AUCTION_TAG]:
        yield auction, new_person
      person_state.write(new_person)
      person_timer.set(new_person.date_time + self.max_auction_wait_time)
    if new_person:
      return
    # No person has arrived for this key yet: buffer the auctions.
    for auction in group[nexmark_query_util.AUCTION_TAG]:
      auction_state.add(auction)

  @on_timer(person_timer_spec)
  def expiry(self, person_state=beam.DoFn.StateParam(person_spec)):
    """Drops the cached person once the wait window has passed."""
    person_state.clear()
| true | true |
f723f5e67a95794ea25c2e636d620a2789ac60ad | 48,091 | py | Python | core/controllers/admin.py | Ragify/oppia | a530c7e4d5274b646afc7dd7040d13c7ed45b829 | [
"Apache-2.0"
] | 4 | 2021-09-16T16:46:53.000Z | 2022-02-06T13:00:14.000Z | core/controllers/admin.py | Ragify/oppia | a530c7e4d5274b646afc7dd7040d13c7ed45b829 | [
"Apache-2.0"
] | null | null | null | core/controllers/admin.py | Ragify/oppia | a530c7e4d5274b646afc7dd7040d13c7ed45b829 | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Controllers for the admin view."""
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import logging
import random
from core import feconf
from core import python_utils
from core import utils
from core.constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.controllers import domain_objects_validator as validation_method
from core.domain import auth_services
from core.domain import blog_services
from core.domain import collection_services
from core.domain import config_domain
from core.domain import config_services
from core.domain import email_manager
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import opportunity_services
from core.domain import platform_feature_services as feature_services
from core.domain import platform_parameter_domain as parameter_domain
from core.domain import question_domain
from core.domain import question_services
from core.domain import recommendations_services
from core.domain import rights_manager
from core.domain import role_services
from core.domain import search_services
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import stats_services
from core.domain import story_domain
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import topic_services
from core.domain import user_services
from core.domain import wipeout_service
class AdminPage(base.BaseHandler):
    """Renders the admin page shown in the App Engine admin console."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {'GET': {}}

    @acl_decorators.can_access_admin_page
    def get(self):
        """Serves the admin page's main HTML template."""
        self.render_template('admin-page.mainpage.html')
class AdminHandler(base.BaseHandler):
    """Handler for the admin page.

    GET returns the data needed to render the admin page (config
    properties, demo activity lists, role metadata, topic summaries and
    feature flags). POST performs one of a fixed set of admin actions,
    selected by the 'action' payload field.
    """

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'GET': {},
        'POST': {
            'action': {
                'schema': {
                    'type': 'basestring',
                    # Only these admin actions are accepted; anything else
                    # fails schema validation before reaching post().
                    'choices': [
                        'reload_exploration', 'reload_collection',
                        'generate_dummy_explorations', 'clear_search_index',
                        'generate_dummy_new_structures_data',
                        'generate_dummy_new_skill_data',
                        'save_config_properties', 'revert_config_property',
                        'upload_topic_similarities',
                        'regenerate_topic_related_opportunities',
                        'update_feature_flag_rules'
                    ]
                },
                # TODO(#13331): Remove default_value when it is confirmed that,
                # for clearing the search indices of exploration & collection
                # 'action' field must be provided in the payload.
                'default_value': None
            },
            'exploration_id': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'collection_id': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'num_dummy_exps_to_generate': {
                'schema': {
                    'type': 'int'
                },
                'default_value': None
            },
            'num_dummy_exps_to_publish': {
                'schema': {
                    'type': 'int'
                },
                'default_value': None
            },
            'new_config_property_values': {
                'schema': {
                    'type': 'object_dict',
                    'validation_method': (
                        validation_method.validate_new_config_property_values)
                },
                'default_value': None
            },
            'config_property_id': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'data': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'topic_id': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'feature_name': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'commit_message': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'new_rules': {
                'schema': {
                    'type': 'list',
                    'items': {
                        'type': 'object_dict',
                        'object_class': parameter_domain.PlatformParameterRule
                    }
                },
                'default_value': None
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def get(self):
        """Handles GET requests."""
        demo_exploration_ids = list(feconf.DEMO_EXPLORATIONS.keys())

        topic_summaries = topic_fetchers.get_all_topic_summaries()
        topic_summary_dicts = [
            summary.to_dict() for summary in topic_summaries]

        feature_flag_dicts = feature_services.get_all_feature_flag_dicts()

        config_properties = config_domain.Registry.get_config_property_schemas()
        # Removes promo-bar related configs as promo-bar is handled by
        # release coordinators in /release-coordinator page.
        del config_properties['promo_bar_enabled']
        del config_properties['promo_bar_message']

        # Remove blog related configs as they will be handled by 'blog admins'
        # on blog admin page.
        del config_properties['max_number_of_tags_assigned_to_blog_post']
        del config_properties['list_of_default_tags_for_blog_post']
        self.render_json({
            'config_properties': config_properties,
            'demo_collections': sorted(feconf.DEMO_COLLECTIONS.items()),
            'demo_explorations': sorted(feconf.DEMO_EXPLORATIONS.items()),
            'demo_exploration_ids': demo_exploration_ids,
            'updatable_roles': role_services.UPDATABLE_ROLES,
            'viewable_roles': role_services.VIEWABLE_ROLES,
            'human_readable_roles': role_services.HUMAN_READABLE_ROLES,
            'role_to_actions': role_services.get_role_actions(),
            'topic_summaries': topic_summary_dicts,
            'feature_flags': feature_flag_dicts,
        })

    @acl_decorators.can_access_admin_page
    def post(self):
        """Handles POST requests.

        Dispatches on the schema-validated 'action' payload field. On
        failure the error is logged, returned to the client as JSON, and
        then re-raised so the request still fails server-side.
        """
        action = self.normalized_payload.get('action')
        try:
            # 'result' stays empty for actions with no payload to return.
            result = {}
            if action == 'reload_exploration':
                exploration_id = self.normalized_payload.get('exploration_id')
                self._reload_exploration(exploration_id)
            elif action == 'reload_collection':
                collection_id = self.normalized_payload.get('collection_id')
                self._reload_collection(collection_id)
            elif action == 'generate_dummy_explorations':
                num_dummy_exps_to_generate = self.normalized_payload.get(
                    'num_dummy_exps_to_generate')
                num_dummy_exps_to_publish = self.normalized_payload.get(
                    'num_dummy_exps_to_publish')

                if num_dummy_exps_to_generate < num_dummy_exps_to_publish:
                    raise self.InvalidInputException(
                        'Generate count cannot be less than publish count')
                else:
                    self._generate_dummy_explorations(
                        num_dummy_exps_to_generate, num_dummy_exps_to_publish)
            elif action == 'clear_search_index':
                search_services.clear_collection_search_index()
                search_services.clear_exploration_search_index()
            elif action == 'generate_dummy_new_structures_data':
                self._load_dummy_new_structures_data()
            elif action == 'generate_dummy_new_skill_data':
                self._generate_dummy_skill_and_questions()
            elif action == 'save_config_properties':
                new_config_property_values = self.normalized_payload.get(
                    'new_config_property_values')
                logging.info(
                    '[ADMIN] %s saved config property values: %s' %
                    (self.user_id, new_config_property_values))
                for (name, value) in new_config_property_values.items():
                    config_services.set_property(self.user_id, name, value)
            elif action == 'revert_config_property':
                config_property_id = self.normalized_payload.get(
                    'config_property_id')
                logging.info(
                    '[ADMIN] %s reverted config property: %s' %
                    (self.user_id, config_property_id))
                config_services.revert_property(
                    self.user_id, config_property_id)
            elif action == 'upload_topic_similarities':
                data = self.normalized_payload.get('data')
                recommendations_services.update_topic_similarities(data)
            elif action == 'regenerate_topic_related_opportunities':
                topic_id = self.normalized_payload.get('topic_id')
                opportunities_count = (
                    opportunity_services
                    .regenerate_opportunities_related_to_topic(
                        topic_id, delete_existing_opportunities=True))
                result = {
                    'opportunities_count': opportunities_count
                }
            elif action == 'update_feature_flag_rules':
                feature_name = self.normalized_payload.get('feature_name')
                new_rule_dicts = self.normalized_payload.get('new_rules')
                commit_message = self.normalized_payload.get('commit_message')

                try:
                    feature_services.update_feature_flag_rules(
                        feature_name, self.user_id, commit_message,
                        new_rule_dicts)
                except (
                        utils.ValidationError,
                        feature_services.FeatureFlagNotFoundException) as e:
                    raise self.InvalidInputException(e)
                logging.info(
                    '[ADMIN] %s updated feature %s with new rules: '
                    '%s.' % (self.user_id, feature_name, new_rule_dicts))
            self.render_json(result)
        except Exception as e:
            logging.exception('[ADMIN] %s', e)
            self.render_json({'error': python_utils.UNICODE(e)})
            python_utils.reraise_exception()

    def _reload_exploration(self, exploration_id):
        """Reloads the exploration in dev_mode corresponding to the given
        exploration id.

        Args:
            exploration_id: str. The exploration id.

        Raises:
            Exception. Cannot reload an exploration in production.
        """
        if constants.DEV_MODE:
            logging.info(
                '[ADMIN] %s reloaded exploration %s' %
                (self.user_id, exploration_id))
            exp_services.load_demo(python_utils.UNICODE(exploration_id))
            rights_manager.release_ownership_of_exploration(
                user_services.get_system_user(),
                python_utils.UNICODE(exploration_id))
        else:
            raise Exception('Cannot reload an exploration in production.')

    def _create_dummy_question(
            self, question_id, question_content, linked_skill_ids):
        """Creates a dummy question object with the given question ID.

        Args:
            question_id: str. The ID of the question to be created.
            question_content: str. The question content.
            linked_skill_ids: list(str). The IDs of the skills to which the
                question is linked to.

        Returns:
            Question. The dummy question with given values.
        """
        state = state_domain.State.create_default_state(
            'ABC', is_initial_state=True)
        state.update_interaction_id('TextInput')
        state.update_interaction_customization_args({
            'placeholder': {
                'value': {
                    'content_id': 'ca_placeholder_0',
                    'unicode_str': ''
                }
            },
            'rows': {'value': 1}
        })

        state.update_next_content_id_index(1)
        state.update_linked_skill_id(None)
        state.update_content(state_domain.SubtitledHtml('1', question_content))
        # Register content IDs for the placeholder, content and default
        # outcome so voiceover/translation bookkeeping stays consistent.
        recorded_voiceovers = state_domain.RecordedVoiceovers({})
        written_translations = state_domain.WrittenTranslations({})
        recorded_voiceovers.add_content_id_for_voiceover('ca_placeholder_0')
        recorded_voiceovers.add_content_id_for_voiceover('1')
        recorded_voiceovers.add_content_id_for_voiceover('default_outcome')
        written_translations.add_content_id_for_translation('ca_placeholder_0')
        written_translations.add_content_id_for_translation('1')
        written_translations.add_content_id_for_translation('default_outcome')

        state.update_recorded_voiceovers(recorded_voiceovers)
        state.update_written_translations(written_translations)
        solution = state_domain.Solution(
            'TextInput', False, 'Solution', state_domain.SubtitledHtml(
                'solution', '<p>This is a solution.</p>'))
        hints_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml('hint_1', '<p>This is a hint.</p>')
            )
        ]

        state.update_interaction_solution(solution)
        state.update_interaction_hints(hints_list)
        state.update_interaction_default_outcome(
            state_domain.Outcome(
                None, state_domain.SubtitledHtml(
                    'feedback_id', '<p>Dummy Feedback</p>'),
                True, [], None, None
            )
        )
        question = question_domain.Question(
            question_id, state,
            feconf.CURRENT_STATE_SCHEMA_VERSION,
            constants.DEFAULT_LANGUAGE_CODE, 0, linked_skill_ids, [])
        return question

    def _create_dummy_skill(self, skill_id, skill_description, explanation):
        """Creates a dummy skill object with the given values.

        Args:
            skill_id: str. The ID of the skill to be created.
            skill_description: str. The description of the skill.
            explanation: str. The review material for the skill.

        Returns:
            Skill. The dummy skill with given values.
        """
        # One rubric per difficulty level (easy/medium/hard).
        rubrics = [
            skill_domain.Rubric(
                constants.SKILL_DIFFICULTIES[0], ['Explanation 1']),
            skill_domain.Rubric(
                constants.SKILL_DIFFICULTIES[1], ['Explanation 2']),
            skill_domain.Rubric(
                constants.SKILL_DIFFICULTIES[2], ['Explanation 3'])]
        skill = skill_domain.Skill.create_default_skill(
            skill_id, skill_description, rubrics)
        skill.update_explanation(state_domain.SubtitledHtml('1', explanation))
        return skill

    def _load_dummy_new_structures_data(self):
        """Loads the database with two topics (one of which is empty), a story
        and three skills in the topic (two of them in a subtopic) and a question
        attached to each skill.

        Raises:
            Exception. Cannot load new structures data in production mode.
            Exception. User does not have enough rights to generate data.
        """
        if constants.DEV_MODE:
            if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles:
                raise Exception(
                    'User does not have enough rights to generate data.')
            topic_id_1 = topic_fetchers.get_new_topic_id()
            topic_id_2 = topic_fetchers.get_new_topic_id()
            story_id = story_services.get_new_story_id()
            skill_id_1 = skill_services.get_new_skill_id()
            skill_id_2 = skill_services.get_new_skill_id()
            skill_id_3 = skill_services.get_new_skill_id()
            question_id_1 = question_services.get_new_question_id()
            question_id_2 = question_services.get_new_question_id()
            question_id_3 = question_services.get_new_question_id()

            skill_1 = self._create_dummy_skill(
                skill_id_1, 'Dummy Skill 1', '<p>Dummy Explanation 1</p>')
            skill_2 = self._create_dummy_skill(
                skill_id_2, 'Dummy Skill 2', '<p>Dummy Explanation 2</p>')
            skill_3 = self._create_dummy_skill(
                skill_id_3, 'Dummy Skill 3', '<p>Dummy Explanation 3</p>')

            question_1 = self._create_dummy_question(
                question_id_1, 'Question 1', [skill_id_1])
            question_2 = self._create_dummy_question(
                question_id_2, 'Question 2', [skill_id_2])
            question_3 = self._create_dummy_question(
                question_id_3, 'Question 3', [skill_id_3])
            question_services.add_question(self.user_id, question_1)
            question_services.add_question(self.user_id, question_2)
            question_services.add_question(self.user_id, question_3)

            # Each question is linked to its skill at a distinct difficulty.
            question_services.create_new_question_skill_link(
                self.user_id, question_id_1, skill_id_1, 0.3)
            question_services.create_new_question_skill_link(
                self.user_id, question_id_2, skill_id_2, 0.5)
            question_services.create_new_question_skill_link(
                self.user_id, question_id_3, skill_id_3, 0.7)

            topic_1 = topic_domain.Topic.create_default_topic(
                topic_id_1, 'Dummy Topic 1', 'dummy-topic-one', 'description')
            topic_2 = topic_domain.Topic.create_default_topic(
                topic_id_2, 'Empty Topic', 'empty-topic', 'description')

            topic_1.add_canonical_story(story_id)
            topic_1.add_uncategorized_skill_id(skill_id_1)
            topic_1.add_uncategorized_skill_id(skill_id_2)
            topic_1.add_uncategorized_skill_id(skill_id_3)
            topic_1.add_subtopic(1, 'Dummy Subtopic Title')
            # Skills 2 and 3 live in the subtopic; skill 1 stays
            # uncategorized.
            topic_1.move_skill_id_to_subtopic(None, 1, skill_id_2)
            topic_1.move_skill_id_to_subtopic(None, 1, skill_id_3)

            subtopic_page = (
                subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
                    1, topic_id_1))
            # These explorations were chosen since they pass the validations
            # for published stories.
            self._reload_exploration('15')
            self._reload_exploration('25')
            self._reload_exploration('13')

            exp_services.update_exploration(
                self.user_id, '15', [exp_domain.ExplorationChange({
                    'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
                    'property_name': 'correctness_feedback_enabled',
                    'new_value': True
                })], 'Changed correctness_feedback_enabled.')
            exp_services.update_exploration(
                self.user_id, '25', [exp_domain.ExplorationChange({
                    'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
                    'property_name': 'correctness_feedback_enabled',
                    'new_value': True
                })], 'Changed correctness_feedback_enabled.')
            exp_services.update_exploration(
                self.user_id, '13', [exp_domain.ExplorationChange({
                    'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
                    'property_name': 'correctness_feedback_enabled',
                    'new_value': True
                })], 'Changed correctness_feedback_enabled.')

            story = story_domain.Story.create_default_story(
                story_id, 'Help Jaime win the Arcade', 'Description',
                topic_id_1, 'help-jamie-win-arcade')

            story_node_dicts = [{
                'exp_id': '15',
                'title': 'What are the place values?',
                'description': 'Jaime learns the place value of each digit ' +
                               'in a big number.'
            }, {
                'exp_id': '25',
                'title': 'Finding the value of a number',
                'description': 'Jaime understands the value of his ' +
                               'arcade score.'
            }, {
                'exp_id': '13',
                'title': 'Comparing Numbers',
                'description': 'Jaime learns if a number is smaller or ' +
                               'greater than another number.'
            }]

            def generate_dummy_story_nodes(node_id, exp_id, title, description):
                """Generates and connects sequential story nodes.

                Args:
                    node_id: int. The node id.
                    exp_id: str. The exploration id.
                    title: str. The title of the story node.
                    description: str. The description of the story node.
                """
                story.add_node(
                    '%s%d' % (story_domain.NODE_ID_PREFIX, node_id),
                    title)
                story.update_node_description(
                    '%s%d' % (story_domain.NODE_ID_PREFIX, node_id),
                    description)
                story.update_node_exploration_id(
                    '%s%d' % (story_domain.NODE_ID_PREFIX, node_id), exp_id)

                # Every node except the last points at its successor.
                if node_id != len(story_node_dicts):
                    story.update_node_destination_node_ids(
                        '%s%d' % (story_domain.NODE_ID_PREFIX, node_id),
                        ['%s%d' % (story_domain.NODE_ID_PREFIX, node_id + 1)])

                exp_services.update_exploration(
                    self.user_id, exp_id, [exp_domain.ExplorationChange({
                        'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
                        'property_name': 'category',
                        'new_value': 'Astronomy'
                    })], 'Change category')

            for i, story_node_dict in enumerate(story_node_dicts):
                generate_dummy_story_nodes(i + 1, **story_node_dict)

            skill_services.save_new_skill(self.user_id, skill_1)
            skill_services.save_new_skill(self.user_id, skill_2)
            skill_services.save_new_skill(self.user_id, skill_3)
            story_services.save_new_story(self.user_id, story)
            topic_services.save_new_topic(self.user_id, topic_1)
            topic_services.save_new_topic(self.user_id, topic_2)
            subtopic_page_services.save_subtopic_page(
                self.user_id, subtopic_page, 'Added subtopic',
                [topic_domain.TopicChange({
                    'cmd': topic_domain.CMD_ADD_SUBTOPIC,
                    'subtopic_id': 1,
                    'title': 'Dummy Subtopic Title'
                })]
            )

            # Generates translation opportunities for the Contributor
            # Dashboard.
            exp_ids_in_story = story.story_contents.get_all_linked_exp_ids()
            opportunity_services.add_new_exploration_opportunities(
                story_id, exp_ids_in_story)

            topic_services.publish_story(topic_id_1, story_id, self.user_id)
        else:
            raise Exception('Cannot load new structures data in production.')

    def _generate_dummy_skill_and_questions(self):
        """Generate and loads the database with a skill and 15 questions
        linked to the skill.

        Raises:
            Exception. Cannot load new structures data in production mode.
            Exception. User does not have enough rights to generate data.
        """
        if constants.DEV_MODE:
            if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles:
                raise Exception(
                    'User does not have enough rights to generate data.')
            skill_id = skill_services.get_new_skill_id()
            # Random suffix keeps repeated invocations from colliding on the
            # skill description.
            skill_name = 'Dummy Skill %s' % python_utils.UNICODE(
                random.getrandbits(32))
            skill = self._create_dummy_skill(
                skill_id, skill_name, '<p>Dummy Explanation 1</p>')
            skill_services.save_new_skill(self.user_id, skill)
            for i in range(15):
                question_id = question_services.get_new_question_id()
                question_name = 'Question number %s %s' % (
                    python_utils.UNICODE(i), skill_name)
                question = self._create_dummy_question(
                    question_id, question_name, [skill_id])
                question_services.add_question(self.user_id, question)
                question_difficulty = list(
                    constants.SKILL_DIFFICULTY_LABEL_TO_FLOAT.values())
                random_difficulty = random.choice(question_difficulty)
                question_services.create_new_question_skill_link(
                    self.user_id, question_id, skill_id, random_difficulty)
        else:
            raise Exception('Cannot generate dummy skills in production.')

    def _reload_collection(self, collection_id):
        """Reloads the collection in dev_mode corresponding to the given
        collection id.

        Args:
            collection_id: str. The collection id.

        Raises:
            Exception. Cannot reload a collection in production.
        """
        if constants.DEV_MODE:
            logging.info(
                '[ADMIN] %s reloaded collection %s' %
                (self.user_id, collection_id))
            collection_services.load_demo(collection_id)
            rights_manager.release_ownership_of_collection(
                user_services.get_system_user(), collection_id)
        else:
            raise Exception('Cannot reload a collection in production.')

    def _generate_dummy_explorations(
            self, num_dummy_exps_to_generate, num_dummy_exps_to_publish):
        """Generates and publishes the given number of dummy explorations.

        Args:
            num_dummy_exps_to_generate: int. Count of dummy explorations to
                be generated.
            num_dummy_exps_to_publish: int. Count of explorations to
                be published.

        Raises:
            Exception. Environment is not DEVMODE.
        """
        if constants.DEV_MODE:
            logging.info(
                '[ADMIN] %s generated %s number of dummy explorations' %
                (self.user_id, num_dummy_exps_to_generate))
            possible_titles = ['Hulk Neuroscience', 'Quantum Starks',
                               'Wonder Anatomy',
                               'Elvish, language of "Lord of the Rings',
                               'The Science of Superheroes']
            exploration_ids_to_publish = []
            for i in range(num_dummy_exps_to_generate):
                title = random.choice(possible_titles)
                category = random.choice(constants.SEARCH_DROPDOWN_CATEGORIES)
                new_exploration_id = exp_fetchers.get_new_exploration_id()
                exploration = exp_domain.Exploration.create_default_exploration(
                    new_exploration_id, title=title, category=category,
                    objective='Dummy Objective')
                exp_services.save_new_exploration(self.user_id, exploration)
                # Only the first num_dummy_exps_to_publish explorations are
                # published; the rest stay private.
                if i <= num_dummy_exps_to_publish - 1:
                    exploration_ids_to_publish.append(new_exploration_id)
                    rights_manager.publish_exploration(
                        self.user, new_exploration_id)
            exp_services.index_explorations_given_ids(
                exploration_ids_to_publish)
        else:
            raise Exception('Cannot generate dummy explorations in production.')
class AdminRoleHandler(base.BaseHandler):
    """Handler for roles tab of admin page. Used to view and update roles."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'GET': {
            'filter_criterion': {
                'schema': {
                    'type': 'basestring',
                    'choices': [
                        feconf.USER_FILTER_CRITERION_ROLE,
                        feconf.USER_FILTER_CRITERION_USERNAME
                    ]
                }
            },
            'role': {
                'schema': {
                    'type': 'basestring',
                    'choices': role_services.VIEWABLE_ROLES
                },
                'default_value': None
            },
            'username': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            }
        },
        'PUT': {
            'role': {
                'schema': {
                    'type': 'basestring',
                    'choices': feconf.ALLOWED_USER_ROLES
                }
            },
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        },
        'DELETE': {
            'role': {
                'schema': {
                    'type': 'basestring',
                    'choices': feconf.ALLOWED_USER_ROLES
                }
            },
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def get(self):
        """Returns role info, filtered either by role or by username."""
        filter_criterion = self.normalized_request.get(
            'filter_criterion')
        if filter_criterion == feconf.USER_FILTER_CRITERION_ROLE:
            role = self.normalized_request.get(
                feconf.USER_FILTER_CRITERION_ROLE)
            role_services.log_role_query(
                self.user_id, feconf.ROLE_ACTION_VIEW_BY_ROLE,
                role=role)
            self.render_json({
                'usernames': user_services.get_usernames_by_role(role)
            })
        elif filter_criterion == feconf.USER_FILTER_CRITERION_USERNAME:
            username = self.normalized_request.get(
                feconf.USER_FILTER_CRITERION_USERNAME)
            user_id = user_services.get_user_id_from_username(username)
            # The query is logged even for nonexistent usernames, for audit.
            role_services.log_role_query(
                self.user_id, feconf.ROLE_ACTION_VIEW_BY_USERNAME,
                username=username)
            if user_id is None:
                raise self.InvalidInputException(
                    'User with given username does not exist.')
            user_settings = user_services.get_user_settings(user_id)
            user_roles = user_settings.roles
            managed_topic_ids = []
            if feconf.ROLE_ID_TOPIC_MANAGER in user_roles:
                managed_topic_ids = [
                    rights.id for rights in
                    topic_fetchers.get_topic_rights_with_user(user_id)]
            user_roles_dict = {
                'roles': user_roles,
                'managed_topic_ids': managed_topic_ids,
                'banned': user_settings.banned
            }
            self.render_json(user_roles_dict)

    @acl_decorators.can_access_admin_page
    def put(self):
        """Adds a role to the given user.

        Raises:
            InvalidInputException. The username does not exist, or the role
                is topic manager (which has a dedicated handler).
        """
        # Read from the schema-validated payload, consistent with the rest
        # of this file; self.payload would bypass schema validation.
        username = self.normalized_payload.get('username')
        role = self.normalized_payload.get('role')
        user_settings = user_services.get_user_settings_from_username(username)

        if user_settings is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')
        if role == feconf.ROLE_ID_TOPIC_MANAGER:
            # The Topic manager role assignment is handled via
            # TopicManagerRoleHandler.
            raise self.InvalidInputException(
                'Unsupported role for this handler.')

        user_services.add_user_role(user_settings.user_id, role)
        self.render_json({})

    @acl_decorators.can_access_admin_page
    def delete(self):
        """Removes a role from the given user.

        Raises:
            InvalidInputException. The username does not exist.
        """
        # Read from the schema-validated request, consistent with the rest
        # of this file; self.request would bypass schema validation.
        username = self.normalized_request.get('username')
        role = self.normalized_request.get('role')

        user_id = user_services.get_user_id_from_username(username)
        if user_id is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')

        if role == feconf.ROLE_ID_TOPIC_MANAGER:
            # Dropping topic-manager also releases all managed topics.
            topic_services.deassign_user_from_all_topics(self.user, user_id)

        user_services.remove_user_role(user_id, role)
        self.render_json({})
class TopicManagerRoleHandler(base.BaseHandler):
    """Handler to assign or deassigning manager to a topic."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'action': {
                'schema': {
                    'type': 'basestring',
                    'choices': ['assign', 'deassign']
                }
            },
            'topic_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Assigns or deassigns the topic-manager role for one topic.

        Raises:
            InvalidInputException. The username does not exist.
        """
        username = self.normalized_payload.get('username')
        action = self.normalized_payload.get('action')
        topic_id = self.normalized_payload.get('topic_id')

        user_settings = user_services.get_user_settings_from_username(username)

        if user_settings is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')

        user_id = user_settings.user_id
        if action == 'assign':
            # PEP 8 idiom: 'X not in Y' rather than 'not X in Y'.
            if feconf.ROLE_ID_TOPIC_MANAGER not in user_settings.roles:
                user_services.add_user_role(
                    user_id, feconf.ROLE_ID_TOPIC_MANAGER)

            topic_manager = user_services.get_user_actions_info(user_id)
            topic_services.assign_role(
                user_services.get_system_user(),
                topic_manager, topic_domain.ROLE_MANAGER, topic_id)
        elif action == 'deassign':
            topic_services.deassign_manager_role_from_topic(
                user_services.get_system_user(), user_id, topic_id)

            # The role itself is only removed once the user manages no
            # topics at all.
            if not topic_fetchers.get_topic_rights_with_user(user_id):
                user_services.remove_user_role(
                    user_id, feconf.ROLE_ID_TOPIC_MANAGER)
        self.render_json({})
class BannedUsersHandler(base.BaseHandler):
    """Handler to ban and unban users."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        },
        'DELETE': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    def _resolve_user_id(self, username):
        """Returns the user ID for the username, raising if it is unknown."""
        user_id = user_services.get_user_id_from_username(username)
        if user_id is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')
        return user_id

    @acl_decorators.can_access_admin_page
    def put(self):
        """Bans the named user and releases all their topic assignments."""
        user_id = self._resolve_user_id(
            self.normalized_payload.get('username'))
        topic_services.deassign_user_from_all_topics(self.user, user_id)
        user_services.mark_user_banned(user_id)
        self.render_json({})

    @acl_decorators.can_access_admin_page
    def delete(self):
        """Lifts the ban from the named user."""
        user_id = self._resolve_user_id(
            self.normalized_request.get('username'))
        user_services.unmark_user_banned(user_id)
        self.render_json({})
class AdminSuperAdminPrivilegesHandler(base.BaseHandler):
    """Handler for granting a user super admin privileges."""

    PUT_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    DELETE_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        },
        'DELETE': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    def _check_caller_is_default_admin(self):
        """Raises unless the caller is the default system admin account."""
        if self.email != feconf.ADMIN_EMAIL_ADDRESS:
            raise self.UnauthorizedUserException(
                'Only the default system admin can manage super admins')

    @acl_decorators.can_access_admin_page
    def put(self):
        """Grants super admin privileges to the named user."""
        self._check_caller_is_default_admin()
        username = self.normalized_payload.get('username')
        user_id = user_services.get_user_id_from_username(username)
        if user_id is None:
            raise self.InvalidInputException('No such user exists')
        auth_services.grant_super_admin_privileges(user_id)
        self.render_json(self.values)

    @acl_decorators.can_access_admin_page
    def delete(self):
        """Revokes super admin privileges from the named user."""
        self._check_caller_is_default_admin()
        username = self.normalized_request.get('username')
        # The full settings are needed here (not just the ID) so the
        # default admin account itself can be protected from revocation.
        user_settings = user_services.get_user_settings_from_username(username)
        if user_settings is None:
            raise self.InvalidInputException('No such user exists')
        if user_settings.email == feconf.ADMIN_EMAIL_ADDRESS:
            raise self.InvalidInputException(
                'Cannot revoke privileges from the default super admin account')
        auth_services.revoke_super_admin_privileges(user_settings.user_id)
        self.render_json(self.values)
class AdminTopicsCsvFileDownloader(base.BaseHandler):
    """Retrieves topic similarity data for download."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_DOWNLOADABLE
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {'GET': {}}

    @acl_decorators.can_access_admin_page
    def get(self):
        """Serves the topic-similarity matrix as a downloadable CSV file."""
        csv_text = recommendations_services.get_topic_similarities_as_csv()
        # The downloadable-file API accepts only bytes, so the CSV text is
        # encoded before being wrapped in a byte stream.
        self.render_downloadable_file(
            io.BytesIO(csv_text.encode('utf-8')),
            'topic_similarities.csv',
            'text/csv'
        )
class DataExtractionQueryHandler(base.BaseHandler):
    """Handler for data extraction query."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'GET': {
            'exp_id': {'schema': {'type': 'basestring'}},
            'exp_version': {'schema': {'type': 'int'}},
            'state_name': {'schema': {'type': 'basestring'}},
            'num_answers': {'schema': {'type': 'int'}}
        }
    }

    @acl_decorators.can_access_admin_page
    def get(self):
        """Returns a sample of the answers submitted to one exploration state.

        Raises:
            InvalidInputException. The exploration/version pair does not
                exist, or the state name is not part of the exploration.
        """
        exp_id = self.normalized_request.get('exp_id')
        exp_version = self.normalized_request.get('exp_version')
        state_name = self.normalized_request.get('state_name')
        num_answers = self.normalized_request.get('num_answers')

        exploration = exp_fetchers.get_exploration_by_id(
            exp_id, strict=False, version=exp_version)
        if exploration is None:
            raise self.InvalidInputException(
                'Entity for exploration with id %s and version %s not found.'
                % (exp_id, exp_version))
        if state_name not in exploration.states:
            raise self.InvalidInputException(
                'Exploration \'%s\' does not have \'%s\' state.'
                % (exp_id, state_name))

        state_answers = stats_services.get_state_answers(
            exp_id, exp_version, state_name)
        submitted_answers = state_answers.get_submitted_answer_dict_list()
        # A non-positive num_answers means "return everything".
        if num_answers > 0:
            submitted_answers = submitted_answers[:num_answers]

        self.render_json({'data': submitted_answers})
class SendDummyMailToAdminHandler(base.BaseHandler):
    """This function handles sending test emails."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {'POST': {}}

    @acl_decorators.can_access_admin_page
    def post(self):
        """Sends a test email to the requesting admin, if emails are enabled."""
        # Guard clause: refuse outright when the deployment cannot send mail.
        if not feconf.CAN_SEND_EMAILS:
            raise self.InvalidInputException('This app cannot send emails.')
        email_manager.send_dummy_mail_to_admin(self.username)
        self.render_json({})
class UpdateUsernameHandler(base.BaseHandler):
    """Handler for renaming usernames."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'old_username': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'new_username': {
                'schema': {
                    'type': 'basestring',
                    'validators': [{
                        'id': 'has_length_at_most',
                        'max_value': constants.MAX_USERNAME_LENGTH
                    }]
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Renames a user, recording the change in the audit log.

        Raises:
            InvalidInputException. The old username is unknown, or the new
                username is already taken.
        """
        current_name = self.normalized_payload.get('old_username')
        proposed_name = self.normalized_payload.get('new_username')

        user_id = user_services.get_user_id_from_username(current_name)
        if user_id is None:
            raise self.InvalidInputException(
                'Invalid username: %s' % current_name)
        if user_services.is_username_taken(proposed_name):
            raise self.InvalidInputException('Username already taken.')

        user_services.set_username(user_id, proposed_name)
        # Keep an audit trail of who performed the rename.
        user_services.log_username_change(
            self.user_id, current_name, proposed_name)
        self.render_json({})
class NumberOfDeletionRequestsHandler(base.BaseHandler):
    """Reports how many account-deletion (wipeout) requests are still
    pending, for display on the admin page.
    """

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {'GET': {}}

    @acl_decorators.can_access_admin_page
    def get(self):
        """Renders the pending-deletion count as JSON."""
        pending_count = (
            wipeout_service.get_number_of_pending_deletion_requests())
        response = {'number_of_pending_deletion_models': pending_count}
        self.render_json(response)
class VerifyUserModelsDeletedHandler(base.BaseHandler):
    """Checks whether any storage models still exist for a given user ID."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'GET': {
            'user_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def get(self):
        """Renders whether models related to the user still exist.

        The JSON response's 'related_models_exist' is the negation of the
        wipeout service's "fully deleted" verdict.
        """
        target_user_id = self.normalized_request.get('user_id')
        fully_deleted = wipeout_service.verify_user_deleted(
            target_user_id, include_delete_at_end_models=True)
        response = {'related_models_exist': not fully_deleted}
        self.render_json(response)
class DeleteUserHandler(base.BaseHandler):
    """Handler for deleting a user with specific ID."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'DELETE': {
            'user_id': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_delete_any_user
    def delete(self):
        """Starts the wipeout (pre-deletion) process for the given user.

        Both the user ID and the username must be supplied and must resolve
        to the same account; requiring both guards against deleting the
        wrong user by mistake.

        Raises:
            InvalidInputException. The username is unknown, or it does not
                resolve to the supplied user ID.
        """
        user_id = self.normalized_request.get('user_id')
        username = self.normalized_request.get('username')
        user_id_from_username = (
            user_services.get_user_id_from_username(username))
        if user_id_from_username is None:
            raise self.InvalidInputException(
                'The username doesn\'t belong to any user'
            )
        if user_id_from_username != user_id:
            raise self.InvalidInputException(
                'The user ID retrieved from the username and '
                'the user ID provided by admin differ.'
            )
        wipeout_service.pre_delete_user(user_id)
        self.render_json({'success': True})
class UpdateBlogPostHandler(base.BaseHandler):
    """Handler for changing author ids and published on date in
    blog posts."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'blog_post_id': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'author_username': {
                'schema': {
                    'type': 'basestring',
                    'validators': [{
                        'id': 'has_length_at_most',
                        'max_value': constants.MAX_USERNAME_LENGTH
                    }]
                }
            },
            'published_on': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Reassigns the author and published-on date of a blog post.

        Raises:
            InvalidInputException. The author username is unknown, or that
                user lacks the blog-dashboard access needed to be an author.
            PageNotFoundException. No blog post exists with the given id.
        """
        blog_post_id = self.normalized_payload.get('blog_post_id')
        author_username = self.normalized_payload.get('author_username')
        published_on = self.normalized_payload.get('published_on')
        author_id = user_services.get_user_id_from_username(author_username)
        if author_id is None:
            raise self.InvalidInputException(
                'Invalid username: %s' % author_username)
        # The new author must already hold blog-dashboard rights.
        user_actions = user_services.get_user_actions_info(author_id).actions
        if role_services.ACTION_ACCESS_BLOG_DASHBOARD not in user_actions:
            raise self.InvalidInputException(
                'User does not have enough rights to be blog post author.')
        # strict=False: return None instead of raising when the id is bad.
        blog_post = (
            blog_services.get_blog_post_by_id(blog_post_id, strict=False))
        if blog_post is None:
            raise self.PageNotFoundException(
                Exception(
                    'The blog post with the given id or url doesn\'t exist.'))
        blog_services.update_blog_models_author_and_published_on_date(
            blog_post_id, author_id, published_on)
        self.render_json({})
| 38.814366 | 80 | 0.587823 |
from __future__ import absolute_import
from __future__ import unicode_literals
import io
import logging
import random
from core import feconf
from core import python_utils
from core import utils
from core.constants import constants
from core.controllers import acl_decorators
from core.controllers import base
from core.controllers import domain_objects_validator as validation_method
from core.domain import auth_services
from core.domain import blog_services
from core.domain import collection_services
from core.domain import config_domain
from core.domain import config_services
from core.domain import email_manager
from core.domain import exp_domain
from core.domain import exp_fetchers
from core.domain import exp_services
from core.domain import opportunity_services
from core.domain import platform_feature_services as feature_services
from core.domain import platform_parameter_domain as parameter_domain
from core.domain import question_domain
from core.domain import question_services
from core.domain import recommendations_services
from core.domain import rights_manager
from core.domain import role_services
from core.domain import search_services
from core.domain import skill_domain
from core.domain import skill_services
from core.domain import state_domain
from core.domain import stats_services
from core.domain import story_domain
from core.domain import story_services
from core.domain import subtopic_page_domain
from core.domain import subtopic_page_services
from core.domain import topic_domain
from core.domain import topic_fetchers
from core.domain import topic_services
from core.domain import user_services
from core.domain import wipeout_service
class AdminPage(base.BaseHandler):
    """Handler that renders the admin page."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {'GET': {}}

    @acl_decorators.can_access_admin_page
    def get(self):
        """Renders the admin page's main template."""
        self.render_template('admin-page.mainpage.html')
class AdminHandler(base.BaseHandler):
    """Handler for the admin page's backend actions.

    GET returns everything the admin UI needs (config properties, demo
    activities, roles, topic summaries, feature flags). POST dispatches one
    of a fixed set of maintenance actions named by the 'action' parameter.
    """

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'GET': {},
        'POST': {
            'action': {
                'schema': {
                    'type': 'basestring',
                    'choices': [
                        'reload_exploration', 'reload_collection',
                        'generate_dummy_explorations', 'clear_search_index',
                        'generate_dummy_new_structures_data',
                        'generate_dummy_new_skill_data',
                        'save_config_properties', 'revert_config_property',
                        'upload_topic_similarities',
                        'regenerate_topic_related_opportunities',
                        'update_feature_flag_rules'
                    ]
                },
                # BUG FIX: this key was corrupted to "ult_value" (a syntax
                # error, truncated from 'default_value'); restored so the
                # module parses and the 'action' arg is optional like the
                # rest of the POST args.
                'default_value': None
            },
            'exploration_id': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'collection_id': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'num_dummy_exps_to_generate': {
                'schema': {
                    'type': 'int'
                },
                'default_value': None
            },
            'num_dummy_exps_to_publish': {
                'schema': {
                    'type': 'int'
                },
                'default_value': None
            },
            'new_config_property_values': {
                'schema': {
                    'type': 'object_dict',
                    'validation_method': (
                        validation_method.validate_new_config_property_values)
                },
                'default_value': None
            },
            'config_property_id': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'data': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'topic_id': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'feature_name': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'commit_message': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            },
            'new_rules': {
                'schema': {
                    'type': 'list',
                    'items': {
                        'type': 'object_dict',
                        'object_class': parameter_domain.PlatformParameterRule
                    }
                },
                'default_value': None
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def get(self):
        """Renders the data needed by the admin page as JSON."""
        demo_exploration_ids = list(feconf.DEMO_EXPLORATIONS.keys())
        topic_summaries = topic_fetchers.get_all_topic_summaries()
        topic_summary_dicts = [
            summary.to_dict() for summary in topic_summaries]
        feature_flag_dicts = feature_services.get_all_feature_flag_dicts()
        config_properties = config_domain.Registry.get_config_property_schemas()
        # These properties are deliberately hidden from the admin config
        # editor (presumably managed from their own pages — confirm).
        del config_properties['promo_bar_enabled']
        del config_properties['promo_bar_message']
        del config_properties['max_number_of_tags_assigned_to_blog_post']
        del config_properties['list_of_default_tags_for_blog_post']
        self.render_json({
            'config_properties': config_properties,
            'demo_collections': sorted(feconf.DEMO_COLLECTIONS.items()),
            'demo_explorations': sorted(feconf.DEMO_EXPLORATIONS.items()),
            'demo_exploration_ids': demo_exploration_ids,
            'updatable_roles': role_services.UPDATABLE_ROLES,
            'viewable_roles': role_services.VIEWABLE_ROLES,
            'human_readable_roles': role_services.HUMAN_READABLE_ROLES,
            'role_to_actions': role_services.get_role_actions(),
            'topic_summaries': topic_summary_dicts,
            'feature_flags': feature_flag_dicts,
        })

    @acl_decorators.can_access_admin_page
    def post(self):
        """Dispatches the admin action named by the 'action' payload field.

        On failure the error is logged, rendered as JSON for the client,
        and then re-raised so it still propagates to the framework.
        """
        action = self.normalized_payload.get('action')
        try:
            result = {}
            if action == 'reload_exploration':
                exploration_id = self.normalized_payload.get('exploration_id')
                self._reload_exploration(exploration_id)
            elif action == 'reload_collection':
                collection_id = self.normalized_payload.get('collection_id')
                self._reload_collection(collection_id)
            elif action == 'generate_dummy_explorations':
                num_dummy_exps_to_generate = self.normalized_payload.get(
                    'num_dummy_exps_to_generate')
                num_dummy_exps_to_publish = self.normalized_payload.get(
                    'num_dummy_exps_to_publish')
                if num_dummy_exps_to_generate < num_dummy_exps_to_publish:
                    raise self.InvalidInputException(
                        'Generate count cannot be less than publish count')
                else:
                    self._generate_dummy_explorations(
                        num_dummy_exps_to_generate, num_dummy_exps_to_publish)
            elif action == 'clear_search_index':
                search_services.clear_collection_search_index()
                search_services.clear_exploration_search_index()
            elif action == 'generate_dummy_new_structures_data':
                self._load_dummy_new_structures_data()
            elif action == 'generate_dummy_new_skill_data':
                self._generate_dummy_skill_and_questions()
            elif action == 'save_config_properties':
                new_config_property_values = self.normalized_payload.get(
                    'new_config_property_values')
                logging.info(
                    '[ADMIN] %s saved config property values: %s' %
                    (self.user_id, new_config_property_values))
                for (name, value) in new_config_property_values.items():
                    config_services.set_property(self.user_id, name, value)
            elif action == 'revert_config_property':
                config_property_id = self.normalized_payload.get(
                    'config_property_id')
                logging.info(
                    '[ADMIN] %s reverted config property: %s' %
                    (self.user_id, config_property_id))
                config_services.revert_property(
                    self.user_id, config_property_id)
            elif action == 'upload_topic_similarities':
                data = self.normalized_payload.get('data')
                recommendations_services.update_topic_similarities(data)
            elif action == 'regenerate_topic_related_opportunities':
                topic_id = self.normalized_payload.get('topic_id')
                opportunities_count = (
                    opportunity_services
                    .regenerate_opportunities_related_to_topic(
                        topic_id, delete_existing_opportunities=True))
                result = {
                    'opportunities_count': opportunities_count
                }
            elif action == 'update_feature_flag_rules':
                feature_name = self.normalized_payload.get('feature_name')
                new_rule_dicts = self.normalized_payload.get('new_rules')
                commit_message = self.normalized_payload.get('commit_message')
                # Validation failures are surfaced as client errors, not
                # server errors.
                try:
                    feature_services.update_feature_flag_rules(
                        feature_name, self.user_id, commit_message,
                        new_rule_dicts)
                except (
                        utils.ValidationError,
                        feature_services.FeatureFlagNotFoundException) as e:
                    raise self.InvalidInputException(e)
                logging.info(
                    '[ADMIN] %s updated feature %s with new rules: '
                    '%s.' % (self.user_id, feature_name, new_rule_dicts))
            self.render_json(result)
        except Exception as e:
            logging.exception('[ADMIN] %s', e)
            self.render_json({'error': python_utils.UNICODE(e)})
            # Re-raise so the exception is not silently swallowed after
            # being reported to the client.
            python_utils.reraise_exception()

    def _reload_exploration(self, exploration_id):
        """Reloads the demo exploration with the given id (dev mode only).

        Raises:
            Exception. Called outside of dev mode.
        """
        if constants.DEV_MODE:
            logging.info(
                '[ADMIN] %s reloaded exploration %s' %
                (self.user_id, exploration_id))
            exp_services.load_demo(python_utils.UNICODE(exploration_id))
            rights_manager.release_ownership_of_exploration(
                user_services.get_system_user(),
                python_utils.UNICODE(exploration_id))
        else:
            raise Exception('Cannot reload an exploration in production.')

    def _create_dummy_question(
            self, question_id, question_content, linked_skill_ids):
        """Builds and returns a dummy TextInput question with a hint and a
        solution, linked to the given skill ids.
        """
        state = state_domain.State.create_default_state(
            'ABC', is_initial_state=True)
        state.update_interaction_id('TextInput')
        state.update_interaction_customization_args({
            'placeholder': {
                'value': {
                    'content_id': 'ca_placeholder_0',
                    'unicode_str': ''
                }
            },
            'rows': {'value': 1}
        })
        state.update_next_content_id_index(1)
        state.update_linked_skill_id(None)
        state.update_content(state_domain.SubtitledHtml('1', question_content))
        # Register voiceover/translation slots for every content id used
        # above so the state validates.
        recorded_voiceovers = state_domain.RecordedVoiceovers({})
        written_translations = state_domain.WrittenTranslations({})
        recorded_voiceovers.add_content_id_for_voiceover('ca_placeholder_0')
        recorded_voiceovers.add_content_id_for_voiceover('1')
        recorded_voiceovers.add_content_id_for_voiceover('default_outcome')
        written_translations.add_content_id_for_translation('ca_placeholder_0')
        written_translations.add_content_id_for_translation('1')
        written_translations.add_content_id_for_translation('default_outcome')
        state.update_recorded_voiceovers(recorded_voiceovers)
        state.update_written_translations(written_translations)
        solution = state_domain.Solution(
            'TextInput', False, 'Solution', state_domain.SubtitledHtml(
                'solution', '<p>This is a solution.</p>'))
        hints_list = [
            state_domain.Hint(
                state_domain.SubtitledHtml('hint_1', '<p>This is a hint.</p>')
            )
        ]
        state.update_interaction_solution(solution)
        state.update_interaction_hints(hints_list)
        state.update_interaction_default_outcome(
            state_domain.Outcome(
                None, state_domain.SubtitledHtml(
                    'feedback_id', '<p>Dummy Feedback</p>'),
                True, [], None, None
            )
        )
        question = question_domain.Question(
            question_id, state,
            feconf.CURRENT_STATE_SCHEMA_VERSION,
            constants.DEFAULT_LANGUAGE_CODE, 0, linked_skill_ids, [])
        return question

    def _create_dummy_skill(self, skill_id, skill_description, explanation):
        """Builds and returns a dummy skill with one rubric per difficulty
        level and the given explanation.
        """
        rubrics = [
            skill_domain.Rubric(
                constants.SKILL_DIFFICULTIES[0], ['Explanation 1']),
            skill_domain.Rubric(
                constants.SKILL_DIFFICULTIES[1], ['Explanation 2']),
            skill_domain.Rubric(
                constants.SKILL_DIFFICULTIES[2], ['Explanation 3'])]
        skill = skill_domain.Skill.create_default_skill(
            skill_id, skill_description, rubrics)
        skill.update_explanation(state_domain.SubtitledHtml('1', explanation))
        return skill

    def _load_dummy_new_structures_data(self):
        """Loads dummy topics, a story, skills and questions (dev mode only).

        Creates two topics (one fully populated, one empty), three skills
        with one question each, and a three-node story wired to demo
        explorations 15, 25 and 13.

        Raises:
            Exception. Called outside of dev mode, or the caller is not a
                curriculum admin.
        """
        if constants.DEV_MODE:
            if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles:
                raise Exception(
                    'User does not have enough rights to generate data.')
            topic_id_1 = topic_fetchers.get_new_topic_id()
            topic_id_2 = topic_fetchers.get_new_topic_id()
            story_id = story_services.get_new_story_id()
            skill_id_1 = skill_services.get_new_skill_id()
            skill_id_2 = skill_services.get_new_skill_id()
            skill_id_3 = skill_services.get_new_skill_id()
            question_id_1 = question_services.get_new_question_id()
            question_id_2 = question_services.get_new_question_id()
            question_id_3 = question_services.get_new_question_id()
            skill_1 = self._create_dummy_skill(
                skill_id_1, 'Dummy Skill 1', '<p>Dummy Explanation 1</p>')
            skill_2 = self._create_dummy_skill(
                skill_id_2, 'Dummy Skill 2', '<p>Dummy Explanation 2</p>')
            skill_3 = self._create_dummy_skill(
                skill_id_3, 'Dummy Skill 3', '<p>Dummy Explanation 3</p>')
            question_1 = self._create_dummy_question(
                question_id_1, 'Question 1', [skill_id_1])
            question_2 = self._create_dummy_question(
                question_id_2, 'Question 2', [skill_id_2])
            question_3 = self._create_dummy_question(
                question_id_3, 'Question 3', [skill_id_3])
            question_services.add_question(self.user_id, question_1)
            question_services.add_question(self.user_id, question_2)
            question_services.add_question(self.user_id, question_3)
            question_services.create_new_question_skill_link(
                self.user_id, question_id_1, skill_id_1, 0.3)
            question_services.create_new_question_skill_link(
                self.user_id, question_id_2, skill_id_2, 0.5)
            question_services.create_new_question_skill_link(
                self.user_id, question_id_3, skill_id_3, 0.7)
            topic_1 = topic_domain.Topic.create_default_topic(
                topic_id_1, 'Dummy Topic 1', 'dummy-topic-one', 'description')
            topic_2 = topic_domain.Topic.create_default_topic(
                topic_id_2, 'Empty Topic', 'empty-topic', 'description')
            topic_1.add_canonical_story(story_id)
            topic_1.add_uncategorized_skill_id(skill_id_1)
            topic_1.add_uncategorized_skill_id(skill_id_2)
            topic_1.add_uncategorized_skill_id(skill_id_3)
            topic_1.add_subtopic(1, 'Dummy Subtopic Title')
            topic_1.move_skill_id_to_subtopic(None, 1, skill_id_2)
            topic_1.move_skill_id_to_subtopic(None, 1, skill_id_3)
            subtopic_page = (
                subtopic_page_domain.SubtopicPage.create_default_subtopic_page(
                    1, topic_id_1))
            # The story below links to these demo explorations, so make
            # sure they exist locally first.
            self._reload_exploration('15')
            self._reload_exploration('25')
            self._reload_exploration('13')
            exp_services.update_exploration(
                self.user_id, '15', [exp_domain.ExplorationChange({
                    'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
                    'property_name': 'correctness_feedback_enabled',
                    'new_value': True
                })], 'Changed correctness_feedback_enabled.')
            exp_services.update_exploration(
                self.user_id, '25', [exp_domain.ExplorationChange({
                    'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
                    'property_name': 'correctness_feedback_enabled',
                    'new_value': True
                })], 'Changed correctness_feedback_enabled.')
            exp_services.update_exploration(
                self.user_id, '13', [exp_domain.ExplorationChange({
                    'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
                    'property_name': 'correctness_feedback_enabled',
                    'new_value': True
                })], 'Changed correctness_feedback_enabled.')
            story = story_domain.Story.create_default_story(
                story_id, 'Help Jaime win the Arcade', 'Description',
                topic_id_1, 'help-jamie-win-arcade')
            story_node_dicts = [{
                'exp_id': '15',
                'title': 'What are the place values?',
                'description': 'Jaime learns the place value of each digit ' +
                               'in a big number.'
            }, {
                'exp_id': '25',
                'title': 'Finding the value of a number',
                'description': 'Jaime understands the value of his ' +
                               'arcade score.'
            }, {
                'exp_id': '13',
                'title': 'Comparing Numbers',
                'description': 'Jaime learns if a number is smaller or ' +
                               'greater than another number.'
            }]

            def generate_dummy_story_nodes(node_id, exp_id, title, description):
                """Adds one story node and chains it to the next node."""
                story.add_node(
                    '%s%d' % (story_domain.NODE_ID_PREFIX, node_id),
                    title)
                story.update_node_description(
                    '%s%d' % (story_domain.NODE_ID_PREFIX, node_id),
                    description)
                story.update_node_exploration_id(
                    '%s%d' % (story_domain.NODE_ID_PREFIX, node_id), exp_id)
                # Every node except the last points at its successor.
                if node_id != len(story_node_dicts):
                    story.update_node_destination_node_ids(
                        '%s%d' % (story_domain.NODE_ID_PREFIX, node_id),
                        ['%s%d' % (story_domain.NODE_ID_PREFIX, node_id + 1)])
                exp_services.update_exploration(
                    self.user_id, exp_id, [exp_domain.ExplorationChange({
                        'cmd': exp_domain.CMD_EDIT_EXPLORATION_PROPERTY,
                        'property_name': 'category',
                        'new_value': 'Astronomy'
                    })], 'Change category')

            for i, story_node_dict in enumerate(story_node_dicts):
                generate_dummy_story_nodes(i + 1, **story_node_dict)
            skill_services.save_new_skill(self.user_id, skill_1)
            skill_services.save_new_skill(self.user_id, skill_2)
            skill_services.save_new_skill(self.user_id, skill_3)
            story_services.save_new_story(self.user_id, story)
            topic_services.save_new_topic(self.user_id, topic_1)
            topic_services.save_new_topic(self.user_id, topic_2)
            subtopic_page_services.save_subtopic_page(
                self.user_id, subtopic_page, 'Added subtopic',
                [topic_domain.TopicChange({
                    'cmd': topic_domain.CMD_ADD_SUBTOPIC,
                    'subtopic_id': 1,
                    'title': 'Dummy Subtopic Title'
                })]
            )
            # Generate translation opportunities for the story's
            # explorations, then publish the story.
            exp_ids_in_story = story.story_contents.get_all_linked_exp_ids()
            opportunity_services.add_new_exploration_opportunities(
                story_id, exp_ids_in_story)
            topic_services.publish_story(topic_id_1, story_id, self.user_id)
        else:
            raise Exception('Cannot load new structures data in production.')

    def _generate_dummy_skill_and_questions(self):
        """Generates one dummy skill with 15 questions of random difficulty
        (dev mode only).

        Raises:
            Exception. Called outside of dev mode, or the caller is not a
                curriculum admin.
        """
        if constants.DEV_MODE:
            if feconf.ROLE_ID_CURRICULUM_ADMIN not in self.user.roles:
                raise Exception(
                    'User does not have enough rights to generate data.')
            skill_id = skill_services.get_new_skill_id()
            # Random suffix keeps repeated runs from colliding on the name.
            skill_name = 'Dummy Skill %s' % python_utils.UNICODE(
                random.getrandbits(32))
            skill = self._create_dummy_skill(
                skill_id, skill_name, '<p>Dummy Explanation 1</p>')
            skill_services.save_new_skill(self.user_id, skill)
            for i in range(15):
                question_id = question_services.get_new_question_id()
                question_name = 'Question number %s %s' % (
                    python_utils.UNICODE(i), skill_name)
                question = self._create_dummy_question(
                    question_id, question_name, [skill_id])
                question_services.add_question(self.user_id, question)
                question_difficulty = list(
                    constants.SKILL_DIFFICULTY_LABEL_TO_FLOAT.values())
                random_difficulty = random.choice(question_difficulty)
                question_services.create_new_question_skill_link(
                    self.user_id, question_id, skill_id, random_difficulty)
        else:
            raise Exception('Cannot generate dummy skills in production.')

    def _reload_collection(self, collection_id):
        """Reloads the demo collection with the given id (dev mode only).

        Raises:
            Exception. Called outside of dev mode.
        """
        if constants.DEV_MODE:
            logging.info(
                '[ADMIN] %s reloaded collection %s' %
                (self.user_id, collection_id))
            collection_services.load_demo(collection_id)
            rights_manager.release_ownership_of_collection(
                user_services.get_system_user(), collection_id)
        else:
            raise Exception('Cannot reload a collection in production.')

    def _generate_dummy_explorations(
            self, num_dummy_exps_to_generate, num_dummy_exps_to_publish):
        """Generates dummy explorations and publishes the first
        num_dummy_exps_to_publish of them (dev mode only).

        Raises:
            Exception. Called outside of dev mode.
        """
        if constants.DEV_MODE:
            logging.info(
                '[ADMIN] %s generated %s number of dummy explorations' %
                (self.user_id, num_dummy_exps_to_generate))
            possible_titles = ['Hulk Neuroscience', 'Quantum Starks',
                               'Wonder Anatomy',
                               'Elvish, language of "Lord of the Rings',
                               'The Science of Superheroes']
            exploration_ids_to_publish = []
            for i in range(num_dummy_exps_to_generate):
                title = random.choice(possible_titles)
                category = random.choice(constants.SEARCH_DROPDOWN_CATEGORIES)
                new_exploration_id = exp_fetchers.get_new_exploration_id()
                exploration = exp_domain.Exploration.create_default_exploration(
                    new_exploration_id, title=title, category=category,
                    objective='Dummy Objective')
                exp_services.save_new_exploration(self.user_id, exploration)
                # Publish the first num_dummy_exps_to_publish explorations.
                # (Equivalent to the original 'i <= n - 1', written
                # idiomatically.)
                if i < num_dummy_exps_to_publish:
                    exploration_ids_to_publish.append(new_exploration_id)
                    rights_manager.publish_exploration(
                        self.user, new_exploration_id)
            exp_services.index_explorations_given_ids(
                exploration_ids_to_publish)
        else:
            raise Exception('Cannot generate dummy explorations in production.')
class AdminRoleHandler(base.BaseHandler):
    """Handler for viewing and editing user roles from the admin page."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'GET': {
            'filter_criterion': {
                'schema': {
                    'type': 'basestring',
                    'choices': [
                        feconf.USER_FILTER_CRITERION_ROLE,
                        feconf.USER_FILTER_CRITERION_USERNAME
                    ]
                }
            },
            'role': {
                'schema': {
                    'type': 'basestring',
                    'choices': role_services.VIEWABLE_ROLES
                },
                'default_value': None
            },
            'username': {
                'schema': {
                    'type': 'basestring'
                },
                'default_value': None
            }
        },
        'PUT': {
            'role': {
                'schema': {
                    'type': 'basestring',
                    'choices': feconf.ALLOWED_USER_ROLES
                }
            },
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        },
        'DELETE': {
            'role': {
                'schema': {
                    'type': 'basestring',
                    'choices': feconf.ALLOWED_USER_ROLES
                }
            },
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def get(self):
        """Looks up users either by role or by username, and logs the query.

        By role: renders the usernames holding that role. By username:
        renders that user's roles, managed topic ids and banned status.

        Raises:
            InvalidInputException. Username filtering was requested for a
                username that does not exist.
        """
        filter_criterion = self.normalized_request.get(
            'filter_criterion')
        if filter_criterion == feconf.USER_FILTER_CRITERION_ROLE:
            role = self.normalized_request.get(
                feconf.USER_FILTER_CRITERION_ROLE)
            role_services.log_role_query(
                self.user_id, feconf.ROLE_ACTION_VIEW_BY_ROLE,
                role=role)
            self.render_json({
                'usernames': user_services.get_usernames_by_role(role)
            })
        elif filter_criterion == feconf.USER_FILTER_CRITERION_USERNAME:
            username = self.normalized_request.get(
                feconf.USER_FILTER_CRITERION_USERNAME)
            user_id = user_services.get_user_id_from_username(username)
            role_services.log_role_query(
                self.user_id, feconf.ROLE_ACTION_VIEW_BY_USERNAME,
                username=username)
            if user_id is None:
                raise self.InvalidInputException(
                    'User with given username does not exist.')
            user_settings = user_services.get_user_settings(user_id)
            user_roles = user_settings.roles
            managed_topic_ids = []
            if feconf.ROLE_ID_TOPIC_MANAGER in user_roles:
                managed_topic_ids = [
                    rights.id for rights in
                    topic_fetchers.get_topic_rights_with_user(user_id)]
            user_roles_dict = {
                'roles': user_roles,
                'managed_topic_ids': managed_topic_ids,
                'banned': user_settings.banned
            }
            self.render_json(user_roles_dict)

    @acl_decorators.can_access_admin_page
    def put(self):
        """Adds a role to the given user.

        Raises:
            InvalidInputException. The username does not exist, or the role
                is topic manager (handled by TopicManagerRoleHandler).
        """
        # NOTE(review): reads raw self.payload rather than
        # self.normalized_payload, unlike sibling handlers — confirm
        # whether schema normalization should apply here.
        username = self.payload.get('username')
        role = self.payload.get('role')
        user_settings = user_services.get_user_settings_from_username(username)
        if user_settings is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')
        if role == feconf.ROLE_ID_TOPIC_MANAGER:
            # The Topic manager role assignment is handled via
            # TopicManagerRoleHandler.
            raise self.InvalidInputException(
                'Unsupported role for this handler.')
        user_services.add_user_role(user_settings.user_id, role)
        self.render_json({})

    @acl_decorators.can_access_admin_page
    def delete(self):
        """Removes a role from the given user, first deassigning them from
        all topics if the role being removed is topic manager.

        Raises:
            InvalidInputException. The username does not exist.
        """
        # NOTE(review): reads raw self.request rather than
        # self.normalized_request — confirm whether schema normalization
        # should apply here.
        username = self.request.get('username')
        role = self.request.get('role')
        user_id = user_services.get_user_id_from_username(username)
        if user_id is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')
        if role == feconf.ROLE_ID_TOPIC_MANAGER:
            topic_services.deassign_user_from_all_topics(self.user, user_id)
        user_services.remove_user_role(user_id, role)
        self.render_json({})
class TopicManagerRoleHandler(base.BaseHandler):
    """Handler for assigning or deassigning the topic-manager role for a
    user on a specific topic.
    """

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'action': {
                'schema': {
                    'type': 'basestring',
                    'choices': ['assign', 'deassign']
                }
            },
            'topic_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Grants or revokes manager rights on the given topic, adding or
        removing the site-wide topic-manager role as needed.

        Raises:
            InvalidInputException. The username does not exist.
        """
        username = self.normalized_payload.get('username')
        action = self.normalized_payload.get('action')
        topic_id = self.normalized_payload.get('topic_id')
        user_settings = user_services.get_user_settings_from_username(username)
        if user_settings is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')
        user_id = user_settings.user_id
        if action == 'assign':
            # Idiom fix: 'X not in Y' (was 'not X in Y').
            if feconf.ROLE_ID_TOPIC_MANAGER not in user_settings.roles:
                user_services.add_user_role(
                    user_id, feconf.ROLE_ID_TOPIC_MANAGER)
            topic_manager = user_services.get_user_actions_info(user_id)
            topic_services.assign_role(
                user_services.get_system_user(),
                topic_manager, topic_domain.ROLE_MANAGER, topic_id)
        elif action == 'deassign':
            topic_services.deassign_manager_role_from_topic(
                user_services.get_system_user(), user_id, topic_id)
            # Drop the site-wide role once the user manages no topics.
            if not topic_fetchers.get_topic_rights_with_user(user_id):
                user_services.remove_user_role(
                    user_id, feconf.ROLE_ID_TOPIC_MANAGER)
        self.render_json({})
class BannedUsersHandler(base.BaseHandler):
    """Handler for banning and unbanning users."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        },
        'DELETE': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Marks the given user as banned, first removing them from all
        topics they manage.

        Raises:
            InvalidInputException. The username does not exist.
        """
        username = self.normalized_payload.get('username')
        user_id = user_services.get_user_id_from_username(username)
        if user_id is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')
        topic_services.deassign_user_from_all_topics(self.user, user_id)
        user_services.mark_user_banned(user_id)
        self.render_json({})

    @acl_decorators.can_access_admin_page
    def delete(self):
        """Removes the banned mark from the given user.

        Raises:
            InvalidInputException. The username does not exist.
        """
        username = self.normalized_request.get('username')
        user_id = user_services.get_user_id_from_username(username)
        if user_id is None:
            raise self.InvalidInputException(
                'User with given username does not exist.')
        user_services.unmark_user_banned(user_id)
        self.render_json({})
class AdminSuperAdminPrivilegesHandler(base.BaseHandler):
    """Handler for granting and revoking super-admin privileges.

    Only the default system admin (the feconf.ADMIN_EMAIL_ADDRESS account)
    may call either method.
    """

    PUT_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    DELETE_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        },
        'DELETE': {
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Grants super-admin privileges to the given username.

        Raises:
            UnauthorizedUserException. The caller is not the default admin.
            InvalidInputException. No user exists with the given username.
        """
        if self.email != feconf.ADMIN_EMAIL_ADDRESS:
            raise self.UnauthorizedUserException(
                'Only the default system admin can manage super admins')
        username = self.normalized_payload.get('username')
        user_id = user_services.get_user_id_from_username(username)
        if user_id is None:
            raise self.InvalidInputException('No such user exists')
        auth_services.grant_super_admin_privileges(user_id)
        self.render_json(self.values)

    @acl_decorators.can_access_admin_page
    def delete(self):
        """Revokes super-admin privileges from the given username.

        Raises:
            UnauthorizedUserException. The caller is not the default admin.
            InvalidInputException. No user exists with the given username,
                or the target is the default super admin account itself.
        """
        if self.email != feconf.ADMIN_EMAIL_ADDRESS:
            raise self.UnauthorizedUserException(
                'Only the default system admin can manage super admins')
        username = self.normalized_request.get('username')
        user_settings = user_services.get_user_settings_from_username(username)
        if user_settings is None:
            raise self.InvalidInputException('No such user exists')
        if user_settings.email == feconf.ADMIN_EMAIL_ADDRESS:
            raise self.InvalidInputException(
                'Cannot revoke privileges from the default super admin account')
        auth_services.revoke_super_admin_privileges(user_settings.user_id)
        self.render_json(self.values)
class AdminTopicsCsvFileDownloader(base.BaseHandler):
    """Handler that serves the topic-similarities matrix as a CSV file."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_DOWNLOADABLE
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {'GET': {}}

    @acl_decorators.can_access_admin_page
    def get(self):
        """Streams the current topic similarities as topic_similarities.csv."""
        topic_similarities = (
            recommendations_services.get_topic_similarities_as_csv()
        )
        # Downloadable file accepts only bytes, so we need to encode
        # topic_similarities to bytes.
        self.render_downloadable_file(
            io.BytesIO(topic_similarities.encode('utf-8')),
            'topic_similarities.csv',
            'text/csv'
        )
class DataExtractionQueryHandler(base.BaseHandler):
    """Handler for extracting submitted answers from an exploration state."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'GET': {
            'exp_id': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'exp_version': {
                'schema': {
                    'type': 'int'
                }
            },
            'state_name': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'num_answers': {
                'schema': {
                    'type': 'int'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def get(self):
        """Extracts submitted answers for one state of an exploration and
        renders them as JSON under the 'data' key.

        Raises:
            InvalidInputException. The exploration/version pair does not
                exist, or the exploration has no state with the given name.
        """
        exp_id = self.normalized_request.get('exp_id')
        exp_version = self.normalized_request.get('exp_version')
        # strict=False makes the fetch return None (instead of raising) when
        # the exploration at the requested version is missing.
        exploration = exp_fetchers.get_exploration_by_id(
            exp_id, strict=False, version=exp_version)
        if exploration is None:
            raise self.InvalidInputException(
                'Entity for exploration with id %s and version %s not found.'
                % (exp_id, exp_version))
        state_name = self.normalized_request.get('state_name')
        num_answers = self.normalized_request.get('num_answers')
        if state_name not in exploration.states:
            raise self.InvalidInputException(
                'Exploration \'%s\' does not have \'%s\' state.'
                % (exp_id, state_name))
        state_answers = stats_services.get_state_answers(
            exp_id, exp_version, state_name)
        extracted_answers = state_answers.get_submitted_answer_dict_list()
        # A non-positive num_answers means "return every answer".
        if num_answers > 0:
            extracted_answers = extracted_answers[:num_answers]
        response = {
            'data': extracted_answers
        }
        self.render_json(response)
class SendDummyMailToAdminHandler(base.BaseHandler):
    """Handler for sending a test email to the admin."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {'POST': {}}

    @acl_decorators.can_access_admin_page
    def post(self):
        """Sends a dummy email to the admin, if email sending is enabled.

        Raises:
            InvalidInputException. The app is not configured to send emails.
        """
        username = self.username
        if feconf.CAN_SEND_EMAILS:
            email_manager.send_dummy_mail_to_admin(username)
            self.render_json({})
        else:
            raise self.InvalidInputException('This app cannot send emails.')
class UpdateUsernameHandler(base.BaseHandler):
    """Handler for renaming usernames."""

    URL_PATH_ARGS_SCHEMAS = {}
    # The new username is additionally capped at the site-wide maximum
    # username length.
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'old_username': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'new_username': {
                'schema': {
                    'type': 'basestring',
                    'validators': [{
                        'id': 'has_length_at_most',
                        'max_value': constants.MAX_USERNAME_LENGTH
                    }]
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Renames 'old_username' to 'new_username' and records the change
        in the username-change audit log.

        Raises:
            InvalidInputException. The old username does not exist, or the
                new username is already taken.
        """
        old_username = self.normalized_payload.get('old_username')
        new_username = self.normalized_payload.get('new_username')
        user_id = user_services.get_user_id_from_username(old_username)
        if user_id is None:
            raise self.InvalidInputException(
                'Invalid username: %s' % old_username)
        if user_services.is_username_taken(new_username):
            raise self.InvalidInputException('Username already taken.')
        user_services.set_username(user_id, new_username)
        # Audit trail: record who performed the rename and both names.
        user_services.log_username_change(
            self.user_id, old_username, new_username)
        self.render_json({})
class NumberOfDeletionRequestsHandler(base.BaseHandler):
    """Handler that reports how many account-deletion requests are pending."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {'GET': {}}

    @acl_decorators.can_access_admin_page
    def get(self):
        """Return the count of pending deletion-request models as JSON."""
        pending_count = (
            wipeout_service.get_number_of_pending_deletion_requests())
        self.render_json(
            {'number_of_pending_deletion_models': pending_count})
class VerifyUserModelsDeletedHandler(base.BaseHandler):
    """Handler that checks whether models for a deleted user still exist."""

    GET_HANDLER_ERROR_RETURN_TYPE = feconf.HANDLER_TYPE_JSON
    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'GET': {
            'user_id': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def get(self):
        """Report whether any models related to the given user remain."""
        target_user_id = self.normalized_request.get('user_id')
        deletion_is_complete = wipeout_service.verify_user_deleted(
            target_user_id, include_delete_at_end_models=True)
        # The response key is inverted relative to the service call: it
        # answers "do related models still exist?".
        self.render_json(
            {'related_models_exist': not deletion_is_complete})
class DeleteUserHandler(base.BaseHandler):
    """Handler that lets an authorized admin start deletion of a user."""

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'DELETE': {
            'user_id': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'username': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_delete_any_user
    def delete(self):
        """Mark the given user for deletion.

        The caller must supply both the user ID and the username, and the
        username must resolve to that same user ID; this cross-check guards
        against deleting the wrong account.

        Raises:
            InvalidInputException. The username is unknown, or it resolves
                to a different user ID than the one supplied.
        """
        supplied_user_id = self.normalized_request.get('user_id')
        supplied_username = self.normalized_request.get('username')

        resolved_user_id = (
            user_services.get_user_id_from_username(supplied_username))
        if resolved_user_id is None:
            raise self.InvalidInputException(
                'The username doesn\'t belong to any user'
            )
        if resolved_user_id != supplied_user_id:
            raise self.InvalidInputException(
                'The user ID retrieved from the username and '
                'the user ID provided by admin differ.'
            )

        wipeout_service.pre_delete_user(supplied_user_id)
        self.render_json({'success': True})
class UpdateBlogPostHandler(base.BaseHandler):
    """Handler that lets an admin change a blog post's author and
    publication date.
    """

    URL_PATH_ARGS_SCHEMAS = {}
    HANDLER_ARGS_SCHEMAS = {
        'PUT': {
            'blog_post_id': {
                'schema': {
                    'type': 'basestring'
                }
            },
            'author_username': {
                'schema': {
                    'type': 'basestring',
                    'validators': [{
                        'id': 'has_length_at_most',
                        'max_value': constants.MAX_USERNAME_LENGTH
                    }]
                }
            },
            'published_on': {
                'schema': {
                    'type': 'basestring'
                }
            }
        }
    }

    @acl_decorators.can_access_admin_page
    def put(self):
        """Reassign a blog post's author and published-on date.

        Raises:
            InvalidInputException. The author username is unknown, or the
                user lacks blog-dashboard access rights.
            PageNotFoundException. No blog post exists with the given id.
        """
        post_id = self.normalized_payload.get('blog_post_id')
        new_author_username = self.normalized_payload.get('author_username')
        new_published_on = self.normalized_payload.get('published_on')

        new_author_id = user_services.get_user_id_from_username(
            new_author_username)
        if new_author_id is None:
            raise self.InvalidInputException(
                'Invalid username: %s' % new_author_username)

        # Only users who can access the blog dashboard may be authors.
        author_actions = user_services.get_user_actions_info(
            new_author_id).actions
        if role_services.ACTION_ACCESS_BLOG_DASHBOARD not in author_actions:
            raise self.InvalidInputException(
                'User does not have enough rights to be blog post author.')

        if blog_services.get_blog_post_by_id(post_id, strict=False) is None:
            raise self.PageNotFoundException(
                Exception(
                    'The blog post with the given id or url doesn\'t exist.'))

        blog_services.update_blog_models_author_and_published_on_date(
            post_id, new_author_id, new_published_on)
        self.render_json({})
| true | true |
f723f6aed494f61892583333010aad58dbc25f9a | 1,503 | py | Python | test/test_maintenance_configuration_api.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | test/test_maintenance_configuration_api.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | test/test_maintenance_configuration_api.py | cvent/octopus-deploy-api-client | 0e03e842e1beb29b132776aee077df570b88366a | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import octopus_deploy_swagger_client
from octopus_deploy_client.maintenance_configuration_api import MaintenanceConfigurationApi # noqa: E501
from octopus_deploy_swagger_client.rest import ApiException
class TestMaintenanceConfigurationApi(unittest.TestCase):
    """MaintenanceConfigurationApi unit test stubs.

    Generated scaffolding: each test method is an empty stub to be filled
    in with real request/response expectations.
    """

    def setUp(self):
        # Instantiate via the class imported at module level. The previous
        # code referenced `octopus_deploy_client.maintenance_configuration_api`,
        # but `from X.Y import Z` binds only `Z`, so the bare module name was
        # never defined and setUp raised NameError on every run.
        self.api = MaintenanceConfigurationApi()  # noqa: E501

    def tearDown(self):
        pass

    def test_custom_action_response_descriptor_octopus_server_web_api_actions_maintenance_configuration_get_action(self):
        """Test case for custom_action_response_descriptor_octopus_server_web_api_actions_maintenance_configuration_get_action
        """
        pass

    def test_custom_action_response_descriptor_octopus_server_web_api_actions_maintenance_configuration_update_action(self):
        """Test case for custom_action_response_descriptor_octopus_server_web_api_actions_maintenance_configuration_update_action
        """
        pass
if __name__ == '__main__':
unittest.main()
| 31.978723 | 129 | 0.795742 |
from __future__ import absolute_import
import unittest
import octopus_deploy_swagger_client
from octopus_deploy_client.maintenance_configuration_api import MaintenanceConfigurationApi
from octopus_deploy_swagger_client.rest import ApiException
class TestMaintenanceConfigurationApi(unittest.TestCase):
def setUp(self):
self.api = octopus_deploy_client.maintenance_configuration_api.MaintenanceConfigurationApi()
def tearDown(self):
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_maintenance_configuration_get_action(self):
pass
def test_custom_action_response_descriptor_octopus_server_web_api_actions_maintenance_configuration_update_action(self):
pass
if __name__ == '__main__':
unittest.main()
| true | true |
f723f6d3146fbf9c9696d5956391430c0797a221 | 6,843 | py | Python | src/azure-cli/setup.py | t-bzhan/azure-cli | d64b25204b661438e9284f261bc5a11f3221c837 | [
"MIT"
] | null | null | null | src/azure-cli/setup.py | t-bzhan/azure-cli | d64b25204b661438e9284f261bc5a11f3221c837 | [
"MIT"
] | null | null | null | src/azure-cli/setup.py | t-bzhan/azure-cli | d64b25204b661438e9284f261bc5a11f3221c837 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
from codecs import open
from setuptools import setup, find_packages
import sys

try:
    # Optional hook that customizes the bdist_wheel command for azure-cli
    # builds; when it is unavailable, fall back to the default commands.
    from azure_cli_bdist_wheel import cmdclass
except ImportError:
    from distutils import log as logger
    logger.warn("Wheel is not available, disabling bdist_wheel hook")
    cmdclass = {}
VERSION = "2.20.0"

# If we have source, validate that our version numbers match
# This should prevent uploading releases with mismatched versions.
# NOTE: `open` here is codecs.open (imported at the top of this file), so
# the `encoding=` argument is accepted on Python 2 as well.
try:
    with open('azure/cli/__main__.py', 'r', encoding='utf-8') as f:
        content = f.read()
except OSError:
    # Source file is absent (e.g. building from an sdist layout that does
    # not ship it); skip the consistency check rather than failing.
    pass
else:
    import re
    m = re.search(r'__version__\s*=\s*[\'"](.+?)[\'"]', content)
    if not m:
        print('Could not find __version__ in azure/cli/__main__.py')
        sys.exit(1)
    if m.group(1) != VERSION:
        print('Expected __version__ = "{}"; found "{}"'.format(VERSION, m.group(1)))
        sys.exit(1)
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = [
'antlr4-python3-runtime~=4.7.2',
'azure-appconfiguration~=1.1.1',
'azure-batch~=10.0.0',
'azure-cli-core=={}'.format(VERSION),
'azure-cosmos~=3.0,>=3.0.2',
'azure-datalake-store~=0.0.49',
'azure-functions-devops-build~=0.0.22',
'azure-graphrbac~=0.60.0',
'azure-keyvault~=1.1.0',
'azure-keyvault-administration==4.0.0b3',
'azure-mgmt-advisor>=2.0.1,<3.0.0',
'azure-mgmt-apimanagement~=0.2.0',
'azure-mgmt-applicationinsights~=0.1.1',
'azure-mgmt-appconfiguration~=1.0.1',
'azure-mgmt-authorization~=0.61.0',
'azure-mgmt-batch~=9.0.0',
'azure-mgmt-batchai~=2.0',
'azure-mgmt-billing==1.0.0',
'azure-mgmt-botservice~=0.3.0',
'azure-mgmt-cdn==7.0.0',
'azure-mgmt-cognitiveservices~=6.3.0',
'azure-mgmt-compute~=19.0.0',
'azure-mgmt-consumption~=2.0',
'azure-mgmt-containerinstance~=1.4',
'azure-mgmt-containerregistry==3.0.0rc17',
'azure-mgmt-cosmosdb~=3.0.0',
'azure-mgmt-containerservice~=9.4.0',
'azure-mgmt-databoxedge~=0.2.0',
'azure-mgmt-datalake-analytics~=0.2.1',
'azure-mgmt-datalake-store~=0.5.0',
'azure-mgmt-datamigration~=4.1.0',
'azure-mgmt-deploymentmanager~=0.2.0',
'azure-mgmt-devtestlabs~=4.0',
'azure-mgmt-dns~=2.1',
'azure-mgmt-eventgrid==3.0.0rc7',
'azure-mgmt-eventhub~=4.1.0',
'azure-mgmt-hdinsight~=2.2.0',
'azure-mgmt-imagebuilder~=0.4.0',
'azure-mgmt-iotcentral~=4.1.0',
'azure-mgmt-iothub~=0.12.0',
'azure-mgmt-iothubprovisioningservices~=0.2.0',
'azure-mgmt-keyvault==8.0.0',
'azure-mgmt-kusto~=0.3.0',
'azure-mgmt-loganalytics~=8.0.0',
'azure-mgmt-managedservices~=1.0',
'azure-mgmt-managementgroups~=0.1',
'azure-mgmt-maps~=0.1.0',
'azure-mgmt-marketplaceordering~=0.1',
'azure-mgmt-media~=3.0',
'azure-mgmt-monitor~=2.0.0',
'azure-mgmt-msi~=0.2',
'azure-mgmt-netapp~=0.16.0',
'azure-mgmt-network~=17.1.0',
'azure-mgmt-policyinsights~=0.5.0',
'azure-mgmt-privatedns~=0.1.0',
'azure-mgmt-rdbms~=3.1.0rc1',
'azure-mgmt-recoveryservices~=0.4.0',
'azure-mgmt-recoveryservicesbackup~=0.11.0',
'azure-mgmt-redhatopenshift==0.1.0',
'azure-mgmt-redis~=7.0.0rc1',
'azure-mgmt-relay~=0.1.0',
# 'azure-mgmt-reservations~=0.6.0',
'azure-mgmt-reservations==0.6.0', # TODO: Use requirements.txt instead of '==' #9781
'azure-mgmt-resource==12.0.0',
'azure-mgmt-search~=8.0',
'azure-mgmt-security~=0.6.0',
'azure-mgmt-servicebus~=0.6.0',
'azure-mgmt-servicefabric~=0.5.0',
'azure-mgmt-signalr~=0.4.0',
'azure-mgmt-sql~=0.26.0',
'azure-mgmt-sqlvirtualmachine~=0.5.0',
'azure-mgmt-storage~=17.0.0',
'azure-mgmt-trafficmanager~=0.51.0',
'azure-mgmt-web~=0.48.0',
'azure-mgmt-synapse~=0.6.0',
'azure-multiapi-storage~=0.6.0',
'azure-loganalytics~=0.1.0',
'azure-storage-common~=1.4',
'azure-synapse-accesscontrol~=0.2.0',
'azure-synapse-artifacts~=0.3.0',
'azure-synapse-spark~=0.2.0',
'fabric~=2.4',
'jsmin~=2.2.2',
'pytz==2019.1',
'scp~=0.13.2',
'sshtunnel~=0.1.4',
'urllib3[secure]>=1.25.9,<2.0.0',
'vsts-cd-manager~=1.0.0,>=1.0.2',
'websocket-client~=0.56.0',
'xmltodict~=0.12',
'javaproperties==0.5.1',
'jsondiff==1.2.0',
'semver==2.13.0'
]
# Extra requirements used only by the test suite.
TESTS_REQUIRE = [
    'mock~=4.0'
]

# The PyPI long description is the README followed by the changelog
# (concatenated below in the setup() call).
with open('README.rst', 'r', encoding='utf-8') as f:
    README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
    HISTORY = f.read()
setup(
name='azure-cli',
version=VERSION,
description='Microsoft Azure Command-Line Tools',
long_description=README + '\n\n' + HISTORY,
license='MIT',
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
url='https://github.com/Azure/azure-cli',
zip_safe=False,
classifiers=CLASSIFIERS,
scripts=[
'az',
'az.completion.sh',
'az.bat',
],
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests", "azure", "azure.cli"]),
install_requires=DEPENDENCIES,
python_requires='>=3.6.0',
package_data={
'azure.cli.command_modules.acr': ['*.json'],
'azure.cli.command_modules.botservice': ['*.json', '*.config'],
'azure.cli.command_modules.monitor.operations': ['autoscale-parameters-template.json'],
'azure.cli.command_modules.servicefabric': [
'template/windows/template.json',
'template/windows/parameter.json',
'template/linux/template.json',
'template/linux/parameter.json',
'template/service/template.json',
'template/service/parameter.json'
],
'azure.cli.command_modules.appservice': [
'resources/WindowsFunctionsStacks.json',
'resources/LinuxFunctionsStacks.json',
'resources/WebappRuntimeStacks.json',
'resources/GenerateRandomAppNames.json'
],
'azure.cli.command_modules.rdbms': [
'randomname/adjectives.txt',
'randomname/nouns.txt'
]
},
cmdclass=cmdclass
)
| 34.044776 | 103 | 0.604998 |
from __future__ import print_function
from codecs import open
from setuptools import setup, find_packages
import sys
try:
from azure_cli_bdist_wheel import cmdclass
except ImportError:
from distutils import log as logger
logger.warn("Wheel is not available, disabling bdist_wheel hook")
cmdclass = {}
VERSION = "2.20.0"
try:
with open('azure/cli/__main__.py', 'r', encoding='utf-8') as f:
content = f.read()
except OSError:
pass
else:
import re
m = re.search(r'__version__\s*=\s*[\'"](.+?)[\'"]', content)
if not m:
print('Could not find __version__ in azure/cli/__main__.py')
sys.exit(1)
if m.group(1) != VERSION:
print('Expected __version__ = "{}"; found "{}"'.format(VERSION, m.group(1)))
sys.exit(1)
CLASSIFIERS = [
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'License :: OSI Approved :: MIT License',
]
DEPENDENCIES = [
'antlr4-python3-runtime~=4.7.2',
'azure-appconfiguration~=1.1.1',
'azure-batch~=10.0.0',
'azure-cli-core=={}'.format(VERSION),
'azure-cosmos~=3.0,>=3.0.2',
'azure-datalake-store~=0.0.49',
'azure-functions-devops-build~=0.0.22',
'azure-graphrbac~=0.60.0',
'azure-keyvault~=1.1.0',
'azure-keyvault-administration==4.0.0b3',
'azure-mgmt-advisor>=2.0.1,<3.0.0',
'azure-mgmt-apimanagement~=0.2.0',
'azure-mgmt-applicationinsights~=0.1.1',
'azure-mgmt-appconfiguration~=1.0.1',
'azure-mgmt-authorization~=0.61.0',
'azure-mgmt-batch~=9.0.0',
'azure-mgmt-batchai~=2.0',
'azure-mgmt-billing==1.0.0',
'azure-mgmt-botservice~=0.3.0',
'azure-mgmt-cdn==7.0.0',
'azure-mgmt-cognitiveservices~=6.3.0',
'azure-mgmt-compute~=19.0.0',
'azure-mgmt-consumption~=2.0',
'azure-mgmt-containerinstance~=1.4',
'azure-mgmt-containerregistry==3.0.0rc17',
'azure-mgmt-cosmosdb~=3.0.0',
'azure-mgmt-containerservice~=9.4.0',
'azure-mgmt-databoxedge~=0.2.0',
'azure-mgmt-datalake-analytics~=0.2.1',
'azure-mgmt-datalake-store~=0.5.0',
'azure-mgmt-datamigration~=4.1.0',
'azure-mgmt-deploymentmanager~=0.2.0',
'azure-mgmt-devtestlabs~=4.0',
'azure-mgmt-dns~=2.1',
'azure-mgmt-eventgrid==3.0.0rc7',
'azure-mgmt-eventhub~=4.1.0',
'azure-mgmt-hdinsight~=2.2.0',
'azure-mgmt-imagebuilder~=0.4.0',
'azure-mgmt-iotcentral~=4.1.0',
'azure-mgmt-iothub~=0.12.0',
'azure-mgmt-iothubprovisioningservices~=0.2.0',
'azure-mgmt-keyvault==8.0.0',
'azure-mgmt-kusto~=0.3.0',
'azure-mgmt-loganalytics~=8.0.0',
'azure-mgmt-managedservices~=1.0',
'azure-mgmt-managementgroups~=0.1',
'azure-mgmt-maps~=0.1.0',
'azure-mgmt-marketplaceordering~=0.1',
'azure-mgmt-media~=3.0',
'azure-mgmt-monitor~=2.0.0',
'azure-mgmt-msi~=0.2',
'azure-mgmt-netapp~=0.16.0',
'azure-mgmt-network~=17.1.0',
'azure-mgmt-policyinsights~=0.5.0',
'azure-mgmt-privatedns~=0.1.0',
'azure-mgmt-rdbms~=3.1.0rc1',
'azure-mgmt-recoveryservices~=0.4.0',
'azure-mgmt-recoveryservicesbackup~=0.11.0',
'azure-mgmt-redhatopenshift==0.1.0',
'azure-mgmt-redis~=7.0.0rc1',
'azure-mgmt-relay~=0.1.0',
'azure-mgmt-reservations==0.6.0', 'azure-mgmt-resource==12.0.0',
'azure-mgmt-search~=8.0',
'azure-mgmt-security~=0.6.0',
'azure-mgmt-servicebus~=0.6.0',
'azure-mgmt-servicefabric~=0.5.0',
'azure-mgmt-signalr~=0.4.0',
'azure-mgmt-sql~=0.26.0',
'azure-mgmt-sqlvirtualmachine~=0.5.0',
'azure-mgmt-storage~=17.0.0',
'azure-mgmt-trafficmanager~=0.51.0',
'azure-mgmt-web~=0.48.0',
'azure-mgmt-synapse~=0.6.0',
'azure-multiapi-storage~=0.6.0',
'azure-loganalytics~=0.1.0',
'azure-storage-common~=1.4',
'azure-synapse-accesscontrol~=0.2.0',
'azure-synapse-artifacts~=0.3.0',
'azure-synapse-spark~=0.2.0',
'fabric~=2.4',
'jsmin~=2.2.2',
'pytz==2019.1',
'scp~=0.13.2',
'sshtunnel~=0.1.4',
'urllib3[secure]>=1.25.9,<2.0.0',
'vsts-cd-manager~=1.0.0,>=1.0.2',
'websocket-client~=0.56.0',
'xmltodict~=0.12',
'javaproperties==0.5.1',
'jsondiff==1.2.0',
'semver==2.13.0'
]
TESTS_REQUIRE = [
'mock~=4.0'
]
with open('README.rst', 'r', encoding='utf-8') as f:
README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
HISTORY = f.read()
setup(
name='azure-cli',
version=VERSION,
description='Microsoft Azure Command-Line Tools',
long_description=README + '\n\n' + HISTORY,
license='MIT',
author='Microsoft Corporation',
author_email='azpycli@microsoft.com',
url='https://github.com/Azure/azure-cli',
zip_safe=False,
classifiers=CLASSIFIERS,
scripts=[
'az',
'az.completion.sh',
'az.bat',
],
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests", "azure", "azure.cli"]),
install_requires=DEPENDENCIES,
python_requires='>=3.6.0',
package_data={
'azure.cli.command_modules.acr': ['*.json'],
'azure.cli.command_modules.botservice': ['*.json', '*.config'],
'azure.cli.command_modules.monitor.operations': ['autoscale-parameters-template.json'],
'azure.cli.command_modules.servicefabric': [
'template/windows/template.json',
'template/windows/parameter.json',
'template/linux/template.json',
'template/linux/parameter.json',
'template/service/template.json',
'template/service/parameter.json'
],
'azure.cli.command_modules.appservice': [
'resources/WindowsFunctionsStacks.json',
'resources/LinuxFunctionsStacks.json',
'resources/WebappRuntimeStacks.json',
'resources/GenerateRandomAppNames.json'
],
'azure.cli.command_modules.rdbms': [
'randomname/adjectives.txt',
'randomname/nouns.txt'
]
},
cmdclass=cmdclass
)
| true | true |
f723f6f2ec0f62ea9f018768408724d303030146 | 1,583 | py | Python | valid_subsequence.py | GerardCod/algoexpert-python | 35d7f635f68e0d28eaead815f653bf749aa275cb | [
"Apache-2.0"
] | null | null | null | valid_subsequence.py | GerardCod/algoexpert-python | 35d7f635f68e0d28eaead815f653bf749aa275cb | [
"Apache-2.0"
] | null | null | null | valid_subsequence.py | GerardCod/algoexpert-python | 35d7f635f68e0d28eaead815f653bf749aa275cb | [
"Apache-2.0"
] | null | null | null | def validateSubSequence(array, sequence):
"""
### Description
validateSubSequence -> validates if a sequence of elements is a subsequence of a list.
### Parameters
- array: the list where it will validate the subsequence.
- sequence: the potential subsequence of elements
### Returns
- True when the sequence is a valid subsequence of array.
- False when the sequence is not a valid subsequence of array.
"""
arrIdx = 0
seqIdx = 0
while arrIdx < len(array) and seqIdx < len(sequence):
if array[arrIdx] == sequence[seqIdx]:
seqIdx += 1
arrIdx += 1
return seqIdx == len(sequence)
def validateSubSequenceFor(array, sequence):
    """Check whether ``sequence`` is a valid subsequence of ``array``.

    Index-based variant of :func:`validateSubSequence` with identical
    semantics.

    ### Parameters
    - array: the list in which to search.
    - sequence: the candidate subsequence.

    ### Returns
    - True if every element of ``sequence`` can be matched left-to-right
      against elements of ``array``; False otherwise.
    """
    matched = 0
    total = len(sequence)
    idx = 0
    # Advance through the array, consuming one sequence element per match;
    # stop early once the whole sequence has been accounted for.
    while idx < len(array) and matched < total:
        if array[idx] == sequence[matched]:
            matched += 1
        idx += 1
    return matched == total
# Manual smoke test: both variants run on the same sample data and should
# each print True when this file is executed as a script.
if __name__ == "__main__":
    print(validateSubSequence([5, 1, 22, 25, 6, -1, 8, 10], [1, 6, -1, 10]))
    print(validateSubSequenceFor([5, 1, 22, 25, 6, -1, 8, 10], [1, 6, -1, 10]))
arrIdx = 0
seqIdx = 0
while arrIdx < len(array) and seqIdx < len(sequence):
if array[arrIdx] == sequence[seqIdx]:
seqIdx += 1
arrIdx += 1
return seqIdx == len(sequence)
def validateSubSequenceFor(array, sequence):
seqIdx = 0
for element in array:
if seqIdx == len(sequence):
break
if element == sequence[seqIdx]:
seqIdx += 1
return seqIdx == len(sequence)
if __name__ == "__main__":
print(validateSubSequence([5, 1, 22, 25, 6, -1, 8, 10], [1, 6, -1, 10]))
print(validateSubSequenceFor([5, 1, 22, 25, 6, -1, 8, 10], [1, 6, -1, 10])) | true | true |
f723f81e9a3f4e12dffa9c9a8c28e3e333ab9a3c | 3,092 | py | Python | fhirbug/Fhir/Resources/address.py | VerdantAI/fhirbug | 8a8e2555c0edfeee0a7edbc8d67f2fcb2edd3c2d | [
"MIT"
] | 8 | 2019-01-06T18:11:20.000Z | 2022-02-24T02:06:55.000Z | fhirbug/Fhir/Resources/address.py | VerdantAI/fhirbug | 8a8e2555c0edfeee0a7edbc8d67f2fcb2edd3c2d | [
"MIT"
] | 5 | 2019-01-25T14:15:35.000Z | 2021-06-01T23:22:41.000Z | fhirbug/Fhir/Resources/address.py | VerdantAI/fhirbug | 8a8e2555c0edfeee0a7edbc8d67f2fcb2edd3c2d | [
"MIT"
] | 3 | 2020-10-14T23:09:29.000Z | 2021-08-09T19:27:31.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b (http://hl7.org/fhir/StructureDefinition/Address) on 2019-01-25.
# 2019, SMART Health IT.
##
from . import element
class Address(element.Element):
    """ An address expressed using postal conventions (as opposed to GPS or other
    location definition formats).

    An address expressed using postal conventions (as opposed to GPS or other
    location definition formats). This data type may be used to convey
    addresses for use in delivering mail as well as for visiting locations
    which might not be valid for mail delivery. There are a variety of postal
    address formats defined around the world.
    """

    # FHIR resource type name used for (de)serialization.
    resource_type = "Address"

    def __init__(self, jsondict=None, strict=True, **kwargs):
        """ Initialize all valid properties.

        :raises: FHIRValidationError on validation errors, unless strict is False
        :param dict jsondict: A JSON dictionary to use for initialization
        :param bool strict: If True (the default), invalid variables will raise a TypeError
        """

        self.city = None
        """ Name of city, town etc..
        Type `str`. """

        self.country = None
        """ Country (e.g. can be ISO 3166 2 or 3 letter code).
        Type `str`. """

        self.district = None
        """ District name (aka county).
        Type `str`. """

        self.line = None
        """ Street name, number, direction & P.O. Box etc..
        List of `str` items. """

        self.period = None
        """ Time period when address was/is in use.
        Type `Period` (represented as `dict` in JSON). """

        self.postalCode = None
        """ Postal code for area.
        Type `str`. """

        self.state = None
        """ Sub-unit of country (abbreviations ok).
        Type `str`. """

        self.text = None
        """ Text representation of the address.
        Type `str`. """

        self.type = None
        """ postal | physical | both.
        Type `str`. """

        self.use = None
        """ home | work | temp | old | billing - purpose of this address.
        Type `str`. """

        # Let the base Element populate attributes from jsondict and run
        # validation according to `strict`.
        super(Address, self).__init__(jsondict=jsondict, strict=strict, **kwargs)

    def elementProperties(self):
        """Return this class's property descriptors, extending the base
        Element's list.

        NOTE(review): each tuple appears to be
        (attribute, JSON name, type, is_list, of-many group, required) —
        confirm against element.Element before relying on this ordering.
        """
        js = super(Address, self).elementProperties()
        js.extend([
            ("city", "city", str, False, None, False),
            ("country", "country", str, False, None, False),
            ("district", "district", str, False, None, False),
            ("line", "line", str, True, None, False),
            ("period", "period", period.Period, False, None, False),
            ("postalCode", "postalCode", str, False, None, False),
            ("state", "state", str, False, None, False),
            ("text", "text", str, False, None, False),
            ("type", "type", str, False, None, False),
            ("use", "use", str, False, None, False),
        ])
        return js
import sys
try:
    from . import period
except ImportError:
    # If the relative import fails (e.g. while the package is still
    # initializing), reuse the module instance already registered in
    # sys.modules instead.
    period = sys.modules[__package__ + '.period']
from . import element
class Address(element.Element):
resource_type = "Address"
def __init__(self, jsondict=None, strict=True, **kwargs):
self.city = None
self.country = None
self.district = None
self.line = None
self.period = None
self.postalCode = None
self.state = None
self.text = None
self.type = None
self.use = None
super(Address, self).__init__(jsondict=jsondict, strict=strict, **kwargs)
def elementProperties(self):
js = super(Address, self).elementProperties()
js.extend([
("city", "city", str, False, None, False),
("country", "country", str, False, None, False),
("district", "district", str, False, None, False),
("line", "line", str, True, None, False),
("period", "period", period.Period, False, None, False),
("postalCode", "postalCode", str, False, None, False),
("state", "state", str, False, None, False),
("text", "text", str, False, None, False),
("type", "type", str, False, None, False),
("use", "use", str, False, None, False),
])
return js
import sys
try:
from . import period
except ImportError:
period = sys.modules[__package__ + '.period'] | true | true |
f723f8a8d4760f751862af7dd8140ab1cad4f937 | 755 | py | Python | setup.py | salesforce/bite | 0619bc6d87b81ec65cf311906da3889043176ead | [
"BSD-3-Clause"
] | 6 | 2020-12-09T01:57:13.000Z | 2021-10-09T01:50:21.000Z | setup.py | salesforce/bite | 0619bc6d87b81ec65cf311906da3889043176ead | [
"BSD-3-Clause"
] | 1 | 2021-02-16T14:50:09.000Z | 2021-02-23T07:29:15.000Z | setup.py | salesforce/bite | 0619bc6d87b81ec65cf311906da3889043176ead | [
"BSD-3-Clause"
] | null | null | null | from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="bite",
version="0.1",
author="Samson Tan",
author_email="samson.tan@salesforce.com",
description="A tokenizer that splits words into bases and inflections.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/salesforce/bite",
package_dir={"": "src"},
packages=setuptools.find_packages("src"),
classifiers=[
"Programming Language :: Python :: 3",
#"License :: OSI Approved :: BSD License",
"Development Status :: 3 - Alpha",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 30.2 | 76 | 0.65298 | from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="bite",
version="0.1",
author="Samson Tan",
author_email="samson.tan@salesforce.com",
description="A tokenizer that splits words into bases and inflections.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/salesforce/bite",
package_dir={"": "src"},
packages=setuptools.find_packages("src"),
classifiers=[
"Programming Language :: Python :: 3",
"Development Status :: 3 - Alpha",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| true | true |
f723f8c5703335b1a0fa5a181861df8ecc2a13f8 | 6,240 | py | Python | python/istio_api/mixer/v1/config/client/service_pb2.py | mt-inside/api | 3197d4dee332beb55f830899f37091c9899833f9 | [
"Apache-2.0"
] | 3 | 2020-11-30T15:35:37.000Z | 2022-01-06T14:17:18.000Z | python/istio_api/mixer/v1/config/client/service_pb2.py | mt-inside/api | 3197d4dee332beb55f830899f37091c9899833f9 | [
"Apache-2.0"
] | 54 | 2020-06-23T17:34:04.000Z | 2022-03-31T02:04:06.000Z | python/istio_api/mixer/v1/config/client/service_pb2.py | mt-inside/api | 3197d4dee332beb55f830899f37091c9899833f9 | [
"Apache-2.0"
] | 12 | 2020-07-14T23:59:57.000Z | 2022-03-22T09:59:18.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: mixer/v1/config/client/service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mixer/v1/config/client/service.proto',
package='istio.mixer.v1.config.client',
syntax='proto3',
serialized_options=_b('Z#istio.io/api/mixer/v1/config/client\310\341\036\000\250\342\036\000\360\341\036\000\330\342\036\001'),
serialized_pb=_b('\n$mixer/v1/config/client/service.proto\x12\x1cistio.mixer.v1.config.client\x1a\x14gogoproto/gogo.proto\"\xc7\x01\n\x0cIstioService\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x0e\n\x06\x64omain\x18\x03 \x01(\t\x12\x0f\n\x07service\x18\x04 \x01(\t\x12\x46\n\x06labels\x18\x05 \x03(\x0b\x32\x36.istio.mixer.v1.config.client.IstioService.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x35Z#istio.io/api/mixer/v1/config/client\xc8\xe1\x1e\x00\xa8\xe2\x1e\x00\xf0\xe1\x1e\x00\xd8\xe2\x1e\x01\x62\x06proto3')
,
dependencies=[gogoproto_dot_gogo__pb2.DESCRIPTOR,])
_ISTIOSERVICE_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=247,
serialized_end=292,
)
_ISTIOSERVICE = _descriptor.Descriptor(
name='IstioService',
full_name='istio.mixer.v1.config.client.IstioService',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='istio.mixer.v1.config.client.IstioService.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='namespace', full_name='istio.mixer.v1.config.client.IstioService.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='domain', full_name='istio.mixer.v1.config.client.IstioService.domain', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='service', full_name='istio.mixer.v1.config.client.IstioService.service', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='istio.mixer.v1.config.client.IstioService.labels', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ISTIOSERVICE_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=93,
serialized_end=292,
)
_ISTIOSERVICE_LABELSENTRY.containing_type = _ISTIOSERVICE
_ISTIOSERVICE.fields_by_name['labels'].message_type = _ISTIOSERVICE_LABELSENTRY
DESCRIPTOR.message_types_by_name['IstioService'] = _ISTIOSERVICE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IstioService = _reflection.GeneratedProtocolMessageType('IstioService', (_message.Message,), {
'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
'DESCRIPTOR' : _ISTIOSERVICE_LABELSENTRY,
'__module__' : 'mixer.v1.config.client.service_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.config.client.IstioService.LabelsEntry)
})
,
'DESCRIPTOR' : _ISTIOSERVICE,
'__module__' : 'mixer.v1.config.client.service_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.config.client.IstioService)
})
_sym_db.RegisterMessage(IstioService)
_sym_db.RegisterMessage(IstioService.LabelsEntry)
DESCRIPTOR._options = None
_ISTIOSERVICE_LABELSENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 41.6 | 624 | 0.747276 |
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
_sym_db = _symbol_database.Default()
from gogoproto import gogo_pb2 as gogoproto_dot_gogo__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='mixer/v1/config/client/service.proto',
package='istio.mixer.v1.config.client',
syntax='proto3',
serialized_options=_b('Z#istio.io/api/mixer/v1/config/client\310\341\036\000\250\342\036\000\360\341\036\000\330\342\036\001'),
serialized_pb=_b('\n$mixer/v1/config/client/service.proto\x12\x1cistio.mixer.v1.config.client\x1a\x14gogoproto/gogo.proto\"\xc7\x01\n\x0cIstioService\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tnamespace\x18\x02 \x01(\t\x12\x0e\n\x06\x64omain\x18\x03 \x01(\t\x12\x0f\n\x07service\x18\x04 \x01(\t\x12\x46\n\x06labels\x18\x05 \x03(\x0b\x32\x36.istio.mixer.v1.config.client.IstioService.LabelsEntry\x1a-\n\x0bLabelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x35Z#istio.io/api/mixer/v1/config/client\xc8\xe1\x1e\x00\xa8\xe2\x1e\x00\xf0\xe1\x1e\x00\xd8\xe2\x1e\x01\x62\x06proto3')
,
dependencies=[gogoproto_dot_gogo__pb2.DESCRIPTOR,])
_ISTIOSERVICE_LABELSENTRY = _descriptor.Descriptor(
name='LabelsEntry',
full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value', full_name='istio.mixer.v1.config.client.IstioService.LabelsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=247,
serialized_end=292,
)
_ISTIOSERVICE = _descriptor.Descriptor(
name='IstioService',
full_name='istio.mixer.v1.config.client.IstioService',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='istio.mixer.v1.config.client.IstioService.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='namespace', full_name='istio.mixer.v1.config.client.IstioService.namespace', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='domain', full_name='istio.mixer.v1.config.client.IstioService.domain', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='service', full_name='istio.mixer.v1.config.client.IstioService.service', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='labels', full_name='istio.mixer.v1.config.client.IstioService.labels', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_ISTIOSERVICE_LABELSENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=93,
serialized_end=292,
)
_ISTIOSERVICE_LABELSENTRY.containing_type = _ISTIOSERVICE
_ISTIOSERVICE.fields_by_name['labels'].message_type = _ISTIOSERVICE_LABELSENTRY
DESCRIPTOR.message_types_by_name['IstioService'] = _ISTIOSERVICE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IstioService = _reflection.GeneratedProtocolMessageType('IstioService', (_message.Message,), {
'LabelsEntry' : _reflection.GeneratedProtocolMessageType('LabelsEntry', (_message.Message,), {
'DESCRIPTOR' : _ISTIOSERVICE_LABELSENTRY,
'__module__' : 'mixer.v1.config.client.service_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.config.client.IstioService.LabelsEntry)
})
,
'DESCRIPTOR' : _ISTIOSERVICE,
'__module__' : 'mixer.v1.config.client.service_pb2'
# @@protoc_insertion_point(class_scope:istio.mixer.v1.config.client.IstioService)
})
_sym_db.RegisterMessage(IstioService)
_sym_db.RegisterMessage(IstioService.LabelsEntry)
DESCRIPTOR._options = None
_ISTIOSERVICE_LABELSENTRY._options = None
# @@protoc_insertion_point(module_scope)
| true | true |
f723f948cdc428c8f98ae5492744213241a82570 | 3,751 | py | Python | nest_csa_fan.py | danhje/neural-network-connection-algorithms-tester | 9183d8ba0cccf4bc98284d1c859471d1eba901a3 | [
"MIT"
] | null | null | null | nest_csa_fan.py | danhje/neural-network-connection-algorithms-tester | 9183d8ba0cccf4bc98284d1c859471d1eba901a3 | [
"MIT"
] | null | null | null | nest_csa_fan.py | danhje/neural-network-connection-algorithms-tester | 9183d8ba0cccf4bc98284d1c859471d1eba901a3 | [
"MIT"
] | null | null | null | '''
@author: Daniel Hjertholm
Tests for fan-in / -out networks created by the CSA implementation in NEST.
'''
import numpy.random as rnd
import random
import nest
import csa
from testsuite.fan_test import FanTester
class NEST_FanTester(FanTester):
    '''
    Tests for fan-in / -out networks created by the CSA implementation in NEST.
    '''

    def __init__(self, N_s, N_t, C, e_min=10):
        '''
        Construct a test object.

        Parameters
        ----------
        N_s  : Number of nodes in source population.
        N_t  : Number of nodes in target population.
        C    : Degree (number of connections per neuron); interpreted as
               in- or out-degree depending on self._fan, which subclasses
               set before calling this constructor.
        e_min: Minimum expected number of observations in each bin.
        '''
        nest.set_verbosity('M_FATAL')  # Suppress NEST console chatter.
        FanTester.__init__(self, N_s=N_s, N_t=N_t, C=C, e_min=e_min)

    def _reset(self, seed):
        '''
        Reset the simulator and seed the PRNGs.

        Parameters
        ----------
        seed: PRNG seed value, or None to draw a random seed.
        '''
        nest.ResetKernel()

        # Set PRNG seed values:
        if seed is None:  # 'is None' rather than '== None' (PEP 8).
            seed = rnd.randint(10 ** 10)
        seed = 4 * seed  # Reduces probability of overlapping seed values.
        random.seed(seed)  # CSA uses random.
        rnd.seed(seed + 1)  # _get_expected_distribution uses numpy.random.
        nest.SetKernelStatus({'grng_seed': seed + 2,
                              'rng_seeds': [seed + 3]})

    def _build(self):
        '''Create source and target populations of iaf_neuron nodes.'''
        self._source_pop = nest.Create('iaf_neuron', self._N_s)
        self._target_pop = nest.Create('iaf_neuron', self._N_t)

    def _connect(self):
        '''Connect the populations via a CSA fan-in or fan-out mask.'''
        finite_set = csa.cross(xrange(self._N_s), xrange(self._N_t))
        if self._fan == 'in':
            cs = csa.cset(csa.random(fanIn=self._C) * finite_set)
        else:
            cs = csa.cset(csa.random(fanOut=self._C) * finite_set)
        # NOTE(review): cs is already a cset, so the extra csa.cset() wrap
        # below looks redundant -- confirm against the csa API before removing.
        nest.CGConnect(self._source_pop, self._target_pop, csa.cset(cs))

    def _degrees(self):
        '''
        Return degree counts for the node population whose degrees are the
        random quantity under test.

        Element i of each connection tuple selects the endpoint: index 0 for
        the source node (fan-in test) and index 1 for the target node
        (fan-out test); self._counter then tallies occurrences per node.
        '''
        connections = nest.GetConnections(source=self._source_pop)
        i = 0 if self._fan == 'in' else 1
        connections = [conn[i] for conn in connections]
        return self._counter(connections)
class FanInTester(NEST_FanTester):
    '''Tests for fan-in networks created by the CSA implementation in NEST.'''

    def __init__(self, N_s, N_t, C, e_min=10):
        '''
        Construct a test object.

        Parameters
        ----------
        N_s  : Number of nodes in source population.
        N_t  : Number of nodes in target population.
        C    : In-degree (number of connections per target neuron).
        e_min: Minimum expected number of observations in each bin.
        '''
        # NOTE(review): self._fan is read by _connect()/_degrees(); it is set
        # before the base-class __init__ in case the base constructor already
        # drives the connection machinery -- confirm in FanTester.
        self._fan = 'in'
        NEST_FanTester.__init__(self, N_s, N_t, C, e_min=e_min)
class FanOutTester(NEST_FanTester):
    '''Tests for fan-out networks created by the CSA implementation in NEST.'''

    def __init__(self, N_s, N_t, C, e_min=10):
        '''
        Construct a test object.

        Parameters
        ----------
        N_s  : Number of nodes in source population.
        N_t  : Number of nodes in target population.
        C    : Out-degree (number of connections per source neuron);
               _connect() passes it as fanOut for this subclass.
        e_min: Minimum expected number of observations in each bin.
        '''
        # NOTE(review): self._fan is read by _connect()/_degrees(); it is set
        # before the base-class __init__ in case the base constructor already
        # drives the connection machinery -- confirm in FanTester.
        self._fan = 'out'
        NEST_FanTester.__init__(self, N_s, N_t, C, e_min=e_min)
if __name__ == '__main__':
    # Smoke run: fan-in test on a 100 x 100 population with in-degree 100.
    test = FanInTester(N_s=100, N_t=100, C=100)
    # two_level_test presumably repeats the network test n_runs times and
    # KS-tests the resulting statistics for uniformity (defined in the
    # FanTester base -- confirm there).
    ks, p = test.two_level_test(n_runs=100, start_seed=0)
    print 'p-value of KS-test of uniformity:', p
    test.show_CDF()
    test.show_histogram()
| 30.008 | 79 | 0.596641 | '''
@author: Daniel Hjertholm
Tests for fan-in / -out networks created by the CSA implementation in NEST.
'''
import numpy.random as rnd
import random
import nest
import csa
from testsuite.fan_test import FanTester
class NEST_FanTester(FanTester):
'''
Tests for fan-in / -out networks created by the CSA implementation in NEST.
'''
def __init__(self, N_s, N_t, C, e_min=10):
'''
Construct a test object.
Parameters
----------
N_s : Number of nodes in source population.
N_t : Number of nodes in target population.
C : In-degree (number of connections per target neuron).
e_min: Minimum expected number of observations in each bin.
'''
nest.set_verbosity('M_FATAL')
FanTester.__init__(self, N_s=N_s, N_t=N_t, C=C, e_min=e_min)
def _reset(self, seed):
'''
Reset the simulator and seed the PRNGs.
Parameters
----------
seed: PRNG seed value.
'''
nest.ResetKernel()
if seed == None:
seed = rnd.randint(10 ** 10)
seed = 4 * seed
random.seed(seed)
rnd.seed(seed + 1)
nest.SetKernelStatus({'grng_seed': seed + 2,
'rng_seeds': [seed + 3]})
def _build(self):
'''Create populations.'''
self._source_pop = nest.Create('iaf_neuron', self._N_s)
self._target_pop = nest.Create('iaf_neuron', self._N_t)
def _connect(self):
'''Connect populations.'''
finite_set = csa.cross(xrange(self._N_s), xrange(self._N_t))
if self._fan == 'in':
cs = csa.cset(csa.random(fanIn=self._C) * finite_set)
else:
cs = csa.cset(csa.random(fanOut=self._C) * finite_set)
nest.CGConnect(self._source_pop, self._target_pop, csa.cset(cs))
def _degrees(self):
'''Return list of degrees.'''
connections = nest.GetConnections(source=self._source_pop)
i = 0 if self._fan == 'in' else 1
connections = [conn[i] for conn in connections]
return self._counter(connections)
class FanInTester(NEST_FanTester):
'''Tests for fan-in networks created by the CSA implementation in NEST.'''
def __init__(self, N_s, N_t, C, e_min=10):
'''
Construct a test object.
Parameters
----------
N_s : Number of nodes in source population.
N_t : Number of nodes in target population.
C : In-degree (number of connections per target neuron).
e_min: Minimum expected number of observations in each bin.
'''
self._fan = 'in'
NEST_FanTester.__init__(self, N_s, N_t, C, e_min=e_min)
class FanOutTester(NEST_FanTester):
'''Tests for fan-out networks created by the CSA implementation in NEST.'''
def __init__(self, N_s, N_t, C, e_min=10):
'''
Construct a test object.
Parameters
----------
N_s : Number of nodes in source population.
N_t : Number of nodes in target population.
C : In-degree (number of connections per target neuron).
e_min: Minimum expected number of observations in each bin.
'''
self._fan = 'out'
NEST_FanTester.__init__(self, N_s, N_t, C, e_min=e_min)
if __name__ == '__main__':
test = FanInTester(N_s=100, N_t=100, C=100)
ks, p = test.two_level_test(n_runs=100, start_seed=0)
print 'p-value of KS-test of uniformity:', p
test.show_CDF()
test.show_histogram()
| false | true |
f723fa96b4b622359c95f9d0ad4cdd27364d7401 | 3,267 | py | Python | tests/test_paper_collection.py | h1-the-swan/paper_collection | f07ad5cd8c40ddd75df2031b15c49eee60f1d914 | [
"MIT"
] | null | null | null | tests/test_paper_collection.py | h1-the-swan/paper_collection | f07ad5cd8c40ddd75df2031b15c49eee60f1d914 | [
"MIT"
] | 2 | 2020-03-31T11:20:29.000Z | 2020-03-31T15:20:21.000Z | tests/test_paper_collection.py | h1-the-swan/paper_collection | f07ad5cd8c40ddd75df2031b15c49eee60f1d914 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Tests for `paper_collection` package."""
import unittest
from paper_collection import paper_collection
import pandas as pd
import numpy as np
class TestPaper_collection(unittest.TestCase):
    """Tests for `paper_collection` package."""

    def setUp(self):
        """Load paper, citation and author fixtures from TSV files."""
        self.df_papers = pd.read_csv('tests/jw_papers_mag2019.tsv', sep='\t')
        # Duplicate PaperIds would skew the expected node count checked below.
        self.df_papers.drop_duplicates(subset=['PaperId'], inplace=True)
        self.num_papers = len(self.df_papers)
        self.df_citations = pd.read_csv('tests/jw_citations_mag2019.tsv', sep='\t')
        self.num_citations = len(self.df_citations)
        self.df_authors = pd.read_csv('tests/jw_PaperAuthorAffiliations_mag2019.tsv', sep='\t')
        self.authors_by_paper = self.get_authors_by_paper(self.df_authors)

    def tearDown(self):
        """Tear down test fixtures, if any."""

    def get_authors_by_paper(self, df_authors):
        """Return a dict mapping paper_id to ordered author data.

        Each value is a list of {'name', 'author_id'} dicts, ordered by
        AuthorSequenceNumber.
        """
        author_data = {}
        for paper_id, group in df_authors.groupby('PaperId'):
            group = group.sort_values('AuthorSequenceNumber')
            author_data[paper_id] = [
                {'name': row.OriginalAuthor, 'author_id': row.AuthorId}
                for _, row in group.iterrows()
            ]
        return author_data

    def load_paper(self, prow):
        """Build a Paper object from one row of the papers DataFrame."""
        paper_id = prow.PaperId
        authors = self.authors_by_paper[paper_id]
        return paper_collection.Paper(dataset='mag',
                                      dataset_version='mag-2019-11-22',
                                      paper_id=paper_id,
                                      title=prow.PaperTitle,
                                      display_title=prow.OriginalTitle,
                                      doi=prow.Doi,
                                      pub_date=prow.Date,
                                      year=prow.Year,
                                      venue=prow.OriginalVenue,
                                      authors=authors,
                                      node_rank=prow.flow)

    def test_000_single_paper(self):
        """Load a single paper."""
        prow = self.df_papers.iloc[0]
        p = self.load_paper(prow)
        # unittest assertion methods survive `python -O` and give richer
        # failure messages than bare asserts.
        self.assertIsNotNone(p.display_title)
        self.assertGreater(len(p.display_title), 0)

    def test_001_collection(self):
        """Load a collection and check its size."""
        coll = paper_collection.PaperCollection(description="Paper Collection")
        for _, prow in self.df_papers.iterrows():
            coll.papers.append(self.load_paper(prow))
        self.assertEqual(len(coll), self.num_papers)

    def test_002_graph(self):
        """Construct the citation graph and check node/edge counts."""
        coll = paper_collection.PaperCollection(description="Paper Collection")
        for _, prow in self.df_papers.iterrows():
            coll.papers.append(self.load_paper(prow))
        for _, row in self.df_citations.iterrows():
            coll.citations.append((row.PaperId, row.PaperReferenceId))
        G = coll.construct_graph()
        self.assertEqual(G.number_of_nodes(), self.num_papers)
        self.assertEqual(G.number_of_edges(), self.num_citations)
| 38.435294 | 95 | 0.585859 |
import unittest
from paper_collection import paper_collection
import pandas as pd
import numpy as np
class TestPaper_collection(unittest.TestCase):
def setUp(self):
self.df_papers = pd.read_csv('tests/jw_papers_mag2019.tsv', sep='\t')
self.df_papers.drop_duplicates(subset=['PaperId'], inplace=True)
self.num_papers = len(self.df_papers)
self.df_citations = pd.read_csv('tests/jw_citations_mag2019.tsv', sep='\t')
self.num_citations = len(self.df_citations)
self.df_authors = pd.read_csv('tests/jw_PaperAuthorAffiliations_mag2019.tsv', sep='\t')
self.authors_by_paper = self.get_authors_by_paper(self.df_authors)
def tearDown(self):
def get_authors_by_paper(self, df_authors):
author_data = {}
for paper_id, group in df_authors.groupby('PaperId'):
group = group.sort_values('AuthorSequenceNumber')
this_authors = []
for _, row in group.iterrows():
this_authors.append({'name': row.OriginalAuthor, 'author_id': row.AuthorId})
author_data[paper_id] = this_authors
return author_data
def load_paper(self, prow):
paper_id = prow.PaperId
authors = self.authors_by_paper[paper_id]
return paper_collection.Paper(dataset='mag',
dataset_version='mag-2019-11-22',
paper_id=paper_id,
title=prow.PaperTitle,
display_title=prow.OriginalTitle,
doi=prow.Doi,
pub_date=prow.Date,
year=prow.Year,
venue=prow.OriginalVenue,
authors=authors,
node_rank=prow.flow)
def test_000_single_paper(self):
prow = self.df_papers.iloc[0]
p = self.load_paper(prow)
assert p.display_title is not None
assert len(p.display_title)
def test_001_collection(self):
coll = paper_collection.PaperCollection(description="Paper Collection")
for _, prow in self.df_papers.iterrows():
p = self.load_paper(prow)
coll.papers.append(p)
assert len(coll) == self.num_papers
def test_002_graph(self):
coll = paper_collection.PaperCollection(description="Paper Collection")
for _, prow in self.df_papers.iterrows():
p = self.load_paper(prow)
coll.papers.append(p)
for _, row in self.df_citations.iterrows():
coll.citations.append((row.PaperId, row.PaperReferenceId))
G = coll.construct_graph()
assert G.number_of_nodes() == self.num_papers
assert G.number_of_edges() == self.num_citations
| true | true |
f723faadd5d49357ab40cc0e11d377652ed79a23 | 541 | py | Python | scripts/idc/sortsymbols.py | camden314/CacaoSDK | 57b7e0654595eb7a432ef1faec9b239a3854cf45 | [
"MIT"
] | 4 | 2021-01-26T10:00:43.000Z | 2021-08-06T21:35:15.000Z | scripts/idc/sortsymbols.py | camden314/CacaoSDK | 57b7e0654595eb7a432ef1faec9b239a3854cf45 | [
"MIT"
] | 2 | 2021-07-26T01:55:43.000Z | 2021-07-26T17:42:20.000Z | scripts/idc/sortsymbols.py | camden314/CacaoSDK | 57b7e0654595eb7a432ef1faec9b239a3854cf45 | [
"MIT"
] | 1 | 2021-06-01T17:40:01.000Z | 2021-06-01T17:40:01.000Z | import re
# The input file holds two-line records: a demangled symbol header line
# followed by an address/metadata line.  Non-virtual-thunk entries are
# dropped, the remaining records are sorted by function name, and the
# result is written back out.

# Pre-compiled sort-key pattern: optional thunk prefix, then the function
# name up to its argument list, with an optional trailing ' const'.
NAME_RE = re.compile(r"(?:non-virtual thunk to )?(.+?)\(.*\)(?: const)?")


def sort_key(record):
    """Return the function name extracted from a (header, body) record."""
    return NAME_RE.search(record[0]).group(1)


def main():
    """Filter, sort and rewrite the symbol records."""
    with open("functionsthunk.txt", "r") as f:
        lines = f.readlines()
    # Pair each header line with the body line that follows it, skipping
    # non-virtual thunk entries.
    records = [(header, body)
               for header, body in zip(lines[0::2], lines[1::2])
               if 'non-virtual' not in header]
    # Sort by extracted name only.  (The original also left behind a dead,
    # shadowed duplicate of the key function and debug print() calls that
    # fired once per element during sorting.)
    records.sort(key=sort_key)
    with open("functions.txt", 'w') as out:
        out.write("".join(header + body for header, body in records))


if __name__ == '__main__':
    main()
l = []
with open("functionsthunk.txt", "r") as f:
s = f.readlines()
for k, m in zip(s[0::2], s[1::2]):
if 'non-virtual' not in k:
l.append((k, m))
def sfun(a):
m = re.search(r"(?:non-virtual thunk to )?(.+?\(.*\)(?: const)?\n.+\n\n)", a)
print(a)
return m.group(1)
with open("functions.txt", 'w') as f:
def sfun(a):
m = re.search(r"(?:non-virtual thunk to )?(.+?)\(.*\)(?: const)?", a[0])
print(a)
return m.group(1)
l.sort(key=sfun)
f.write("".join([k + m for k, m in l]))
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.